aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-sgi_uv27
-rw-r--r--Documentation/DocBook/mac80211.tmpl12
-rw-r--r--Documentation/dontdiff2
-rw-r--r--Documentation/feature-removal-schedule.txt18
-rw-r--r--Documentation/filesystems/ntfs.txt4
-rw-r--r--Documentation/filesystems/proc.txt19
-rw-r--r--Documentation/laptops/thinkpad-acpi.txt11
-rw-r--r--Documentation/networking/LICENSE.qlge46
-rw-r--r--Documentation/networking/can.txt44
-rw-r--r--Documentation/networking/multiqueue.txt54
-rw-r--r--Documentation/networking/phonet.txt111
-rw-r--r--Documentation/networking/regulatory.txt194
-rw-r--r--Documentation/rfkill.txt32
-rw-r--r--Documentation/video4linux/CARDLIST.au08281
-rw-r--r--Documentation/video4linux/gspca.txt29
-rw-r--r--MAINTAINERS37
-rw-r--r--arch/arm/include/asm/byteorder.h25
-rw-r--r--arch/arm/include/asm/io.h5
-rw-r--r--arch/arm/include/asm/mach/map.h14
-rw-r--r--arch/arm/mach-omap1/mcbsp.c8
-rw-r--r--arch/arm/mach-omap2/mcbsp.c4
-rw-r--r--arch/arm/mm/mmu.c20
-rw-r--r--arch/arm/plat-mxc/clock.c1
-rw-r--r--arch/arm/plat-omap/gpio.c2
-rw-r--r--arch/arm/plat-omap/include/mach/mcbsp.h2
-rw-r--r--arch/arm/plat-omap/mcbsp.c5
-rw-r--r--arch/avr32/kernel/asm-offsets.c6
-rw-r--r--arch/avr32/kernel/entry-avr32b.S59
-rw-r--r--arch/avr32/mach-at32ap/pm-at32ap700x.S2
-rw-r--r--arch/m68k/atari/atakeyb.c9
-rw-r--r--arch/mips/Kconfig9
-rw-r--r--arch/mips/kernel/setup.c33
-rw-r--r--arch/mips/kernel/traps.c18
-rw-r--r--arch/mips/mm/c-r3k.c1
-rw-r--r--arch/mips/mm/c-r4k.c18
-rw-r--r--arch/mips/mm/c-tx39.c2
-rw-r--r--arch/mips/mm/cache.c1
-rw-r--r--arch/mips/mm/tlbex.c6
-rw-r--r--arch/mips/txx9/generic/setup.c1
-rw-r--r--arch/parisc/hpux/fs.c2
-rw-r--r--arch/powerpc/Makefile5
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h6
-rw-r--r--arch/powerpc/kernel/Makefile7
-rw-r--r--arch/powerpc/kernel/align.c2
-rw-r--r--arch/powerpc/kernel/idle_6xx.S2
-rw-r--r--arch/powerpc/kernel/idle_e500.S3
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S7
-rw-r--r--arch/powerpc/mm/hash_utils_64.c18
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c29
-rw-r--r--arch/powerpc/platforms/powermac/Makefile2
-rw-r--r--arch/sh/configs/ap325rxa_defconfig22
-rw-r--r--arch/sh/configs/migor_defconfig21
-rw-r--r--arch/sh/include/asm/uaccess_64.h2
-rw-r--r--arch/sh/kernel/cpu/sh5/entry.S14
-rw-r--r--arch/sh/kernel/entry-common.S27
-rw-r--r--arch/sh/kernel/machine_kexec.c2
-rw-r--r--arch/sh/kernel/ptrace_64.c2
-rw-r--r--arch/sh/kernel/setup.c6
-rw-r--r--arch/sh/kernel/sys_sh.c2
-rw-r--r--arch/sh/mm/consistent.c6
-rw-r--r--arch/sparc/include/asm/smp_32.h25
-rw-r--r--arch/sparc/kernel/of_device.c19
-rw-r--r--arch/sparc/kernel/sun4d_smp.c16
-rw-r--r--arch/sparc/kernel/sun4m_smp.c12
-rw-r--r--arch/sparc64/kernel/of_device.c20
-rw-r--r--arch/sparc64/kernel/smp.c14
-rw-r--r--arch/sparc64/mm/init.c2
-rw-r--r--arch/x86/boot/cpucheck.c8
-rw-r--r--arch/x86/kernel/alternative.c36
-rw-r--r--arch/x86/kernel/cpu/amd.c9
-rw-r--r--arch/x86/kernel/cpu/centaur.c11
-rw-r--r--arch/x86/kernel/cpu/common.c34
-rw-r--r--arch/x86/kernel/cpu/common_64.c74
-rw-r--r--arch/x86/kernel/cpu/cyrix.c32
-rw-r--r--arch/x86/kernel/cpu/feature_names.c3
-rw-r--r--arch/x86/kernel/hpet.c19
-rw-r--r--arch/x86/kernel/io_delay.c8
-rw-r--r--arch/x86/kernel/tsc.c234
-rw-r--r--arch/x86/pci/i386.c87
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--block/cmd-filter.c3
-rw-r--r--block/genhd.c15
-rw-r--r--crypto/async_tx/async_tx.c3
-rw-r--r--crypto/camellia.c84
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/dispatcher/dsobject.c2
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/acpi/sbshc.c7
-rw-r--r--drivers/acpi/tables.c2
-rw-r--r--drivers/acpi/toshiba_acpi.c261
-rw-r--r--drivers/ata/Kconfig6
-rw-r--r--drivers/ata/ahci.c21
-rw-r--r--drivers/ata/libata-sff.c5
-rw-r--r--drivers/ata/pata_marvell.c51
-rw-r--r--drivers/ata/pata_sil680.c3
-rw-r--r--drivers/ata/sata_mv.c3
-rw-r--r--drivers/ata/sata_nv.c19
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/horizon.c8
-rw-r--r--drivers/atm/idt77252.c32
-rw-r--r--drivers/atm/idt77252.h4
-rw-r--r--drivers/atm/zatm.c6
-rw-r--r--drivers/block/aoe/aoe.h9
-rw-r--r--drivers/block/aoe/aoeblk.c8
-rw-r--r--drivers/block/aoe/aoechr.c8
-rw-r--r--drivers/block/aoe/aoecmd.c85
-rw-r--r--drivers/block/aoe/aoedev.c12
-rw-r--r--drivers/block/aoe/aoemain.c1
-rw-r--r--drivers/block/aoe/aoenet.c9
-rw-r--r--drivers/bluetooth/hci_bcsp.c18
-rw-r--r--drivers/bluetooth/hci_usb.h10
-rw-r--r--drivers/char/random.c19
-rw-r--r--drivers/clocksource/acpi_pm.c54
-rw-r--r--drivers/firmware/iscsi_ibft.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c4
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c271
-rw-r--r--drivers/ide/Kconfig16
-rw-r--r--drivers/ide/arm/palm_bk3710.c8
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/ide/ide-disk.c14
-rw-r--r--drivers/input/mouse/bcm5974.c74
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h2
-rw-r--r--drivers/isdn/capi/kcapi.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_pci.h4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c23
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c352
-rw-r--r--drivers/isdn/mISDN/timerdev.c22
-rw-r--r--drivers/md/bitmap.c45
-rw-r--r--drivers/md/md.c2
-rw-r--r--drivers/media/common/saa7146_video.c4
-rw-r--r--drivers/media/common/tuners/mt2131.c2
-rw-r--r--drivers/media/common/tuners/mt2131.h2
-rw-r--r--drivers/media/common/tuners/mt2131_priv.h2
-rw-r--r--drivers/media/common/tuners/mxl5005s.c4
-rw-r--r--drivers/media/common/tuners/mxl5005s.h2
-rw-r--r--drivers/media/common/tuners/tuner-simple.c33
-rw-r--r--drivers/media/common/tuners/xc5000.c2
-rw-r--r--drivers/media/common/tuners/xc5000.h2
-rw-r--r--drivers/media/common/tuners/xc5000_priv.h2
-rw-r--r--drivers/media/dvb/b2c2/flexcop-fe-tuner.c3
-rw-r--r--drivers/media/dvb/b2c2/flexcop-i2c.c12
-rw-r--r--drivers/media/dvb/bt8xx/dst.c4
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c1
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ca_en50221.c4
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c8
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c9
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb/frontends/au8522.c47
-rw-r--r--drivers/media/dvb/frontends/au8522.h11
-rw-r--r--drivers/media/dvb/frontends/cx22702.c2
-rw-r--r--drivers/media/dvb/frontends/cx22702.h2
-rw-r--r--drivers/media/dvb/frontends/cx24123.c6
-rw-r--r--drivers/media/dvb/frontends/cx24123.h2
-rw-r--r--drivers/media/dvb/frontends/s5h1409.c3
-rw-r--r--drivers/media/dvb/frontends/s5h1409.h2
-rw-r--r--drivers/media/dvb/frontends/s5h1411.c3
-rw-r--r--drivers/media/dvb/frontends/s5h1411.h2
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c3
-rw-r--r--drivers/media/dvb/frontends/tda10048.c4
-rw-r--r--drivers/media/dvb/frontends/tda10048.h2
-rw-r--r--drivers/media/dvb/siano/sms-cards.c2
-rw-r--r--drivers/media/dvb/siano/sms-cards.h2
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c2
-rw-r--r--drivers/media/dvb/siano/smscoreapi.h2
-rw-r--r--drivers/media/dvb/siano/smsdvb.c2
-rw-r--r--drivers/media/dvb/siano/smsusb.c2
-rw-r--r--drivers/media/dvb/ttpci/budget-patch.c3
-rw-r--r--drivers/media/dvb/ttpci/budget.c3
-rw-r--r--drivers/media/radio/Makefile4
-rw-r--r--drivers/media/radio/dsbr100.c2
-rw-r--r--drivers/media/radio/miropcm20-radio.c266
-rw-r--r--drivers/media/radio/miropcm20-rds-core.c211
-rw-r--r--drivers/media/radio/miropcm20-rds-core.h19
-rw-r--r--drivers/media/radio/miropcm20-rds.c136
-rw-r--r--drivers/media/radio/radio-aimslab.c3
-rw-r--r--drivers/media/radio/radio-aztech.c3
-rw-r--r--drivers/media/radio/radio-cadet.c2
-rw-r--r--drivers/media/radio/radio-gemtek-pci.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c3
-rw-r--r--drivers/media/radio/radio-maestro.c3
-rw-r--r--drivers/media/radio/radio-maxiradio.c28
-rw-r--r--drivers/media/radio/radio-rtrack2.c3
-rw-r--r--drivers/media/radio/radio-sf16fmi.c2
-rw-r--r--drivers/media/radio/radio-si470x.c4
-rw-r--r--drivers/media/radio/radio-terratec.c3
-rw-r--r--drivers/media/radio/radio-trust.c3
-rw-r--r--drivers/media/radio/radio-zoltrix.c3
-rw-r--r--drivers/media/video/Makefile4
-rw-r--r--drivers/media/video/au0828/Kconfig1
-rw-r--r--drivers/media/video/au0828/au0828-cards.c9
-rw-r--r--drivers/media/video/au0828/au0828-cards.h3
-rw-r--r--drivers/media/video/au0828/au0828-core.c4
-rw-r--r--drivers/media/video/au0828/au0828-dvb.c25
-rw-r--r--drivers/media/video/au0828/au0828-i2c.c2
-rw-r--r--drivers/media/video/au0828/au0828-reg.h2
-rw-r--r--drivers/media/video/au0828/au0828.h2
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c73
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c1
-rw-r--r--drivers/media/video/bt8xx/bttv-risc.c3
-rw-r--r--drivers/media/video/bt8xx/bttvp.h5
-rw-r--r--drivers/media/video/btcx-risc.c4
-rw-r--r--drivers/media/video/btcx-risc.h2
-rw-r--r--drivers/media/video/bw-qcam.c3
-rw-r--r--drivers/media/video/c-qcam.c3
-rw-r--r--drivers/media/video/cpia.c2
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/video/cx18/cx18-av-firmware.c16
-rw-r--r--drivers/media/video/cx18/cx18-driver.c6
-rw-r--r--drivers/media/video/cx18/cx18-dvb.c2
-rw-r--r--drivers/media/video/cx18/cx18-dvb.h2
-rw-r--r--drivers/media/video/cx18/cx18-irq.c2
-rw-r--r--drivers/media/video/cx18/cx18-queue.c129
-rw-r--r--drivers/media/video/cx18/cx18-queue.h2
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c4
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-i2c.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-reg.h2
-rw-r--r--drivers/media/video/cx23885/cx23885-vbi.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c4
-rw-r--r--drivers/media/video/cx23885/cx23885.h2
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/video/dabusb.c1
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c1
-rw-r--r--drivers/media/video/gspca/conex.c28
-rw-r--r--drivers/media/video/gspca/etoms.c30
-rw-r--r--drivers/media/video/gspca/gspca.c119
-rw-r--r--drivers/media/video/gspca/gspca.h21
-rw-r--r--drivers/media/video/gspca/mars.c41
-rw-r--r--drivers/media/video/gspca/ov519.c1167
-rw-r--r--drivers/media/video/gspca/pac207.c87
-rw-r--r--drivers/media/video/gspca/pac7311.c1110
-rw-r--r--drivers/media/video/gspca/pac_common.h60
-rw-r--r--drivers/media/video/gspca/sonixb.c594
-rw-r--r--drivers/media/video/gspca/sonixj.c539
-rw-r--r--drivers/media/video/gspca/spca500.c20
-rw-r--r--drivers/media/video/gspca/spca501.c16
-rw-r--r--drivers/media/video/gspca/spca505.c16
-rw-r--r--drivers/media/video/gspca/spca506.c20
-rw-r--r--drivers/media/video/gspca/spca508.c21
-rw-r--r--drivers/media/video/gspca/spca561.c727
-rw-r--r--drivers/media/video/gspca/stk014.c20
-rw-r--r--drivers/media/video/gspca/sunplus.c167
-rw-r--r--drivers/media/video/gspca/t613.c41
-rw-r--r--drivers/media/video/gspca/tv8532.c20
-rw-r--r--drivers/media/video/gspca/vc032x.c31
-rw-r--r--drivers/media/video/gspca/zc3xx.c85
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h1
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c29
-rw-r--r--drivers/media/video/ivtv/ivtv-queue.h2
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-version.h2
-rw-r--r--drivers/media/video/ks0127.c31
-rw-r--r--drivers/media/video/meye.c2
-rw-r--r--drivers/media/video/mxb.c12
-rw-r--r--drivers/media/video/ov511.c14
-rw-r--r--drivers/media/video/pms.c13
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c18
-rw-r--r--drivers/media/video/saa7115.c5
-rw-r--r--drivers/media/video/se401.c2
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c1
-rw-r--r--drivers/media/video/sn9c102/sn9c102_devtable.h21
-rw-r--r--drivers/media/video/stv680.c2
-rw-r--r--drivers/media/video/usbvideo/ibmcam.c6
-rw-r--r--drivers/media/video/usbvideo/vicam.c2
-rw-r--r--drivers/media/video/v4l2-dev.c5
-rw-r--r--drivers/media/video/v4l2-ioctl.c4
-rw-r--r--drivers/media/video/vivi.c52
-rw-r--r--drivers/media/video/w9966.c2
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c1
-rw-r--r--drivers/media/video/zc0301/zc0301_sensor.h19
-rw-r--r--drivers/misc/acer-wmi.c3
-rw-r--r--drivers/misc/fujitsu-laptop.c7
-rw-r--r--drivers/misc/hp-wmi.c91
-rw-r--r--drivers/misc/thinkpad_acpi.c1
-rw-r--r--drivers/mmc/card/block.c4
-rw-r--r--drivers/mmc/host/at91_mci.c20
-rw-r--r--drivers/mtd/mtdchar.c16
-rw-r--r--drivers/mtd/nand/tmio_nand.c8
-rw-r--r--drivers/net/3c505.c4
-rw-r--r--drivers/net/8139cp.c14
-rw-r--r--drivers/net/8139too.c7
-rw-r--r--drivers/net/Kconfig42
-rw-r--r--drivers/net/Makefile6
-rw-r--r--drivers/net/arcnet/arcnet.c18
-rw-r--r--drivers/net/arcnet/com20020.c16
-rw-r--r--drivers/net/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/atlx/Makefile2
-rw-r--r--drivers/net/atlx/atl2.c3127
-rw-r--r--drivers/net/atlx/atl2.h530
-rw-r--r--drivers/net/au1000_eth.c3
-rw-r--r--drivers/net/ax88796.c14
-rw-r--r--drivers/net/bfin_mac.c8
-rw-r--r--drivers/net/bnx2.c22
-rw-r--r--drivers/net/bnx2.h5
-rw-r--r--drivers/net/bnx2x.h5
-rw-r--r--drivers/net/bnx2x_main.c125
-rw-r--r--drivers/net/bonding/bond_alb.c28
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/cassini.c56
-rw-r--r--drivers/net/cassini.h1522
-rw-r--r--drivers/net/cs89x0.c2
-rw-r--r--drivers/net/cxgb3/adapter.h3
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c8
-rw-r--r--drivers/net/cxgb3/l2t.c39
-rw-r--r--drivers/net/cxgb3/l2t.h3
-rw-r--r--drivers/net/cxgb3/sge.c80
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000e/82571.c153
-rw-r--r--drivers/net/e1000e/defines.h15
-rw-r--r--drivers/net/e1000e/e1000.h31
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/ethtool.c60
-rw-r--r--drivers/net/e1000e/hw.h15
-rw-r--r--drivers/net/e1000e/ich8lan.c173
-rw-r--r--drivers/net/e1000e/lib.c7
-rw-r--r--drivers/net/e1000e/netdev.c435
-rw-r--r--drivers/net/e1000e/param.c27
-rw-r--r--drivers/net/e1000e/phy.c194
-rw-r--r--drivers/net/ehea/ehea.h4
-rw-r--r--drivers/net/ehea/ehea_main.c26
-rw-r--r--drivers/net/ehea/ehea_phyp.c2
-rw-r--r--drivers/net/ehea/ehea_qmr.c3
-rw-r--r--drivers/net/enc28j60.c56
-rw-r--r--drivers/net/enic/Makefile5
-rw-r--r--drivers/net/enic/cq_desc.h79
-rw-r--r--drivers/net/enic/cq_enet_desc.h169
-rw-r--r--drivers/net/enic/enic.h115
-rw-r--r--drivers/net/enic/enic_main.c1949
-rw-r--r--drivers/net/enic/enic_res.c370
-rw-r--r--drivers/net/enic/enic_res.h151
-rw-r--r--drivers/net/enic/rq_enet_desc.h60
-rw-r--r--drivers/net/enic/vnic_cq.c89
-rw-r--r--drivers/net/enic/vnic_cq.h113
-rw-r--r--drivers/net/enic/vnic_dev.c674
-rw-r--r--drivers/net/enic/vnic_dev.h106
-rw-r--r--drivers/net/enic/vnic_devcmd.h282
-rw-r--r--drivers/net/enic/vnic_enet.h47
-rw-r--r--drivers/net/enic/vnic_intr.c62
-rw-r--r--drivers/net/enic/vnic_intr.h92
-rw-r--r--drivers/net/enic/vnic_nic.h65
-rw-r--r--drivers/net/enic/vnic_resource.h63
-rw-r--r--drivers/net/enic/vnic_rq.c199
-rw-r--r--drivers/net/enic/vnic_rq.h204
-rw-r--r--drivers/net/enic/vnic_rss.h32
-rw-r--r--drivers/net/enic/vnic_stats.h70
-rw-r--r--drivers/net/enic/vnic_wq.c184
-rw-r--r--drivers/net/enic/vnic_wq.h154
-rw-r--r--drivers/net/enic/wq_enet_desc.h98
-rw-r--r--drivers/net/forcedeth.c8
-rw-r--r--drivers/net/ibm_newemac/mal.h4
-rw-r--r--drivers/net/ibm_newemac/phy.c2
-rw-r--r--drivers/net/igb/igb_main.c12
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/ixgb/ixgb.h2
-rw-r--r--drivers/net/ixgbe/ixgbe.h103
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c628
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c1060
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h58
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c302
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c1932
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c244
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h63
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h563
-rw-r--r--drivers/net/jme.c3019
-rw-r--r--drivers/net/jme.h1199
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/mipsnet.c2
-rw-r--r--drivers/net/mlx4/alloc.c1
-rw-r--r--drivers/net/myri10ge/myri10ge.c33
-rw-r--r--drivers/net/ne.c9
-rw-r--r--drivers/net/netx-eth.c2
-rw-r--r--drivers/net/netxen/netxen_nic.h2
-rw-r--r--drivers/net/netxen/netxen_nic_main.c30
-rw-r--r--drivers/net/pci-skeleton.c4
-rw-r--r--drivers/net/pcmcia/axnet_cs.c1
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c2
-rw-r--r--drivers/net/ppp_generic.c10
-rw-r--r--drivers/net/pppol2tp.c2
-rw-r--r--drivers/net/qlge/Makefile7
-rw-r--r--drivers/net/qlge/qlge.h1593
-rw-r--r--drivers/net/qlge/qlge_dbg.c858
-rw-r--r--drivers/net/qlge/qlge_ethtool.c415
-rw-r--r--drivers/net/qlge/qlge_main.c3956
-rw-r--r--drivers/net/qlge/qlge_mpi.c150
-rw-r--r--drivers/net/r6040.c4
-rw-r--r--drivers/net/r8169.c428
-rw-r--r--drivers/net/s2io.c62
-rw-r--r--drivers/net/s2io.h1
-rw-r--r--drivers/net/sfc/bitfield.h178
-rw-r--r--drivers/net/sfc/boards.c12
-rw-r--r--drivers/net/sfc/boards.h2
-rw-r--r--drivers/net/sfc/efx.c489
-rw-r--r--drivers/net/sfc/efx.h14
-rw-r--r--drivers/net/sfc/enum.h9
-rw-r--r--drivers/net/sfc/ethtool.c184
-rw-r--r--drivers/net/sfc/falcon.c1019
-rw-r--r--drivers/net/sfc/falcon.h17
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h80
-rw-r--r--drivers/net/sfc/falcon_io.h1
-rw-r--r--drivers/net/sfc/falcon_xmac.c346
-rw-r--r--drivers/net/sfc/mac.h4
-rw-r--r--drivers/net/sfc/mdio_10g.c16
-rw-r--r--drivers/net/sfc/mdio_10g.h13
-rw-r--r--drivers/net/sfc/net_driver.h144
-rw-r--r--drivers/net/sfc/phy.h10
-rw-r--r--drivers/net/sfc/rx.c78
-rw-r--r--drivers/net/sfc/rx.h4
-rw-r--r--drivers/net/sfc/selftest.c391
-rw-r--r--drivers/net/sfc/selftest.h13
-rw-r--r--drivers/net/sfc/sfe4001.c248
-rw-r--r--drivers/net/sfc/spi.h89
-rw-r--r--drivers/net/sfc/tenxpress.c149
-rw-r--r--drivers/net/sfc/tx.c385
-rw-r--r--drivers/net/sfc/tx.h2
-rw-r--r--drivers/net/sfc/workarounds.h4
-rw-r--r--drivers/net/sfc/xfp_phy.c12
-rw-r--r--drivers/net/skfp/pmf.c29
-rw-r--r--drivers/net/sky2.c170
-rw-r--r--drivers/net/smc911x.c68
-rw-r--r--drivers/net/smc91x.c43
-rw-r--r--drivers/net/smc91x.h2
-rw-r--r--drivers/net/sundance.c95
-rw-r--r--drivers/net/tehuti.h8
-rw-r--r--drivers/net/tg3.c91
-rw-r--r--drivers/net/tg3.h1
-rw-r--r--drivers/net/tsi108_eth.c6
-rw-r--r--drivers/net/tulip/de2104x.c1
-rw-r--r--drivers/net/tulip/de4x5.c38
-rw-r--r--drivers/net/ucc_geth.c116
-rw-r--r--drivers/net/usb/hso.c335
-rw-r--r--drivers/net/usb/mcs7830.c2
-rw-r--r--drivers/net/usb/pegasus.c31
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/via-rhine.c8
-rw-r--r--drivers/net/via-velocity.h2
-rw-r--r--drivers/net/wan/cycx_drv.c6
-rw-r--r--drivers/net/wan/cycx_x25.c12
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hdlc_x25.c8
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wireless/Kconfig17
-rw-r--r--drivers/net/wireless/Makefile4
-rw-r--r--drivers/net/wireless/adm8211.c23
-rw-r--r--drivers/net/wireless/airo.c16
-rw-r--r--drivers/net/wireless/airport.c3
-rw-r--r--drivers/net/wireless/ath5k/Makefile12
-rw-r--r--drivers/net/wireless/ath5k/ath5k.h556
-rw-r--r--drivers/net/wireless/ath5k/attach.c315
-rw-r--r--drivers/net/wireless/ath5k/base.c463
-rw-r--r--drivers/net/wireless/ath5k/base.h10
-rw-r--r--drivers/net/wireless/ath5k/caps.c193
-rw-r--r--drivers/net/wireless/ath5k/debug.c4
-rw-r--r--drivers/net/wireless/ath5k/desc.c667
-rw-r--r--drivers/net/wireless/ath5k/desc.h (renamed from drivers/net/wireless/ath5k/hw.h)400
-rw-r--r--drivers/net/wireless/ath5k/dma.c566
-rw-r--r--drivers/net/wireless/ath5k/eeprom.c466
-rw-r--r--drivers/net/wireless/ath5k/eeprom.h215
-rw-r--r--drivers/net/wireless/ath5k/gpio.c176
-rw-r--r--drivers/net/wireless/ath5k/hw.c4529
-rw-r--r--drivers/net/wireless/ath5k/initvals.c22
-rw-r--r--drivers/net/wireless/ath5k/pcu.c1002
-rw-r--r--drivers/net/wireless/ath5k/phy.c10
-rw-r--r--drivers/net/wireless/ath5k/qcu.c488
-rw-r--r--drivers/net/wireless/ath5k/reg.h102
-rw-r--r--drivers/net/wireless/ath5k/reset.c925
-rw-r--r--drivers/net/wireless/ath9k/Kconfig3
-rw-r--r--drivers/net/wireless/ath9k/ath9k.h62
-rw-r--r--drivers/net/wireless/ath9k/beacon.c259
-rw-r--r--drivers/net/wireless/ath9k/core.c306
-rw-r--r--drivers/net/wireless/ath9k/core.h264
-rw-r--r--drivers/net/wireless/ath9k/hw.c228
-rw-r--r--drivers/net/wireless/ath9k/hw.h120
-rw-r--r--drivers/net/wireless/ath9k/main.c1257
-rw-r--r--drivers/net/wireless/ath9k/phy.h12
-rw-r--r--drivers/net/wireless/ath9k/rc.c186
-rw-r--r--drivers/net/wireless/ath9k/rc.h222
-rw-r--r--drivers/net/wireless/ath9k/recv.c86
-rw-r--r--drivers/net/wireless/ath9k/reg.h6
-rw-r--r--drivers/net/wireless/ath9k/xmit.c404
-rw-r--r--drivers/net/wireless/b43/Kconfig12
-rw-r--r--drivers/net/wireless/b43/Makefile7
-rw-r--r--drivers/net/wireless/b43/b43.h146
-rw-r--r--drivers/net/wireless/b43/debugfs.c79
-rw-r--r--drivers/net/wireless/b43/lo.c120
-rw-r--r--drivers/net/wireless/b43/lo.h4
-rw-r--r--drivers/net/wireless/b43/main.c397
-rw-r--r--drivers/net/wireless/b43/phy.h340
-rw-r--r--drivers/net/wireless/b43/phy_a.c643
-rw-r--r--drivers/net/wireless/b43/phy_a.h130
-rw-r--r--drivers/net/wireless/b43/phy_common.c381
-rw-r--r--drivers/net/wireless/b43/phy_common.h413
-rw-r--r--drivers/net/wireless/b43/phy_g.c (renamed from drivers/net/wireless/b43/phy.c)4420
-rw-r--r--drivers/net/wireless/b43/phy_g.h209
-rw-r--r--drivers/net/wireless/b43/phy_lp.c155
-rw-r--r--drivers/net/wireless/b43/phy_lp.h540
-rw-r--r--drivers/net/wireless/b43/phy_n.c (renamed from drivers/net/wireless/b43/nphy.c)154
-rw-r--r--drivers/net/wireless/b43/phy_n.h (renamed from drivers/net/wireless/b43/nphy.h)54
-rw-r--r--drivers/net/wireless/b43/rfkill.c5
-rw-r--r--drivers/net/wireless/b43/sysfs.c23
-rw-r--r--drivers/net/wireless/b43/tables.c43
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c4
-rw-r--r--drivers/net/wireless/b43/wa.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c8
-rw-r--r--drivers/net/wireless/b43legacy/main.c37
-rw-r--r--drivers/net/wireless/b43legacy/phy.c36
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c6
-rw-r--r--drivers/net/wireless/hermes.c124
-rw-r--r--drivers/net/wireless/hermes.h45
-rw-r--r--drivers/net/wireless/hermes_dld.c730
-rw-r--r--drivers/net/wireless/hermes_dld.h48
-rw-r--r--drivers/net/wireless/hermes_rid.h17
-rw-r--r--drivers/net/wireless/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2200.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debug.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-io.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c205
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c228
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c255
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c75
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c164
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h108
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c138
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c31
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c87
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c239
-rw-r--r--drivers/net/wireless/libertas/assoc.c750
-rw-r--r--drivers/net/wireless/libertas/assoc.h18
-rw-r--r--drivers/net/wireless/libertas/cmd.c430
-rw-r--r--drivers/net/wireless/libertas/cmd.h16
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c80
-rw-r--r--drivers/net/wireless/libertas/decl.h1
-rw-r--r--drivers/net/wireless/libertas/defs.h42
-rw-r--r--drivers/net/wireless/libertas/dev.h10
-rw-r--r--drivers/net/wireless/libertas/host.h51
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h84
-rw-r--r--drivers/net/wireless/libertas/if_cs.c15
-rw-r--r--drivers/net/wireless/libertas/if_usb.c182
-rw-r--r--drivers/net/wireless/libertas/if_usb.h5
-rw-r--r--drivers/net/wireless/libertas/main.c41
-rw-r--r--drivers/net/wireless/libertas/scan.c5
-rw-r--r--drivers/net/wireless/libertas/wext.c331
-rw-r--r--drivers/net/wireless/libertas_tf/Makefile6
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c669
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c766
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.h98
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h514
-rw-r--r--drivers/net/wireless/libertas_tf/main.c662
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c211
-rw-r--r--drivers/net/wireless/orinoco.c1959
-rw-r--r--drivers/net/wireless/orinoco.h61
-rw-r--r--drivers/net/wireless/orinoco_cs.c3
-rw-r--r--drivers/net/wireless/orinoco_nortel.c3
-rw-r--r--drivers/net/wireless/orinoco_pci.c3
-rw-r--r--drivers/net/wireless/orinoco_plx.c3
-rw-r--r--drivers/net/wireless/orinoco_tmd.c3
-rw-r--r--drivers/net/wireless/p54/p54.h55
-rw-r--r--drivers/net/wireless/p54/p54common.c685
-rw-r--r--drivers/net/wireless/p54/p54common.h121
-rw-r--r--drivers/net/wireless/p54/p54pci.c427
-rw-r--r--drivers/net/wireless/p54/p54pci.h20
-rw-r--r--drivers/net/wireless/p54/p54usb.c200
-rw-r--r--drivers/net/wireless/p54/p54usb.h11
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c8
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig81
-rw-r--r--drivers/net/wireless/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c58
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h22
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c59
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h17
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c68
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c215
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c97
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c129
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h49
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c244
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c133
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h82
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00rfkill.c56
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c21
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c447
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h38
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c457
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h38
-rw-r--r--drivers/net/wireless/rtl8180.h31
-rw-r--r--drivers/net/wireless/rtl8180_dev.c44
-rw-r--r--drivers/net/wireless/rtl8187.h6
-rw-r--r--drivers/net/wireless/rtl8187_dev.c20
-rw-r--r--drivers/net/wireless/rtl818x.h35
-rw-r--r--drivers/net/wireless/spectrum_cs.c426
-rw-r--r--drivers/net/wireless/wl3501_cs.c8
-rw-r--r--drivers/net/wireless/zd1211rw/Makefile2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c1
-rw-r--r--drivers/net/wireless/zd1211rw/zd_ieee80211.c100
-rw-r--r--drivers/net/wireless/zd1211rw/zd_ieee80211.h95
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c67
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h65
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.c2
-rw-r--r--drivers/pci/setup-bus.c5
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c2
-rw-r--r--drivers/rtc/rtc-cmos.c38
-rw-r--r--drivers/rtc/rtc-lib.c5
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/serial/8250.c16
-rw-r--r--drivers/serial/8250.h1
-rw-r--r--drivers/ssb/pci.c84
-rw-r--r--drivers/usb/atm/usbatm.c5
-rw-r--r--drivers/usb/host/ohci-omap.c2
-rw-r--r--drivers/usb/serial/console.c2
-rw-r--r--drivers/video/atmel_lcdfb.c10
-rw-r--r--drivers/video/cirrusfb.c59
-rw-r--r--drivers/video/tdfxfb.c9
-rw-r--r--firmware/Makefile17
-rw-r--r--firmware/WHENCE10
-rw-r--r--firmware/sun/cassini.bin.ihex143
-rw-r--r--fs/cifs/CHANGES5
-rw-r--r--fs/cifs/README14
-rw-r--r--fs/cifs/cifsencrypt.c1
-rw-r--r--fs/cifs/file.c4
-rw-r--r--fs/cifs/sess.c2
-rw-r--r--fs/nfs/super.c6
-rw-r--r--fs/nfsd/nfs4acl.c2
-rw-r--r--fs/nfsd/nfs4proc.c12
-rw-r--r--fs/ntfs/usnjrnl.h4
-rw-r--r--fs/proc/array.c59
-rw-r--r--fs/proc/proc_misc.c7
-rw-r--r--include/asm-generic/Kbuild.asm6
-rw-r--r--include/asm-generic/bug.h10
-rw-r--r--include/asm-generic/syscall.h2
-rw-r--r--include/asm-mips/cacheflush.h1
-rw-r--r--include/asm-um/dma-mapping.h7
-rw-r--r--include/asm-x86/cpufeature.h11
-rw-r--r--include/asm-x86/required-features.h8
-rw-r--r--include/linux/Kbuild7
-rw-r--r--include/linux/clockchips.h2
-rw-r--r--include/linux/cpuset.h2
-rw-r--r--include/linux/ide.h5
-rw-r--r--include/linux/ieee80211.h31
-rw-r--r--include/linux/if.h1
-rw-r--r--include/linux/if_ether.h2
-rw-r--r--include/linux/if_link.h1
-rw-r--r--include/linux/if_phonet.h18
-rw-r--r--include/linux/ioport.h1
-rw-r--r--include/linux/ip_vs.h160
-rw-r--r--include/linux/isdn_ppp.h2
-rw-r--r--include/linux/list.h13
-rw-r--r--include/linux/mroute.h2
-rw-r--r--include/linux/mroute6.h1
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/nl80211.h133
-rw-r--r--include/linux/pci_ids.h13
-rw-r--r--include/linux/phonet.h160
-rw-r--r--include/linux/pim.h18
-rw-r--r--include/linux/pkt_sched.h7
-rw-r--r--include/linux/quicklist.h7
-rw-r--r--include/linux/res_counter.h2
-rw-r--r--include/linux/rfkill.h7
-rw-r--r--include/linux/rtnetlink.h4
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/skbuff.h150
-rw-r--r--include/linux/socket.h4
-rw-r--r--include/linux/ssb/ssb_regs.h19
-rw-r--r--include/linux/sunrpc/svc_rdma.h1
-rw-r--r--include/linux/tc_act/Kbuild1
-rw-r--r--include/linux/tc_act/tc_skbedit.h44
-rw-r--r--include/linux/tcp.h3
-rw-r--r--include/linux/tracehook.h2
-rw-r--r--include/linux/videodev2.h2
-rw-r--r--include/net/bluetooth/hci_core.h3
-rw-r--r--include/net/cfg80211.h83
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/inet_timewait_sock.h3
-rw-r--r--include/net/ip_vs.h310
-rw-r--r--include/net/mac80211.h138
-rw-r--r--include/net/netlink.h82
-rw-r--r--include/net/phonet/phonet.h112
-rw-r--r--include/net/phonet/pn_dev.h50
-rw-r--r--include/net/pkt_sched.h5
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/sctp.h10
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/tc_act/tc_skbedit.h34
-rw-r--r--include/net/tcp.h49
-rw-r--r--include/net/wireless.h61
-rw-r--r--include/net/xfrm.h25
-rw-r--r--kernel/auditsc.c3
-rw-r--r--kernel/cpuset.c312
-rw-r--r--kernel/exit.c84
-rw-r--r--kernel/pid_namespace.c3
-rw-r--r--kernel/pm_qos_params.c25
-rw-r--r--kernel/resource.c88
-rw-r--r--kernel/sched.c78
-rw-r--r--kernel/softlockup.c3
-rw-r--r--kernel/sysctl.c1
-rw-r--r--kernel/time/clockevents.c3
-rw-r--r--kernel/time/ntp.c2
-rw-r--r--kernel/time/tick-broadcast.c78
-rw-r--r--kernel/time/tick-common.c1
-rw-r--r--kernel/time/tick-internal.h2
-rw-r--r--kernel/time/tick-oneshot.c46
-rw-r--r--kernel/time/tick-sched.c3
-rw-r--r--lib/Kconfig.debug6
-rw-r--r--lib/debugobjects.c31
-rw-r--r--mm/filemap.c11
-rw-r--r--mm/mmap.c4
-rw-r--r--mm/page_alloc.c9
-rw-r--r--mm/page_isolation.c1
-rw-r--r--mm/quicklist.c9
-rw-r--r--mm/truncate.c4
-rw-r--r--net/8021q/vlan.c1
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/Kconfig9
-rw-r--r--net/Makefile1
-rw-r--r--net/atm/br2684.c8
-rw-r--r--net/atm/lec.c1
-rw-r--r--net/bluetooth/af_bluetooth.c2
-rw-r--r--net/bluetooth/hci_conn.c21
-rw-r--r--net/bluetooth/hci_event.c11
-rw-r--r--net/bluetooth/l2cap.c34
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bridge/br.c22
-rw-r--r--net/bridge/br_device.c3
-rw-r--r--net/bridge/br_if.c15
-rw-r--r--net/bridge/br_ioctl.c28
-rw-r--r--net/bridge/br_netlink.c15
-rw-r--r--net/bridge/br_notify.c3
-rw-r--r--net/bridge/br_private.h6
-rw-r--r--net/bridge/br_stp_bpdu.c3
-rw-r--r--net/bridge/br_sysfs_br.c26
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/dev.c48
-rw-r--r--net/core/dst.c1
-rw-r--r--net/core/neighbour.c21
-rw-r--r--net/core/net-sysfs.c36
-rw-r--r--net/core/rtnetlink.c13
-rw-r--r--net/core/skb_dma_map.c66
-rw-r--r--net/core/sock.c9
-rw-r--r--net/dccp/ccids/ccid2.c2
-rw-r--r--net/dccp/ccids/ccid3.c2
-rw-r--r--net/dccp/ccids/lib/loss_interval.c6
-rw-r--r--net/dccp/ccids/lib/tfrc.c2
-rw-r--r--net/dccp/input.c4
-rw-r--r--net/dccp/options.c13
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ieee80211/ieee80211_module.c8
-rw-r--r--net/ipv4/devinet.c15
-rw-r--r--net/ipv4/inet_diag.c6
-rw-r--r--net/ipv4/inet_timewait_sock.c35
-rw-r--r--net/ipv4/ipvs/Kconfig17
-rw-r--r--net/ipv4/ipvs/Makefile3
-rw-r--r--net/ipv4/ipvs/ip_vs_conn.c249
-rw-r--r--net/ipv4/ipvs/ip_vs_core.c817
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c1370
-rw-r--r--net/ipv4/ipvs/ip_vs_dh.c5
-rw-r--r--net/ipv4/ipvs/ip_vs_est.c58
-rw-r--r--net/ipv4/ipvs/ip_vs_ftp.c61
-rw-r--r--net/ipv4/ipvs/ip_vs_lblc.c220
-rw-r--r--net/ipv4/ipvs/ip_vs_lblcr.c249
-rw-r--r--net/ipv4/ipvs/ip_vs_lc.c32
-rw-r--r--net/ipv4/ipvs/ip_vs_nq.c39
-rw-r--r--net/ipv4/ipvs/ip_vs_proto.c65
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_ah.c178
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_ah_esp.c235
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_esp.c176
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_tcp.c254
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_udp.c227
-rw-r--r--net/ipv4/ipvs/ip_vs_rr.c20
-rw-r--r--net/ipv4/ipvs/ip_vs_sed.c39
-rw-r--r--net/ipv4/ipvs/ip_vs_sh.c5
-rw-r--r--net/ipv4/ipvs/ip_vs_sync.c40
-rw-r--r--net/ipv4/ipvs/ip_vs_wlc.c39
-rw-r--r--net/ipv4/ipvs/ip_vs_wrr.c15
-rw-r--r--net/ipv4/ipvs/ip_vs_xmit.c471
-rw-r--r--net/ipv4/route.c14
-rw-r--r--net/ipv4/tcp_input.c314
-rw-r--r--net/ipv4/tcp_ipv4.c31
-rw-r--r--net/ipv4/tcp_output.c202
-rw-r--r--net/ipv6/ip6_output.c64
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/raw.c6
-rw-r--r--net/ipv6/route.c20
-rw-r--r--net/ipv6/tcp_ipv6.c4
-rw-r--r--net/mac80211/Kconfig13
-rw-r--r--net/mac80211/Makefile3
-rw-r--r--net/mac80211/cfg.c145
-rw-r--r--net/mac80211/debugfs.c4
-rw-r--r--net/mac80211/debugfs_key.c9
-rw-r--r--net/mac80211/debugfs_netdev.c72
-rw-r--r--net/mac80211/debugfs_sta.c8
-rw-r--r--net/mac80211/event.c5
-rw-r--r--net/mac80211/ht.c992
-rw-r--r--net/mac80211/ieee80211_i.h391
-rw-r--r--net/mac80211/iface.c620
-rw-r--r--net/mac80211/key.c8
-rw-r--r--net/mac80211/main.c975
-rw-r--r--net/mac80211/mesh.c366
-rw-r--r--net/mac80211/mesh.h76
-rw-r--r--net/mac80211/mesh_hwmp.c230
-rw-r--r--net/mac80211/mesh_pathtbl.c84
-rw-r--r--net/mac80211/mesh_plink.c98
-rw-r--r--net/mac80211/mlme.c3867
-rw-r--r--net/mac80211/rate.h2
-rw-r--r--net/mac80211/rc80211_pid.h2
-rw-r--r--net/mac80211/rc80211_pid_algo.c53
-rw-r--r--net/mac80211/rx.c312
-rw-r--r--net/mac80211/scan.c937
-rw-r--r--net/mac80211/spectmgmt.c86
-rw-r--r--net/mac80211/sta_info.c89
-rw-r--r--net/mac80211/sta_info.h28
-rw-r--r--net/mac80211/tkip.c2
-rw-r--r--net/mac80211/tx.c233
-rw-r--r--net/mac80211/util.c365
-rw-r--r--net/mac80211/wep.c14
-rw-r--r--net/mac80211/wext.c165
-rw-r--r--net/mac80211/wme.c6
-rw-r--r--net/mac80211/wme.h3
-rw-r--r--net/mac80211/wpa.c4
-rw-r--r--net/netfilter/nf_conntrack_irc.c10
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c14
-rw-r--r--net/netfilter/nf_conntrack_sip.c6
-rw-r--r--net/netfilter/xt_time.c6
-rw-r--r--net/phonet/Kconfig16
-rw-r--r--net/phonet/Makefile9
-rw-r--r--net/phonet/af_phonet.c468
-rw-r--r--net/phonet/datagram.c197
-rw-r--r--net/phonet/pn_dev.c208
-rw-r--r--net/phonet/pn_netlink.c186
-rw-r--r--net/phonet/socket.c312
-rw-r--r--net/phonet/sysctl.c113
-rw-r--r--net/rfkill/rfkill-input.h1
-rw-r--r--net/rfkill/rfkill.c254
-rw-r--r--net/sched/Kconfig20
-rw-r--r--net/sched/Makefile2
-rw-r--r--net/sched/act_skbedit.c203
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/cls_flow.c28
-rw-r--r--net/sched/cls_route.c2
-rw-r--r--net/sched/em_cmp.c9
-rw-r--r--net/sched/sch_api.c8
-rw-r--r--net/sched/sch_cbq.c2
-rw-r--r--net/sched/sch_dsmark.c8
-rw-r--r--net/sched/sch_generic.c30
-rw-r--r--net/sched/sch_htb.c4
-rw-r--r--net/sched/sch_multiq.c477
-rw-r--r--net/sched/sch_netem.c20
-rw-r--r--net/sched/sch_prio.c6
-rw-r--r--net/sched/sch_sfq.c4
-rw-r--r--net/sched/sch_teql.c2
-rw-r--r--net/sctp/ulpqueue.c5
-rw-r--r--net/sunrpc/sysctl.c18
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c4
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c8
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c5
-rw-r--r--net/tipc/bcast.c22
-rw-r--r--net/tipc/bcast.h22
-rw-r--r--net/tipc/bearer.c2
-rw-r--r--net/tipc/bearer.h2
-rw-r--r--net/tipc/cluster.c16
-rw-r--r--net/tipc/cluster.h10
-rw-r--r--net/tipc/discover.c2
-rw-r--r--net/tipc/link.c26
-rw-r--r--net/tipc/link.h2
-rw-r--r--net/tipc/name_table.h2
-rw-r--r--net/tipc/net.c2
-rw-r--r--net/tipc/net.h2
-rw-r--r--net/tipc/node.c60
-rw-r--r--net/tipc/node.h42
-rw-r--r--net/tipc/node_subscr.c4
-rw-r--r--net/tipc/node_subscr.h10
-rw-r--r--net/tipc/port.h2
-rw-r--r--net/tipc/zone.c4
-rw-r--r--net/tipc/zone.h2
-rw-r--r--net/wireless/Kconfig35
-rw-r--r--net/wireless/core.c171
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c235
-rw-r--r--net/wireless/reg.c805
-rw-r--r--net/wireless/reg.h44
-rw-r--r--net/xfrm/xfrm_policy.c9
-rw-r--r--net/xfrm/xfrm_state.c106
-rw-r--r--security/device_cgroup.c18
-rw-r--r--security/selinux/ss/services.c6
-rw-r--r--sound/Kconfig4
-rw-r--r--sound/pci/hda/patch_realtek.c24
-rw-r--r--sound/pci/hda/patch_sigmatel.c5
-rw-r--r--sound/pci/oxygen/hifier.c4
-rw-r--r--sound/pci/oxygen/oxygen.c4
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c40
919 files changed, 72749 insertions, 33076 deletions
diff --git a/Documentation/ABI/testing/sysfs-firmware-sgi_uv b/Documentation/ABI/testing/sysfs-firmware-sgi_uv
new file mode 100644
index 000000000000..4573fd4b7876
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-sgi_uv
@@ -0,0 +1,27 @@
1What: /sys/firmware/sgi_uv/
2Date: August 2008
3Contact: Russ Anderson <rja@sgi.com>
4Description:
5 The /sys/firmware/sgi_uv directory contains information
6 about the SGI UV platform.
7
8 Under that directory are a number of files:
9
10 partition_id
11 coherence_id
12
13 The partition_id entry contains the partition id.
14 SGI UV systems can be partitioned into multiple physical
 15 machines, with each partition running a unique copy
16 of the operating system. Each partition will have a unique
17 partition id. To display the partition id, use the command:
18
19 cat /sys/firmware/sgi_uv/partition_id
20
21 The coherence_id entry contains the coherence id.
22 A partitioned SGI UV system can have one or more coherence
 23 domains. The coherence id indicates which coherence domain
24 this partition is in. To display the coherence id, use the
25 command:
26
27 cat /sys/firmware/sgi_uv/coherence_id
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
index b651e0a4b1c0..77c3c202991b 100644
--- a/Documentation/DocBook/mac80211.tmpl
+++ b/Documentation/DocBook/mac80211.tmpl
@@ -145,7 +145,6 @@ usage should require reading the full document.
145 this though and the recommendation to allow only a single 145 this though and the recommendation to allow only a single
146 interface in STA mode at first! 146 interface in STA mode at first!
147 </para> 147 </para>
148!Finclude/net/mac80211.h ieee80211_if_types
149!Finclude/net/mac80211.h ieee80211_if_init_conf 148!Finclude/net/mac80211.h ieee80211_if_init_conf
150!Finclude/net/mac80211.h ieee80211_if_conf 149!Finclude/net/mac80211.h ieee80211_if_conf
151 </chapter> 150 </chapter>
@@ -177,8 +176,7 @@ usage should require reading the full document.
177 <title>functions/definitions</title> 176 <title>functions/definitions</title>
178!Finclude/net/mac80211.h ieee80211_rx_status 177!Finclude/net/mac80211.h ieee80211_rx_status
179!Finclude/net/mac80211.h mac80211_rx_flags 178!Finclude/net/mac80211.h mac80211_rx_flags
180!Finclude/net/mac80211.h ieee80211_tx_control 179!Finclude/net/mac80211.h ieee80211_tx_info
181!Finclude/net/mac80211.h ieee80211_tx_status_flags
182!Finclude/net/mac80211.h ieee80211_rx 180!Finclude/net/mac80211.h ieee80211_rx
183!Finclude/net/mac80211.h ieee80211_rx_irqsafe 181!Finclude/net/mac80211.h ieee80211_rx_irqsafe
184!Finclude/net/mac80211.h ieee80211_tx_status 182!Finclude/net/mac80211.h ieee80211_tx_status
@@ -189,12 +187,11 @@ usage should require reading the full document.
189!Finclude/net/mac80211.h ieee80211_ctstoself_duration 187!Finclude/net/mac80211.h ieee80211_ctstoself_duration
190!Finclude/net/mac80211.h ieee80211_generic_frame_duration 188!Finclude/net/mac80211.h ieee80211_generic_frame_duration
191!Finclude/net/mac80211.h ieee80211_get_hdrlen_from_skb 189!Finclude/net/mac80211.h ieee80211_get_hdrlen_from_skb
192!Finclude/net/mac80211.h ieee80211_get_hdrlen 190!Finclude/net/mac80211.h ieee80211_hdrlen
193!Finclude/net/mac80211.h ieee80211_wake_queue 191!Finclude/net/mac80211.h ieee80211_wake_queue
194!Finclude/net/mac80211.h ieee80211_stop_queue 192!Finclude/net/mac80211.h ieee80211_stop_queue
195!Finclude/net/mac80211.h ieee80211_start_queues
196!Finclude/net/mac80211.h ieee80211_stop_queues
197!Finclude/net/mac80211.h ieee80211_wake_queues 193!Finclude/net/mac80211.h ieee80211_wake_queues
194!Finclude/net/mac80211.h ieee80211_stop_queues
198 </sect1> 195 </sect1>
199 </chapter> 196 </chapter>
200 197
@@ -230,8 +227,7 @@ usage should require reading the full document.
230 <title>Multiple queues and QoS support</title> 227 <title>Multiple queues and QoS support</title>
231 <para>TBD</para> 228 <para>TBD</para>
232!Finclude/net/mac80211.h ieee80211_tx_queue_params 229!Finclude/net/mac80211.h ieee80211_tx_queue_params
233!Finclude/net/mac80211.h ieee80211_tx_queue_stats_data 230!Finclude/net/mac80211.h ieee80211_tx_queue_stats
234!Finclude/net/mac80211.h ieee80211_tx_queue
235 </chapter> 231 </chapter>
236 232
237 <chapter id="AP"> 233 <chapter id="AP">
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 881e6dd03aea..27809357da58 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -5,6 +5,8 @@
5*.css 5*.css
6*.dvi 6*.dvi
7*.eps 7*.eps
8*.fw.gen.S
9*.fw
8*.gif 10*.gif
9*.grep 11*.grep
10*.grp 12*.grp
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index eb1a47b97427..c93fcdec246d 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,6 +6,24 @@ be removed from this file.
6 6
7--------------------------- 7---------------------------
8 8
9What: old static regulatory information and ieee80211_regdom module parameter
10When: 2.6.29
11Why: The old regulatory infrastructure has been replaced with a new one
12 which does not require statically defined regulatory domains. We do
13 not want to keep static regulatory domains in the kernel due to the
14 the dynamic nature of regulatory law and localization. We kept around
15 the old static definitions for the regulatory domains of:
16 * US
17 * JP
18 * EU
19 and used by default the US when CONFIG_WIRELESS_OLD_REGULATORY was
20 set. We also kept around the ieee80211_regdom module parameter in case
21 some applications were relying on it. Changing regulatory domains
22 can now be done instead by using nl80211, as is done with iw.
23Who: Luis R. Rodriguez <lrodriguez@atheros.com>
24
25---------------------------
26
9What: dev->power.power_state 27What: dev->power.power_state
10When: July 2007 28When: July 2007
11Why: Broken design for runtime control over driver power states, confusing 29Why: Broken design for runtime control over driver power states, confusing
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
index e79ee2db183a..ac2a261c5f7d 100644
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -40,7 +40,7 @@ Web site
40======== 40========
41 41
42There is plenty of additional information on the linux-ntfs web site 42There is plenty of additional information on the linux-ntfs web site
43at http://linux-ntfs.sourceforge.net/ 43at http://www.linux-ntfs.org/
44 44
45The web site has a lot of additional information, such as a comprehensive 45The web site has a lot of additional information, such as a comprehensive
46FAQ, documentation on the NTFS on-disk format, information on the Linux-NTFS 46FAQ, documentation on the NTFS on-disk format, information on the Linux-NTFS
@@ -272,7 +272,7 @@ And you would know that /dev/hda2 has a size of 37768814 - 4209030 + 1 =
272For Win2k and later dynamic disks, you can for example use the ldminfo utility 272For Win2k and later dynamic disks, you can for example use the ldminfo utility
273which is part of the Linux LDM tools (the latest version at the time of 273which is part of the Linux LDM tools (the latest version at the time of
274writing is linux-ldm-0.0.8.tar.bz2). You can download it from: 274writing is linux-ldm-0.0.8.tar.bz2). You can download it from:
275 http://linux-ntfs.sourceforge.net/downloads.html 275 http://www.linux-ntfs.org/
276Simply extract the downloaded archive (tar xvjf linux-ldm-0.0.8.tar.bz2), go 276Simply extract the downloaded archive (tar xvjf linux-ldm-0.0.8.tar.bz2), go
277into it (cd linux-ldm-0.0.8) and change to the test directory (cd test). You 277into it (cd linux-ldm-0.0.8) and change to the test directory (cd test). You
278will find the precompiled (i386) ldminfo utility there. NOTE: You will not be 278will find the precompiled (i386) ldminfo utility there. NOTE: You will not be
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 64557821ee59..394eb2cc1c39 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1339,6 +1339,25 @@ Enables/Disables the protection of the per-process proc entries "maps" and
1339"smaps". When enabled, the contents of these files are visible only to 1339"smaps". When enabled, the contents of these files are visible only to
1340readers that are allowed to ptrace() the given process. 1340readers that are allowed to ptrace() the given process.
1341 1341
1342msgmni
1343------
1344
1345Maximum number of message queue ids on the system.
1346This value scales to the amount of lowmem. It is automatically recomputed
1347upon memory add/remove or ipc namespace creation/removal.
1348When a value is written into this file, msgmni's value becomes fixed, i.e. it
1349is not recomputed anymore when one of the above events occurs.
1350Use auto_msgmni to change this behavior.
1351
1352auto_msgmni
1353-----------
1354
1355Enables/Disables automatic recomputing of msgmni upon memory add/remove or
1356upon ipc namespace creation/removal (see the msgmni description above).
1357Echoing "1" into this file enables msgmni automatic recomputing.
1358Echoing "0" turns it off.
1359auto_msgmni default value is 1.
1360
1342 1361
13432.4 /proc/sys/vm - The virtual memory subsystem 13622.4 /proc/sys/vm - The virtual memory subsystem
1344----------------------------------------------- 1363-----------------------------------------------
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 02dc748b76c4..71f0fe1fc1b0 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -44,7 +44,7 @@ detailed description):
44 - LCD brightness control 44 - LCD brightness control
45 - Volume control 45 - Volume control
46 - Fan control and monitoring: fan speed, fan enable/disable 46 - Fan control and monitoring: fan speed, fan enable/disable
47 - Experimental: WAN enable and disable 47 - WAN enable and disable
48 48
49A compatibility table by model and feature is maintained on the web 49A compatibility table by model and feature is maintained on the web
50site, http://ibm-acpi.sf.net/. I appreciate any success or failure 50site, http://ibm-acpi.sf.net/. I appreciate any success or failure
@@ -1375,18 +1375,13 @@ with EINVAL, try to set pwm1_enable to 1 and pwm1 to at least 128 (255
1375would be the safest choice, though). 1375would be the safest choice, though).
1376 1376
1377 1377
1378EXPERIMENTAL: WAN 1378WAN
1379----------------- 1379---
1380 1380
1381procfs: /proc/acpi/ibm/wan 1381procfs: /proc/acpi/ibm/wan
1382sysfs device attribute: wwan_enable (deprecated) 1382sysfs device attribute: wwan_enable (deprecated)
1383sysfs rfkill class: switch "tpacpi_wwan_sw" 1383sysfs rfkill class: switch "tpacpi_wwan_sw"
1384 1384
1385This feature is marked EXPERIMENTAL because the implementation
1386directly accesses hardware registers and may not work as expected. USE
1387WITH CAUTION! To use this feature, you need to supply the
1388experimental=1 parameter when loading the module.
1389
1390This feature shows the presence and current state of a W-WAN (Sierra 1385This feature shows the presence and current state of a W-WAN (Sierra
1391Wireless EV-DO) device. 1386Wireless EV-DO) device.
1392 1387
diff --git a/Documentation/networking/LICENSE.qlge b/Documentation/networking/LICENSE.qlge
new file mode 100644
index 000000000000..123b6edd7f18
--- /dev/null
+++ b/Documentation/networking/LICENSE.qlge
@@ -0,0 +1,46 @@
1Copyright (c) 2003-2008 QLogic Corporation
2QLogic Linux Networking HBA Driver
3
4This program includes a device driver for Linux 2.6 that may be
5distributed with QLogic hardware specific firmware binary file.
6You may modify and redistribute the device driver code under the
7GNU General Public License as published by the Free Software
8Foundation (version 2 or a later version).
9
10You may redistribute the hardware specific firmware binary file
11under the following terms:
12
13 1. Redistribution of source code (only if applicable),
14 must retain the above copyright notice, this list of
15 conditions and the following disclaimer.
16
17 2. Redistribution in binary form must reproduce the above
18 copyright notice, this list of conditions and the
19 following disclaimer in the documentation and/or other
20 materials provided with the distribution.
21
22 3. The name of QLogic Corporation may not be used to
23 endorse or promote products derived from this software
24 without specific prior written permission
25
26REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
27THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
28EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
30PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
31BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
33TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
35ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
36OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38POSSIBILITY OF SUCH DAMAGE.
39
40USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
41CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
42OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
43TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
44ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
45COMBINATION WITH THIS PROGRAM.
46
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 297ba7b1ccaf..2035bc4932f2 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -35,8 +35,9 @@ This file contains
35 6.1 general settings 35 6.1 general settings
36 6.2 local loopback of sent frames 36 6.2 local loopback of sent frames
37 6.3 CAN controller hardware filters 37 6.3 CAN controller hardware filters
38 6.4 currently supported CAN hardware 38 6.4 The virtual CAN driver (vcan)
39 6.5 todo 39 6.5 currently supported CAN hardware
40 6.6 todo
40 41
41 7 Credits 42 7 Credits
42 43
@@ -584,7 +585,42 @@ solution for a couple of reasons:
584 @133MHz with four SJA1000 CAN controllers from 2002 under heavy bus 585 @133MHz with four SJA1000 CAN controllers from 2002 under heavy bus
585 load without any problems ... 586 load without any problems ...
586 587
587 6.4 currently supported CAN hardware (September 2007) 588 6.4 The virtual CAN driver (vcan)
589
590 Similar to the network loopback devices, vcan offers a virtual local
591 CAN interface. A full qualified address on CAN consists of
592
593 - a unique CAN Identifier (CAN ID)
594 - the CAN bus this CAN ID is transmitted on (e.g. can0)
595
596 so in common use cases more than one virtual CAN interface is needed.
597
598 The virtual CAN interfaces allow the transmission and reception of CAN
599 frames without real CAN controller hardware. Virtual CAN network
600 devices are usually named 'vcanX', like vcan0 vcan1 vcan2 ...
601 When compiled as a module the virtual CAN driver module is called vcan.ko
602
603 Since Linux Kernel version 2.6.24 the vcan driver supports the Kernel
604 netlink interface to create vcan network devices. The creation and
605 removal of vcan network devices can be managed with the ip(8) tool:
606
607 - Create a virtual CAN network interface:
608 ip link add type vcan
609
610 - Create a virtual CAN network interface with a specific name 'vcan42':
611 ip link add dev vcan42 type vcan
612
613 - Remove a (virtual CAN) network interface 'vcan42':
614 ip link del vcan42
615
616 The tool 'vcan' from the SocketCAN SVN repository on BerliOS is obsolete.
617
618 Virtual CAN network device creation in older Kernels:
619 In Linux Kernel versions < 2.6.24 the vcan driver creates 4 vcan
620 netdevices at module load time by default. This value can be changed
621 with the module parameter 'numdev'. E.g. 'modprobe vcan numdev=8'
622
623 6.5 currently supported CAN hardware
588 624
589 On the project website http://developer.berlios.de/projects/socketcan 625 On the project website http://developer.berlios.de/projects/socketcan
590 there are different drivers available: 626 there are different drivers available:
@@ -603,7 +639,7 @@ solution for a couple of reasons:
603 639
604 Please check the Mailing Lists on the berlios OSS project website. 640 Please check the Mailing Lists on the berlios OSS project website.
605 641
606 6.5 todo (September 2007) 642 6.6 todo
607 643
608 The configuration interface for CAN network drivers is still an open 644 The configuration interface for CAN network drivers is still an open
609 issue that has not been finalized in the socketcan project. Also the 645 issue that has not been finalized in the socketcan project. Also the
diff --git a/Documentation/networking/multiqueue.txt b/Documentation/networking/multiqueue.txt
index d391ea631141..4caa0e314cc2 100644
--- a/Documentation/networking/multiqueue.txt
+++ b/Documentation/networking/multiqueue.txt
@@ -24,4 +24,56 @@ netif_{start|stop|wake}_subqueue() functions to manage each queue while the
24device is still operational. netdev->queue_lock is still used when the device 24device is still operational. netdev->queue_lock is still used when the device
25comes online or when it's completely shut down (unregister_netdev(), etc.). 25comes online or when it's completely shut down (unregister_netdev(), etc.).
26 26
27Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com> 27
28Section 2: Qdisc support for multiqueue devices
29
30-----------------------------------------------
31
32Currently two qdiscs are optimized for multiqueue devices. The first is the
33default pfifo_fast qdisc. This qdisc supports one qdisc per hardware queue.
34A new round-robin qdisc, sch_multiq also supports multiple hardware queues. The
35qdisc is responsible for classifying the skb's and then directing the skb's to
36bands and queues based on the value in skb->queue_mapping. Use this field in
37the base driver to determine which queue to send the skb to.
38
39sch_multiq has been added for hardware that wishes to avoid head-of-line
40blocking. It will cycle though the bands and verify that the hardware queue
41associated with the band is not stopped prior to dequeuing a packet.
42
43On qdisc load, the number of bands is based on the number of queues on the
44hardware. Once the association is made, any skb with skb->queue_mapping set,
45will be queued to the band associated with the hardware queue.
46
47
48Section 3: Brief howto using MULTIQ for multiqueue devices
49---------------------------------------------------------------
50
51The userspace command 'tc,' part of the iproute2 package, is used to configure
52qdiscs. To add the MULTIQ qdisc to your network device, assuming the device
53is called eth0, run the following command:
54
55# tc qdisc add dev eth0 root handle 1: multiq
56
57The qdisc will allocate the number of bands to equal the number of queues that
58the device reports, and bring the qdisc online. Assuming eth0 has 4 Tx
59queues, the band mapping would look like:
60
61band 0 => queue 0
62band 1 => queue 1
63band 2 => queue 2
64band 3 => queue 3
65
66Traffic will begin flowing through each queue based on either the simple_tx_hash
67function or based on netdev->select_queue() if you have it defined.
68
69The behavior of tc filters remains the same. However a new tc action,
70skbedit, has been added. Assuming you wanted to route all traffic to a
71specific host, for example 192.168.0.3, through a specific queue you could use
72this action and establish a filter such as:
73
74tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
75 match ip dst 192.168.0.3 \
76 action skbedit queue_mapping 3
77
78Author: Alexander Duyck <alexander.h.duyck@intel.com>
79Original Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
diff --git a/Documentation/networking/phonet.txt b/Documentation/networking/phonet.txt
new file mode 100644
index 000000000000..f3c72e0ca8d7
--- /dev/null
+++ b/Documentation/networking/phonet.txt
@@ -0,0 +1,111 @@
1Linux Phonet protocol family
2============================
3
4Introduction
5------------
6
7Phonet is a packet protocol used by Nokia cellular modems for both IPC
8and RPC. With the Linux Phonet socket family, Linux host processes can
9receive and send messages from/to the modem, or any other external
10device attached to the modem. The modem takes care of routing.
11
12Phonet packets can be exchanged through various hardware connections
13depending on the device, such as:
14 - USB with the CDC Phonet interface,
15 - infrared,
16 - Bluetooth,
17 - an RS232 serial port (with a dedicated "FBUS" line discipline),
18 - the SSI bus with some TI OMAP processors.
19
20
21Packets format
22--------------
23
24Phonet packet have a common header as follow:
25
26 struct phonethdr {
27 uint8_t pn_media; /* Media type (link-layer identifier) */
28 uint8_t pn_rdev; /* Receiver device ID */
29 uint8_t pn_sdev; /* Sender device ID */
30 uint8_t pn_res; /* Resource ID or function */
31 uint16_t pn_length; /* Big-endian message byte length (minus 6) */
32 uint8_t pn_robj; /* Receiver object ID */
33 uint8_t pn_sobj; /* Sender object ID */
34 };
35
 36The device ID is split: the 6 higher order bits constitute the device
37address, while the 2 lower order bits are used for multiplexing, as are
38the 8-bits object identifiers. As such, Phonet can be considered as a
39network layer with 6 bits of address space and 10 bits for transport
40protocol (much like port numbers in IP world).
41
 42The modem always has address number zero. Each other device has its
43own 6-bits address.
44
45
46Link layer
47----------
48
49Phonet links are always point-to-point links. The link layer header
50consists of a single Phonet media type byte. It uniquely identifies the
51link through which the packet is transmitted, from the modem's
52perspective.
53
54Linux Phonet network interfaces use a dedicated link layer type
55(ETH_P_PHONET) which is out of the Ethernet type range. They can only
56send and receive Phonet packets.
57
58Note that Phonet interfaces are not allowed to re-order packets, so
59only the (default) Linux FIFO qdisc should be used with them.
60
61
62Network layer
63-------------
64
65The Phonet socket address family maps the Phonet packet header:
66
67 struct sockaddr_pn {
68 sa_family_t spn_family; /* AF_PHONET */
69 uint8_t spn_obj; /* Object ID */
70 uint8_t spn_dev; /* Device ID */
71 uint8_t spn_resource; /* Resource or function */
72 uint8_t spn_zero[...]; /* Padding */
73 };
74
75The resource field is only used when sending and receiving;
76It is ignored by bind() and getsockname().
77
78
79Low-level datagram protocol
80---------------------------
81
82Applications can send Phonet messages using the Phonet datagram socket
83protocol from the PF_PHONET family. Each socket is bound to one of the
842^10 object IDs available, and can send and receive packets with any
85other peer.
86
87 struct sockaddr_pn addr = { .spn_family = AF_PHONET, };
88 ssize_t len;
89 socklen_t addrlen = sizeof(addr);
90 int fd;
91
92 fd = socket(PF_PHONET, SOCK_DGRAM, 0);
93 bind(fd, (struct sockaddr *)&addr, sizeof(addr));
94 /* ... */
95
96 sendto(fd, msg, msglen, 0, (struct sockaddr *)&addr, sizeof(addr));
97 len = recvfrom(fd, buf, sizeof(buf), 0,
98 (struct sockaddr *)&addr, &addrlen);
99
100This protocol follows the SOCK_DGRAM connection-less semantics.
101However, connect() and getpeername() are not supported, as they did
102not seem useful with Phonet usages (could be added easily).
103
104
105Authors
106-------
107
108Linux Phonet was initially written by Sakari Ailus.
 109Other contributors include Mika Liljeberg, Andras Domokos,
110Carlos Chinea and Rémi Denis-Courmont.
111Copyright (C) 2008 Nokia Corporation.
diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt
new file mode 100644
index 000000000000..a96989a8ff35
--- /dev/null
+++ b/Documentation/networking/regulatory.txt
@@ -0,0 +1,194 @@
1Linux wireless regulatory documentation
2---------------------------------------
3
4This document gives a brief review over how the Linux wireless
5regulatory infrastructure works.
6
7More up to date information can be obtained at the project's web page:
8
9http://wireless.kernel.org/en/developers/Regulatory
10
11Keeping regulatory domains in userspace
12---------------------------------------
13
14Due to the dynamic nature of regulatory domains we keep them
15in userspace and provide a framework for userspace to upload
16to the kernel one regulatory domain to be used as the central
17core regulatory domain all wireless devices should adhere to.
18
19How to get regulatory domains to the kernel
20-------------------------------------------
21
22Userspace gets a regulatory domain in the kernel by having
23a userspace agent build it and send it via nl80211. Only
24expected regulatory domains will be respected by the kernel.
25
26A currently available userspace agent which can accomplish this
27is CRDA - central regulatory domain agent. It's documented here:
28
29http://wireless.kernel.org/en/developers/Regulatory/CRDA
30
31Essentially the kernel will send a udev event when it knows
32it needs a new regulatory domain. A udev rule can be put in place
33to trigger crda to send the respective regulatory domain for a
34specific ISO/IEC 3166 alpha2.
35
36Below is an example udev rule which can be used:
37
38# Example file, should be put in /etc/udev/rules.d/regulatory.rules
39KERNEL=="regulatory*", ACTION=="change", SUBSYSTEM=="platform", RUN+="/sbin/crda"
40
41The alpha2 is passed as an environment variable under the variable COUNTRY.
42
43Who asks for regulatory domains?
44--------------------------------
45
46* Users
47
48Users can use iw:
49
50http://wireless.kernel.org/en/users/Documentation/iw
51
52An example:
53
54 # set regulatory domain to "Costa Rica"
55 iw reg set CR
56
57This will request the kernel to set the regulatory domain to
58the specified alpha2. The kernel in turn will then ask userspace
59to provide a regulatory domain for the alpha2 specified by the user
60by sending a uevent.
61
62* Wireless subsystems for Country Information elements
63
64The kernel will send a uevent to inform userspace a new
65regulatory domain is required. More on this to be added
66as its integration is added.
67
68* Drivers
69
70If drivers determine they need a specific regulatory domain
71set they can inform the wireless core using regulatory_hint().
72They have two options -- they either provide an alpha2 so that
73crda can provide back a regulatory domain for that country or
74they can build their own regulatory domain based on internal
75custom knowledge so the wireless core can respect it.
76
77*Most* drivers will rely on the first mechanism of providing a
78regulatory hint with an alpha2. For these drivers there is an additional
79check that can be used to ensure compliance based on custom EEPROM
80regulatory data. This additional check can be used by drivers by
81registering on its struct wiphy a reg_notifier() callback. This notifier
82is called when the core's regulatory domain has been changed. The driver
83can use this to review the changes made and also review who made them
84(driver, user, country IE) and determine what to allow based on its
85internal EEPROM data. Devices drivers wishing to be capable of world
86roaming should use this callback. More on world roaming will be
87added to this document when its support is enabled.
88
89Device drivers who provide their own built regulatory domain
90do not need a callback as the channels registered by them are
91the only ones that will be allowed and therefore *additional*
92channels cannot be enabled.
93
94Example code - drivers hinting an alpha2:
95------------------------------------------
96
97This example comes from the zd1211rw device driver. You can start
98by having a mapping of your device's EEPROM country/regulatory
99domain value to a specific alpha2 as follows:
100
101static struct zd_reg_alpha2_map reg_alpha2_map[] = {
102 { ZD_REGDOMAIN_FCC, "US" },
103 { ZD_REGDOMAIN_IC, "CA" },
104 { ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
105 { ZD_REGDOMAIN_JAPAN, "JP" },
106 { ZD_REGDOMAIN_JAPAN_ADD, "JP" },
107 { ZD_REGDOMAIN_SPAIN, "ES" },
108 { ZD_REGDOMAIN_FRANCE, "FR" },
109
110Then you can define a routine to map your read EEPROM value to an alpha2,
111as follows:
112
113static int zd_reg2alpha2(u8 regdomain, char *alpha2)
114{
115 unsigned int i;
116 struct zd_reg_alpha2_map *reg_map;
117 for (i = 0; i < ARRAY_SIZE(reg_alpha2_map); i++) {
118 reg_map = &reg_alpha2_map[i];
119 if (regdomain == reg_map->reg) {
120 alpha2[0] = reg_map->alpha2[0];
121 alpha2[1] = reg_map->alpha2[1];
122 return 0;
123 }
124 }
125 return 1;
126}
127
128Lastly, you can then hint to the core of your discovered alpha2, if a match
129was found. You need to do this after you have registered your wiphy. You
130are expected to do this during initialization.
131
132 r = zd_reg2alpha2(mac->regdomain, alpha2);
133 if (!r)
134 regulatory_hint(hw->wiphy, alpha2, NULL);
135
136Example code - drivers providing a built in regulatory domain:
137--------------------------------------------------------------
138
139If you have regulatory information you can obtain from your
140driver and you *need* to use this we let you build a regulatory domain
141structure and pass it to the wireless core. To do this you should
142kmalloc() a structure big enough to hold your regulatory domain
143structure and you should then fill it with your data. Finally you simply
144call regulatory_hint() with the regulatory domain structure in it.
145
146Below is a simple example, with a regulatory domain cached using the stack.
147Your implementation may vary (read EEPROM cache instead, for example).
148
149Example cache of some regulatory domain
150
151struct ieee80211_regdomain mydriver_jp_regdom = {
152 .n_reg_rules = 3,
153 .alpha2 = "JP",
154 //.alpha2 = "99", /* If I have no alpha2 to map it to */
155 .reg_rules = {
156 /* IEEE 802.11b/g, channels 1..14 */
157 REG_RULE(2412-20, 2484+20, 40, 6, 20, 0),
158 /* IEEE 802.11a, channels 34..48 */
159 REG_RULE(5170-20, 5240+20, 40, 6, 20,
160 NL80211_RRF_PASSIVE_SCAN),
161 /* IEEE 802.11a, channels 52..64 */
162 REG_RULE(5260-20, 5320+20, 40, 6, 20,
163 NL80211_RRF_NO_IBSS |
164 NL80211_RRF_DFS),
165 }
166};
167
168Then in some part of your code after your wiphy has been registered:
169
170 int r;
171 struct ieee80211_regdomain *rd;
172 int size_of_regd;
173 int num_rules = mydriver_jp_regdom.n_reg_rules;
174 unsigned int i;
175
176 size_of_regd = sizeof(struct ieee80211_regdomain) +
177 (num_rules * sizeof(struct ieee80211_reg_rule));
178
179 rd = kzalloc(size_of_regd, GFP_KERNEL);
180 if (!rd)
181 return -ENOMEM;
182
183 memcpy(rd, &mydriver_jp_regdom, sizeof(struct ieee80211_regdomain));
184
185 for (i=0; i < num_rules; i++) {
186 memcpy(&rd->reg_rules[i], &mydriver_jp_regdom.reg_rules[i],
187 sizeof(struct ieee80211_reg_rule));
188 }
189 r = regulatory_hint(hw->wiphy, NULL, rd);
190 if (r) {
191 kfree(rd);
192 return r;
193 }
194
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index 6fcb3060dec5..b65f0799df48 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -341,6 +341,8 @@ key that does nothing by itself, as well as any hot key that is type-specific
3413.1 Guidelines for wireless device drivers 3413.1 Guidelines for wireless device drivers
342------------------------------------------ 342------------------------------------------
343 343
344(in this text, rfkill->foo means the foo field of struct rfkill).
345
3441. Each independent transmitter in a wireless device (usually there is only one 3461. Each independent transmitter in a wireless device (usually there is only one
345transmitter per device) should have a SINGLE rfkill class attached to it. 347transmitter per device) should have a SINGLE rfkill class attached to it.
346 348
@@ -363,10 +365,32 @@ This rule exists because users of the rfkill subsystem expect to get (and set,
363when possible) the overall transmitter rfkill state, not of a particular rfkill 365when possible) the overall transmitter rfkill state, not of a particular rfkill
364line. 366line.
365 367
3665. During suspend, the rfkill class will attempt to soft-block the radio 3685. The wireless device driver MUST NOT leave the transmitter enabled during
367through a call to rfkill->toggle_radio, and will try to restore its previous 369suspend and hibernation unless:
368state during resume. After a rfkill class is suspended, it will *not* call 370
369rfkill->toggle_radio until it is resumed. 371 5.1. The transmitter has to be enabled for some sort of functionality
372 like wake-on-wireless-packet or autonomous packed forwarding in a mesh
373 network, and that functionality is enabled for this suspend/hibernation
374 cycle.
375
376AND
377
378 5.2. The device was not on a user-requested BLOCKED state before
379 the suspend (i.e. the driver must NOT unblock a device, not even
380 to support wake-on-wireless-packet or remain in the mesh).
381
382In other words, there is absolutely no allowed scenario where a driver can
383automatically take action to unblock a rfkill controller (obviously, this deals
384with scenarios where soft-blocking or both soft and hard blocking is happening.
385Scenarios where hardware rfkill lines are the only ones blocking the
386transmitter are outside of this rule, since the wireless device driver does not
387control its input hardware rfkill lines in the first place).
388
3896. During resume, rfkill will try to restore its previous state.
390
3917. After a rfkill class is suspended, it will *not* call rfkill->toggle_radio
392until it is resumed.
393
370 394
371Example of a WLAN wireless driver connected to the rfkill subsystem: 395Example of a WLAN wireless driver connected to the rfkill subsystem:
372-------------------------------------------------------------------- 396--------------------------------------------------------------------
diff --git a/Documentation/video4linux/CARDLIST.au0828 b/Documentation/video4linux/CARDLIST.au0828
index eedc399e8deb..aa05e5bb22fb 100644
--- a/Documentation/video4linux/CARDLIST.au0828
+++ b/Documentation/video4linux/CARDLIST.au0828
@@ -3,3 +3,4 @@
3 2 -> Hauppauge HVR850 (au0828) [2040:7240] 3 2 -> Hauppauge HVR850 (au0828) [2040:7240]
4 3 -> DViCO FusionHDTV USB (au0828) [0fe9:d620] 4 3 -> DViCO FusionHDTV USB (au0828) [0fe9:d620]
5 4 -> Hauppauge HVR950Q rev xxF8 (au0828) [2040:7201,2040:7211,2040:7281] 5 4 -> Hauppauge HVR950Q rev xxF8 (au0828) [2040:7201,2040:7211,2040:7281]
6 5 -> Hauppauge Woodbury (au0828) [2040:8200]
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 78a863ab8a5a..0f03900c48fb 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -88,14 +88,14 @@ zc3xx 0471:0325 Philips SPC 200 NC
88zc3xx 0471:0326 Philips SPC 300 NC 88zc3xx 0471:0326 Philips SPC 300 NC
89sonixj 0471:0327 Philips SPC 600 NC 89sonixj 0471:0327 Philips SPC 600 NC
90sonixj 0471:0328 Philips SPC 700 NC 90sonixj 0471:0328 Philips SPC 700 NC
91zc3xx 0471:032d Philips spc210nc 91zc3xx 0471:032d Philips SPC 210 NC
92zc3xx 0471:032e Philips spc315nc 92zc3xx 0471:032e Philips SPC 315 NC
93sonixj 0471:0330 Philips SPC 710NC 93sonixj 0471:0330 Philips SPC 710 NC
94spca501 0497:c001 Smile International 94spca501 0497:c001 Smile International
95sunplus 04a5:3003 Benq DC 1300 95sunplus 04a5:3003 Benq DC 1300
96sunplus 04a5:3008 Benq DC 1500 96sunplus 04a5:3008 Benq DC 1500
97sunplus 04a5:300a Benq DC3410 97sunplus 04a5:300a Benq DC 3410
98spca500 04a5:300c Benq DC1016 98spca500 04a5:300c Benq DC 1016
99sunplus 04f1:1001 JVC GC A50 99sunplus 04f1:1001 JVC GC A50
100spca561 04fc:0561 Flexcam 100 100spca561 04fc:0561 Flexcam 100
101sunplus 04fc:500c Sunplus CA500C 101sunplus 04fc:500c Sunplus CA500C
@@ -175,19 +175,21 @@ sunplus 08ca:2060 Aiptek PocketDV5300
175tv8532 0923:010f ICM532 cams 175tv8532 0923:010f ICM532 cams
176mars 093a:050f Mars-Semi Pc-Camera 176mars 093a:050f Mars-Semi Pc-Camera
177pac207 093a:2460 PAC207 Qtec Webcam 100 177pac207 093a:2460 PAC207 Qtec Webcam 100
178pac207 093a:2463 Philips spc200nc pac207 178pac207 093a:2463 Philips SPC 220 NC
179pac207 093a:2464 Labtec Webcam 1200 179pac207 093a:2464 Labtec Webcam 1200
180pac207 093a:2468 PAC207 180pac207 093a:2468 PAC207
181pac207 093a:2470 Genius GF112 181pac207 093a:2470 Genius GF112
182pac207 093a:2471 PAC207 Genius VideoCam ge111 182pac207 093a:2471 Genius VideoCam ge111
183pac207 093a:2472 PAC207 Genius VideoCam ge110 183pac207 093a:2472 Genius VideoCam ge110
184pac7311 093a:2600 PAC7311 Typhoon 184pac7311 093a:2600 PAC7311 Typhoon
185pac7311 093a:2601 PAC7311 Phillips SPC610NC 185pac7311 093a:2601 Philips SPC 610 NC
186pac7311 093a:2603 PAC7312 186pac7311 093a:2603 PAC7312
187pac7311 093a:2608 PAC7311 Trust WB-3300p 187pac7311 093a:2608 Trust WB-3300p
188pac7311 093a:260e PAC7311 Gigaware VGA PC Camera, Trust WB-3350p, SIGMA cam 2350 188pac7311 093a:260e Gigaware VGA PC Camera, Trust WB-3350p, SIGMA cam 2350
189pac7311 093a:260f PAC7311 SnakeCam 189pac7311 093a:260f SnakeCam
190pac7311 093a:2621 PAC731x 190pac7311 093a:2621 PAC731x
191pac7311 093a:2624 PAC7302
192pac7311 093a:2626 Labtec 2200
191zc3xx 0ac8:0302 Z-star Vimicro zc0302 193zc3xx 0ac8:0302 Z-star Vimicro zc0302
192vc032x 0ac8:0321 Vimicro generic vc0321 194vc032x 0ac8:0321 Vimicro generic vc0321
193vc032x 0ac8:0323 Vimicro Vc0323 195vc032x 0ac8:0323 Vimicro Vc0323
@@ -220,6 +222,7 @@ sonixj 0c45:60c0 Sangha Sn535
220sonixj 0c45:60ec SN9C105+MO4000 222sonixj 0c45:60ec SN9C105+MO4000
221sonixj 0c45:60fb Surfer NoName 223sonixj 0c45:60fb Surfer NoName
222sonixj 0c45:60fc LG-LIC300 224sonixj 0c45:60fc LG-LIC300
225sonixj 0c45:6128 Microdia/Sonix SNP325
223sonixj 0c45:612a Avant Camera 226sonixj 0c45:612a Avant Camera
224sonixj 0c45:612c Typhoon Rasy Cam 1.3MPix 227sonixj 0c45:612c Typhoon Rasy Cam 1.3MPix
225sonixj 0c45:6130 Sonix Pccam 228sonixj 0c45:6130 Sonix Pccam
@@ -234,7 +237,7 @@ zc3xx 10fd:0128 Typhoon Webshot II USB 300k 0x0128
234spca561 10fd:7e50 FlyCam Usb 100 237spca561 10fd:7e50 FlyCam Usb 100
235zc3xx 10fd:8050 Typhoon Webshot II USB 300k 238zc3xx 10fd:8050 Typhoon Webshot II USB 300k
236spca501 1776:501c Arowana 300K CMOS Camera 239spca501 1776:501c Arowana 300K CMOS Camera
237t613 17a1:0128 T613/TAS5130A 240t613 17a1:0128 TASCORP JPEG Webcam, NGS Cyclops
238vc032x 17ef:4802 Lenovo Vc0323+MI1310_SOC 241vc032x 17ef:4802 Lenovo Vc0323+MI1310_SOC
239pac207 2001:f115 D-Link DSB-C120 242pac207 2001:f115 D-Link DSB-C120
240spca500 2899:012c Toptro Industrial 243spca500 2899:012c Toptro Industrial
diff --git a/MAINTAINERS b/MAINTAINERS
index c4ca99cf80df..c29b420fc1ca 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -222,8 +222,7 @@ W: http://code.google.com/p/aceracpi
222S: Maintained 222S: Maintained
223 223
224ACPI 224ACPI
225P: Andi Kleen 225P: Len Brown
226M: ak@linux.intel.com
227M: lenb@kernel.org 226M: lenb@kernel.org
228L: linux-acpi@vger.kernel.org 227L: linux-acpi@vger.kernel.org
229W: http://www.lesswatts.org/projects/acpi/ 228W: http://www.lesswatts.org/projects/acpi/
@@ -419,6 +418,12 @@ L: linux-laptop@vger.kernel.org
419W: http://www.canb.auug.org.au/~sfr/ 418W: http://www.canb.auug.org.au/~sfr/
420S: Supported 419S: Supported
421 420
421APPLE BCM5974 MULTITOUCH DRIVER
422P: Henrik Rydberg
423M: rydberg@euromail.se
424L: linux-input@vger.kernel.org
425S: Maintained
426
422APPLE SMC DRIVER 427APPLE SMC DRIVER
423P: Nicolas Boichat 428P: Nicolas Boichat
424M: nicolas@boichat.ch 429M: nicolas@boichat.ch
@@ -745,11 +750,13 @@ P: Ville Syrjala
745M: syrjala@sci.fi 750M: syrjala@sci.fi
746S: Maintained 751S: Maintained
747 752
748ATL1 ETHERNET DRIVER 753ATLX ETHERNET DRIVERS
749P: Jay Cliburn 754P: Jay Cliburn
750M: jcliburn@gmail.com 755M: jcliburn@gmail.com
751P: Chris Snook 756P: Chris Snook
752M: csnook@redhat.com 757M: csnook@redhat.com
758P: Jie Yang
759M: jie.yang@atheros.com
753L: atl1-devel@lists.sourceforge.net 760L: atl1-devel@lists.sourceforge.net
754W: http://sourceforge.net/projects/atl1 761W: http://sourceforge.net/projects/atl1
755W: http://atl1.sourceforge.net 762W: http://atl1.sourceforge.net
@@ -1041,6 +1048,13 @@ L: cbe-oss-dev@ozlabs.org
1041W: http://www.ibm.com/developerworks/power/cell/ 1048W: http://www.ibm.com/developerworks/power/cell/
1042S: Supported 1049S: Supported
1043 1050
1051CISCO 10G ETHERNET DRIVER
1052P: Scott Feldman
1053M: scofeldm@cisco.com
1054P: Joe Eykholt
1055M: jeykholt@cisco.com
1056S: Supported
1057
1044CFAG12864B LCD DRIVER 1058CFAG12864B LCD DRIVER
1045P: Miguel Ojeda Sandonis 1059P: Miguel Ojeda Sandonis
1046M: miguel.ojeda.sandonis@gmail.com 1060M: miguel.ojeda.sandonis@gmail.com
@@ -1588,7 +1602,7 @@ S: Supported
1588EMBEDDED LINUX 1602EMBEDDED LINUX
1589P: Paul Gortmaker 1603P: Paul Gortmaker
1590M: paul.gortmaker@windriver.com 1604M: paul.gortmaker@windriver.com
1591P David Woodhouse 1605P: David Woodhouse
1592M: dwmw2@infradead.org 1606M: dwmw2@infradead.org
1593L: linux-embedded@vger.kernel.org 1607L: linux-embedded@vger.kernel.org
1594S: Maintained 1608S: Maintained
@@ -2314,6 +2328,12 @@ L: video4linux-list@redhat.com
2314W: http://www.ivtvdriver.org 2328W: http://www.ivtvdriver.org
2315S: Maintained 2329S: Maintained
2316 2330
2331JME NETWORK DRIVER
2332P: Guo-Fu Tseng
2333M: cooldavid@cooldavid.org
2334L: netdev@vger.kernel.org
2335S: Maintained
2336
2317JOURNALLING FLASH FILE SYSTEM V2 (JFFS2) 2337JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
2318P: David Woodhouse 2338P: David Woodhouse
2319M: dwmw2@infradead.org 2339M: dwmw2@infradead.org
@@ -3051,7 +3071,7 @@ P: Anton Altaparmakov
3051M: aia21@cantab.net 3071M: aia21@cantab.net
3052L: linux-ntfs-dev@lists.sourceforge.net 3072L: linux-ntfs-dev@lists.sourceforge.net
3053L: linux-kernel@vger.kernel.org 3073L: linux-kernel@vger.kernel.org
3054W: http://linux-ntfs.sf.net/ 3074W: http://www.linux-ntfs.org/
3055T: git kernel.org:/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git 3075T: git kernel.org:/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
3056S: Maintained 3076S: Maintained
3057 3077
@@ -3378,6 +3398,13 @@ M: linux-driver@qlogic.com
3378L: netdev@vger.kernel.org 3398L: netdev@vger.kernel.org
3379S: Supported 3399S: Supported
3380 3400
3401QLOGIC QLGE 10Gb ETHERNET DRIVER
3402P: Ron Mercer
3403M: linux-driver@qlogic.com
3404M: ron.mercer@qlogic.com
3405L: netdev@vger.kernel.org
3406S: Supported
3407
3381QNX4 FILESYSTEM 3408QNX4 FILESYSTEM
3382P: Anders Larsen 3409P: Anders Larsen
3383M: al@alarsen.net 3410M: al@alarsen.net
diff --git a/arch/arm/include/asm/byteorder.h b/arch/arm/include/asm/byteorder.h
index d04a7a2bc2e9..4fbfb22f65a0 100644
--- a/arch/arm/include/asm/byteorder.h
+++ b/arch/arm/include/asm/byteorder.h
@@ -18,15 +18,7 @@
18#include <linux/compiler.h> 18#include <linux/compiler.h>
19#include <asm/types.h> 19#include <asm/types.h>
20 20
21#ifdef __ARMEB__ 21static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
22# define __BIG_ENDIAN
23#else
24# define __LITTLE_ENDIAN
25#endif
26
27#define __SWAB_64_THRU_32__
28
29static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
30{ 22{
31 __u32 t; 23 __u32 t;
32 24
@@ -48,8 +40,19 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
48 40
49 return x; 41 return x;
50} 42}
51#define __arch_swab32 __arch_swab32
52 43
53#include <linux/byteorder.h> 44#define __arch__swab32(x) ___arch__swab32(x)
45
46#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
47# define __BYTEORDER_HAS_U64__
48# define __SWAB_64_THRU_32__
49#endif
50
51#ifdef __ARMEB__
52#include <linux/byteorder/big_endian.h>
53#else
54#include <linux/byteorder/little_endian.h>
55#endif
54 56
55#endif 57#endif
58
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 94a95d7fafd6..71934856fc22 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -61,8 +61,9 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
61#define MT_DEVICE_NONSHARED 1 61#define MT_DEVICE_NONSHARED 1
62#define MT_DEVICE_CACHED 2 62#define MT_DEVICE_CACHED 2
63#define MT_DEVICE_IXP2000 3 63#define MT_DEVICE_IXP2000 3
64#define MT_DEVICE_WC 4
64/* 65/*
65 * types 4 onwards can be found in asm/mach/map.h and are undefined 66 * types 5 onwards can be found in asm/mach/map.h and are undefined
66 * for ioremap 67 * for ioremap
67 */ 68 */
68 69
@@ -215,11 +216,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
215#define ioremap(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE) 216#define ioremap(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE)
216#define ioremap_nocache(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE) 217#define ioremap_nocache(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE)
217#define ioremap_cached(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_CACHED) 218#define ioremap_cached(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_CACHED)
219#define ioremap_wc(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_WC)
218#define iounmap(cookie) __iounmap(cookie) 220#define iounmap(cookie) __iounmap(cookie)
219#else 221#else
220#define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) 222#define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
221#define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) 223#define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
222#define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED) 224#define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
225#define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC)
223#define iounmap(cookie) __arch_iounmap(cookie) 226#define iounmap(cookie) __arch_iounmap(cookie)
224#endif 227#endif
225 228
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 06f583b13999..9eb936e49cc3 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -18,13 +18,13 @@ struct map_desc {
18 unsigned int type; 18 unsigned int type;
19}; 19};
20 20
21/* types 0-3 are defined in asm/io.h */ 21/* types 0-4 are defined in asm/io.h */
22#define MT_CACHECLEAN 4 22#define MT_CACHECLEAN 5
23#define MT_MINICLEAN 5 23#define MT_MINICLEAN 6
24#define MT_LOW_VECTORS 6 24#define MT_LOW_VECTORS 7
25#define MT_HIGH_VECTORS 7 25#define MT_HIGH_VECTORS 8
26#define MT_MEMORY 8 26#define MT_MEMORY 9
27#define MT_ROM 9 27#define MT_ROM 10
28 28
29#define MT_NONSHARED_DEVICE MT_DEVICE_NONSHARED 29#define MT_NONSHARED_DEVICE MT_DEVICE_NONSHARED
30#define MT_IXP2000_DEVICE MT_DEVICE_IXP2000 30#define MT_IXP2000_DEVICE MT_DEVICE_IXP2000
diff --git a/arch/arm/mach-omap1/mcbsp.c b/arch/arm/mach-omap1/mcbsp.c
index 826010d5d014..2baeaeb0c900 100644
--- a/arch/arm/mach-omap1/mcbsp.c
+++ b/arch/arm/mach-omap1/mcbsp.c
@@ -159,6 +159,7 @@ static struct omap_mcbsp_ops omap1_mcbsp_ops = {
159#ifdef CONFIG_ARCH_OMAP730 159#ifdef CONFIG_ARCH_OMAP730
160static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = { 160static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = {
161 { 161 {
162 .phys_base = OMAP730_MCBSP1_BASE,
162 .virt_base = io_p2v(OMAP730_MCBSP1_BASE), 163 .virt_base = io_p2v(OMAP730_MCBSP1_BASE),
163 .dma_rx_sync = OMAP_DMA_MCBSP1_RX, 164 .dma_rx_sync = OMAP_DMA_MCBSP1_RX,
164 .dma_tx_sync = OMAP_DMA_MCBSP1_TX, 165 .dma_tx_sync = OMAP_DMA_MCBSP1_TX,
@@ -167,6 +168,7 @@ static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = {
167 .ops = &omap1_mcbsp_ops, 168 .ops = &omap1_mcbsp_ops,
168 }, 169 },
169 { 170 {
171 .phys_base = OMAP730_MCBSP2_BASE,
170 .virt_base = io_p2v(OMAP730_MCBSP2_BASE), 172 .virt_base = io_p2v(OMAP730_MCBSP2_BASE),
171 .dma_rx_sync = OMAP_DMA_MCBSP3_RX, 173 .dma_rx_sync = OMAP_DMA_MCBSP3_RX,
172 .dma_tx_sync = OMAP_DMA_MCBSP3_TX, 174 .dma_tx_sync = OMAP_DMA_MCBSP3_TX,
@@ -184,6 +186,7 @@ static struct omap_mcbsp_platform_data omap730_mcbsp_pdata[] = {
184#ifdef CONFIG_ARCH_OMAP15XX 186#ifdef CONFIG_ARCH_OMAP15XX
185static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = { 187static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
186 { 188 {
189 .phys_base = OMAP1510_MCBSP1_BASE,
187 .virt_base = OMAP1510_MCBSP1_BASE, 190 .virt_base = OMAP1510_MCBSP1_BASE,
188 .dma_rx_sync = OMAP_DMA_MCBSP1_RX, 191 .dma_rx_sync = OMAP_DMA_MCBSP1_RX,
189 .dma_tx_sync = OMAP_DMA_MCBSP1_TX, 192 .dma_tx_sync = OMAP_DMA_MCBSP1_TX,
@@ -193,6 +196,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
193 .clk_name = "mcbsp_clk", 196 .clk_name = "mcbsp_clk",
194 }, 197 },
195 { 198 {
199 .phys_base = OMAP1510_MCBSP2_BASE,
196 .virt_base = io_p2v(OMAP1510_MCBSP2_BASE), 200 .virt_base = io_p2v(OMAP1510_MCBSP2_BASE),
197 .dma_rx_sync = OMAP_DMA_MCBSP2_RX, 201 .dma_rx_sync = OMAP_DMA_MCBSP2_RX,
198 .dma_tx_sync = OMAP_DMA_MCBSP2_TX, 202 .dma_tx_sync = OMAP_DMA_MCBSP2_TX,
@@ -201,6 +205,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
201 .ops = &omap1_mcbsp_ops, 205 .ops = &omap1_mcbsp_ops,
202 }, 206 },
203 { 207 {
208 .phys_base = OMAP1510_MCBSP3_BASE,
204 .virt_base = OMAP1510_MCBSP3_BASE, 209 .virt_base = OMAP1510_MCBSP3_BASE,
205 .dma_rx_sync = OMAP_DMA_MCBSP3_RX, 210 .dma_rx_sync = OMAP_DMA_MCBSP3_RX,
206 .dma_tx_sync = OMAP_DMA_MCBSP3_TX, 211 .dma_tx_sync = OMAP_DMA_MCBSP3_TX,
@@ -219,6 +224,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
219#ifdef CONFIG_ARCH_OMAP16XX 224#ifdef CONFIG_ARCH_OMAP16XX
220static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = { 225static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
221 { 226 {
227 .phys_base = OMAP1610_MCBSP1_BASE,
222 .virt_base = OMAP1610_MCBSP1_BASE, 228 .virt_base = OMAP1610_MCBSP1_BASE,
223 .dma_rx_sync = OMAP_DMA_MCBSP1_RX, 229 .dma_rx_sync = OMAP_DMA_MCBSP1_RX,
224 .dma_tx_sync = OMAP_DMA_MCBSP1_TX, 230 .dma_tx_sync = OMAP_DMA_MCBSP1_TX,
@@ -228,6 +234,7 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
228 .clk_name = "mcbsp_clk", 234 .clk_name = "mcbsp_clk",
229 }, 235 },
230 { 236 {
237 .phys_base = OMAP1610_MCBSP2_BASE,
231 .virt_base = io_p2v(OMAP1610_MCBSP2_BASE), 238 .virt_base = io_p2v(OMAP1610_MCBSP2_BASE),
232 .dma_rx_sync = OMAP_DMA_MCBSP2_RX, 239 .dma_rx_sync = OMAP_DMA_MCBSP2_RX,
233 .dma_tx_sync = OMAP_DMA_MCBSP2_TX, 240 .dma_tx_sync = OMAP_DMA_MCBSP2_TX,
@@ -236,6 +243,7 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
236 .ops = &omap1_mcbsp_ops, 243 .ops = &omap1_mcbsp_ops,
237 }, 244 },
238 { 245 {
246 .phys_base = OMAP1610_MCBSP3_BASE,
239 .virt_base = OMAP1610_MCBSP3_BASE, 247 .virt_base = OMAP1610_MCBSP3_BASE,
240 .dma_rx_sync = OMAP_DMA_MCBSP3_RX, 248 .dma_rx_sync = OMAP_DMA_MCBSP3_RX,
241 .dma_tx_sync = OMAP_DMA_MCBSP3_TX, 249 .dma_tx_sync = OMAP_DMA_MCBSP3_TX,
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 27eb6e3ca926..b261f1f80b5e 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -134,6 +134,7 @@ static struct omap_mcbsp_ops omap2_mcbsp_ops = {
134#ifdef CONFIG_ARCH_OMAP24XX 134#ifdef CONFIG_ARCH_OMAP24XX
135static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = { 135static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = {
136 { 136 {
137 .phys_base = OMAP24XX_MCBSP1_BASE,
137 .virt_base = IO_ADDRESS(OMAP24XX_MCBSP1_BASE), 138 .virt_base = IO_ADDRESS(OMAP24XX_MCBSP1_BASE),
138 .dma_rx_sync = OMAP24XX_DMA_MCBSP1_RX, 139 .dma_rx_sync = OMAP24XX_DMA_MCBSP1_RX,
139 .dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX, 140 .dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX,
@@ -143,6 +144,7 @@ static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = {
143 .clk_name = "mcbsp_clk", 144 .clk_name = "mcbsp_clk",
144 }, 145 },
145 { 146 {
147 .phys_base = OMAP24XX_MCBSP2_BASE,
146 .virt_base = IO_ADDRESS(OMAP24XX_MCBSP2_BASE), 148 .virt_base = IO_ADDRESS(OMAP24XX_MCBSP2_BASE),
147 .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX, 149 .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX,
148 .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX, 150 .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX,
@@ -161,6 +163,7 @@ static struct omap_mcbsp_platform_data omap24xx_mcbsp_pdata[] = {
161#ifdef CONFIG_ARCH_OMAP34XX 163#ifdef CONFIG_ARCH_OMAP34XX
162static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = { 164static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
163 { 165 {
166 .phys_base = OMAP34XX_MCBSP1_BASE,
164 .virt_base = IO_ADDRESS(OMAP34XX_MCBSP1_BASE), 167 .virt_base = IO_ADDRESS(OMAP34XX_MCBSP1_BASE),
165 .dma_rx_sync = OMAP24XX_DMA_MCBSP1_RX, 168 .dma_rx_sync = OMAP24XX_DMA_MCBSP1_RX,
166 .dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX, 169 .dma_tx_sync = OMAP24XX_DMA_MCBSP1_TX,
@@ -170,6 +173,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
170 .clk_name = "mcbsp_clk", 173 .clk_name = "mcbsp_clk",
171 }, 174 },
172 { 175 {
176 .phys_base = OMAP34XX_MCBSP2_BASE,
173 .virt_base = IO_ADDRESS(OMAP34XX_MCBSP2_BASE), 177 .virt_base = IO_ADDRESS(OMAP34XX_MCBSP2_BASE),
174 .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX, 178 .dma_rx_sync = OMAP24XX_DMA_MCBSP2_RX,
175 .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX, 179 .dma_tx_sync = OMAP24XX_DMA_MCBSP2_TX,
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 25d9a11eb617..a713e40e1f1a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -211,6 +211,12 @@ static struct mem_type mem_types[] = {
211 PMD_SECT_TEX(1), 211 PMD_SECT_TEX(1),
212 .domain = DOMAIN_IO, 212 .domain = DOMAIN_IO,
213 }, 213 },
214 [MT_DEVICE_WC] = { /* ioremap_wc */
215 .prot_pte = PROT_PTE_DEVICE,
216 .prot_l1 = PMD_TYPE_TABLE,
217 .prot_sect = PROT_SECT_DEVICE,
218 .domain = DOMAIN_IO,
219 },
214 [MT_CACHECLEAN] = { 220 [MT_CACHECLEAN] = {
215 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, 221 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
216 .domain = DOMAIN_KERNEL, 222 .domain = DOMAIN_KERNEL,
@@ -273,6 +279,20 @@ static void __init build_mem_type_table(void)
273 } 279 }
274 280
275 /* 281 /*
282 * On non-Xscale3 ARMv5-and-older systems, use CB=01
283 * (Uncached/Buffered) for ioremap_wc() mappings. On XScale3
284 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
285 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
286 */
287 if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
288 mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1);
289 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
290 } else {
291 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE;
292 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
293 }
294
295 /*
276 * ARMv5 and lower, bit 4 must be set for page tables. 296 * ARMv5 and lower, bit 4 must be set for page tables.
277 * (was: cache "update-able on write" bit on ARM610) 297 * (was: cache "update-able on write" bit on ARM610)
278 * However, Xscale cores require this bit to be cleared. 298 * However, Xscale cores require this bit to be cleared.
diff --git a/arch/arm/plat-mxc/clock.c b/arch/arm/plat-mxc/clock.c
index 2f8627218839..0a38f0b396eb 100644
--- a/arch/arm/plat-mxc/clock.c
+++ b/arch/arm/plat-mxc/clock.c
@@ -37,7 +37,6 @@
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/semaphore.h> 38#include <linux/semaphore.h>
39#include <linux/string.h> 39#include <linux/string.h>
40#include <linux/version.h>
41 40
42#include <mach/clock.h> 41#include <mach/clock.h>
43 42
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 3e76ee2bc731..9e1341ebc14e 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1488,7 +1488,7 @@ static int __init _omap_gpio_init(void)
1488 bank->chip.set = gpio_set; 1488 bank->chip.set = gpio_set;
1489 if (bank_is_mpuio(bank)) { 1489 if (bank_is_mpuio(bank)) {
1490 bank->chip.label = "mpuio"; 1490 bank->chip.label = "mpuio";
1491#ifdef CONFIG_ARCH_OMAP1 1491#ifdef CONFIG_ARCH_OMAP16XX
1492 bank->chip.dev = &omap_mpuio_device.dev; 1492 bank->chip.dev = &omap_mpuio_device.dev;
1493#endif 1493#endif
1494 bank->chip.base = OMAP_MPUIO(0); 1494 bank->chip.base = OMAP_MPUIO(0);
diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h
index 6eb44a92871d..8fdb95e26fcd 100644
--- a/arch/arm/plat-omap/include/mach/mcbsp.h
+++ b/arch/arm/plat-omap/include/mach/mcbsp.h
@@ -315,6 +315,7 @@ struct omap_mcbsp_ops {
315}; 315};
316 316
317struct omap_mcbsp_platform_data { 317struct omap_mcbsp_platform_data {
318 unsigned long phys_base;
318 u32 virt_base; 319 u32 virt_base;
319 u8 dma_rx_sync, dma_tx_sync; 320 u8 dma_rx_sync, dma_tx_sync;
320 u16 rx_irq, tx_irq; 321 u16 rx_irq, tx_irq;
@@ -324,6 +325,7 @@ struct omap_mcbsp_platform_data {
324 325
325struct omap_mcbsp { 326struct omap_mcbsp {
326 struct device *dev; 327 struct device *dev;
328 unsigned long phys_base;
327 u32 io_base; 329 u32 io_base;
328 u8 id; 330 u8 id;
329 u8 free; 331 u8 free;
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index d0844050f2d2..014d26574bb6 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -651,7 +651,7 @@ int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer,
651 omap_set_dma_dest_params(mcbsp[id].dma_tx_lch, 651 omap_set_dma_dest_params(mcbsp[id].dma_tx_lch,
652 src_port, 652 src_port,
653 OMAP_DMA_AMODE_CONSTANT, 653 OMAP_DMA_AMODE_CONSTANT,
654 mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1, 654 mcbsp[id].phys_base + OMAP_MCBSP_REG_DXR1,
655 0, 0); 655 0, 0);
656 656
657 omap_set_dma_src_params(mcbsp[id].dma_tx_lch, 657 omap_set_dma_src_params(mcbsp[id].dma_tx_lch,
@@ -712,7 +712,7 @@ int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer,
712 omap_set_dma_src_params(mcbsp[id].dma_rx_lch, 712 omap_set_dma_src_params(mcbsp[id].dma_rx_lch,
713 src_port, 713 src_port,
714 OMAP_DMA_AMODE_CONSTANT, 714 OMAP_DMA_AMODE_CONSTANT,
715 mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1, 715 mcbsp[id].phys_base + OMAP_MCBSP_REG_DRR1,
716 0, 0); 716 0, 0);
717 717
718 omap_set_dma_dest_params(mcbsp[id].dma_rx_lch, 718 omap_set_dma_dest_params(mcbsp[id].dma_rx_lch,
@@ -830,6 +830,7 @@ static int __init omap_mcbsp_probe(struct platform_device *pdev)
830 mcbsp[id].dma_tx_lch = -1; 830 mcbsp[id].dma_tx_lch = -1;
831 mcbsp[id].dma_rx_lch = -1; 831 mcbsp[id].dma_rx_lch = -1;
832 832
833 mcbsp[id].phys_base = pdata->phys_base;
833 mcbsp[id].io_base = pdata->virt_base; 834 mcbsp[id].io_base = pdata->virt_base;
834 /* Default I/O is IRQ based */ 835 /* Default I/O is IRQ based */
835 mcbsp[id].io_type = OMAP_MCBSP_IRQ_IO; 836 mcbsp[id].io_type = OMAP_MCBSP_IRQ_IO;
diff --git a/arch/avr32/kernel/asm-offsets.c b/arch/avr32/kernel/asm-offsets.c
index e4796c67a831..d6a8193a1d2f 100644
--- a/arch/avr32/kernel/asm-offsets.c
+++ b/arch/avr32/kernel/asm-offsets.c
@@ -4,6 +4,8 @@
4 * to extract and format the required data. 4 * to extract and format the required data.
5 */ 5 */
6 6
7#include <linux/mm.h>
8#include <linux/sched.h>
7#include <linux/thread_info.h> 9#include <linux/thread_info.h>
8#include <linux/kbuild.h> 10#include <linux/kbuild.h>
9 11
@@ -17,4 +19,8 @@ void foo(void)
17 OFFSET(TI_rar_saved, thread_info, rar_saved); 19 OFFSET(TI_rar_saved, thread_info, rar_saved);
18 OFFSET(TI_rsr_saved, thread_info, rsr_saved); 20 OFFSET(TI_rsr_saved, thread_info, rsr_saved);
19 OFFSET(TI_restart_block, thread_info, restart_block); 21 OFFSET(TI_restart_block, thread_info, restart_block);
22 BLANK();
23 OFFSET(TSK_active_mm, task_struct, active_mm);
24 BLANK();
25 OFFSET(MM_pgd, mm_struct, pgd);
20} 26}
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 2b398cae110c..33d49377b8be 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -334,9 +334,64 @@ save_full_context_ex:
334 334
335 /* Low-level exception handlers */ 335 /* Low-level exception handlers */
336handle_critical: 336handle_critical:
337 /*
338 * AT32AP700x errata:
339 *
340 * After a Java stack overflow or underflow trap, any CPU
341 * memory access may cause erratic behavior. This will happen
342 * when the four least significant bits of the JOSP system
343 * register contains any value between 9 and 15 (inclusive).
344 *
345 * Possible workarounds:
346 * - Don't use the Java Extension Module
347 * - Ensure that the stack overflow and underflow trap
348 * handlers do not do any memory access or trigger any
349 * exceptions before the overflow/underflow condition is
350 * cleared (by incrementing or decrementing the JOSP)
351 * - Make sure that JOSP does not contain any problematic
352 * value before doing any exception or interrupt
353 * processing.
354 * - Set up a critical exception handler which writes a
355 * known-to-be-safe value, e.g. 4, to JOSP before doing
356 * any further processing.
357 *
358 * We'll use the last workaround for now since we cannot
359 * guarantee that user space processes don't use Java mode.
360 * Non-well-behaving userland will be terminated with extreme
361 * prejudice.
362 */
363#ifdef CONFIG_CPU_AT32AP700X
364 /*
365 * There's a chance we can't touch memory, so temporarily
366 * borrow PTBR to save the stack pointer while we fix things
367 * up...
368 */
369 mtsr SYSREG_PTBR, sp
370 mov sp, 4
371 mtsr SYSREG_JOSP, sp
372 mfsr sp, SYSREG_PTBR
373 sub pc, -2
374
375 /* Push most of pt_regs on stack. We'll do the rest later */
337 sub sp, 4 376 sub sp, 4
338 stmts --sp, r0-lr 377 pushm r0-r12
339 rcall save_full_context_ex 378
379 /* PTBR mirrors current_thread_info()->task->active_mm->pgd */
380 get_thread_info r0
381 ld.w r1, r0[TI_task]
382 ld.w r2, r1[TSK_active_mm]
383 ld.w r3, r2[MM_pgd]
384 mtsr SYSREG_PTBR, r3
385#else
386 sub sp, 4
387 pushm r0-r12
388#endif
389 sub r0, sp, -(14 * 4)
390 mov r1, lr
391 mfsr r2, SYSREG_RAR_EX
392 mfsr r3, SYSREG_RSR_EX
393 pushm r0-r3
394
340 mfsr r12, SYSREG_ECR 395 mfsr r12, SYSREG_ECR
341 mov r11, sp 396 mov r11, sp
342 rcall do_critical_exception 397 rcall do_critical_exception
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index 5be4de65b209..17503b0ed6c9 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -134,7 +134,7 @@ pm_standby:
134 mov r11, SDRAMC_LPR_LPCB_SELF_RFR 134 mov r11, SDRAMC_LPR_LPCB_SELF_RFR
135 bfins r10, r11, 0, 2 /* LPCB <- self Refresh */ 135 bfins r10, r11, 0, 2 /* LPCB <- self Refresh */
136 sync 0 /* flush write buffer */ 136 sync 0 /* flush write buffer */
137 st.w r12[SDRAMC_LPR], r11 /* put SDRAM in self-refresh mode */ 137 st.w r12[SDRAMC_LPR], r10 /* put SDRAM in self-refresh mode */
138 ld.w r11, r12[SDRAMC_LPR] 138 ld.w r11, r12[SDRAMC_LPR]
139 unmask_interrupts 139 unmask_interrupts
140 sleep CPU_SLEEP_FROZEN 140 sleep CPU_SLEEP_FROZEN
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index 8a2a53b33616..bb959fbab2dc 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -580,13 +580,15 @@ int atari_keyb_init(void)
580 do { 580 do {
581 /* reset IKBD ACIA */ 581 /* reset IKBD ACIA */
582 acia.key_ctrl = ACIA_RESET | 582 acia.key_ctrl = ACIA_RESET |
583 (atari_switches & ATARI_SWITCH_IKBD) ? ACIA_RHTID : 0; 583 ((atari_switches & ATARI_SWITCH_IKBD) ?
584 ACIA_RHTID : 0);
584 (void)acia.key_ctrl; 585 (void)acia.key_ctrl;
585 (void)acia.key_data; 586 (void)acia.key_data;
586 587
587 /* reset MIDI ACIA */ 588 /* reset MIDI ACIA */
588 acia.mid_ctrl = ACIA_RESET | 589 acia.mid_ctrl = ACIA_RESET |
589 (atari_switches & ATARI_SWITCH_MIDI) ? ACIA_RHTID : 0; 590 ((atari_switches & ATARI_SWITCH_MIDI) ?
591 ACIA_RHTID : 0);
590 (void)acia.mid_ctrl; 592 (void)acia.mid_ctrl;
591 (void)acia.mid_data; 593 (void)acia.mid_data;
592 594
@@ -599,7 +601,8 @@ int atari_keyb_init(void)
599 ACIA_RHTID : ACIA_RLTID); 601 ACIA_RHTID : ACIA_RLTID);
600 602
601 acia.mid_ctrl = ACIA_DIV16 | ACIA_D8N1S | 603 acia.mid_ctrl = ACIA_DIV16 | ACIA_D8N1S |
602 (atari_switches & ATARI_SWITCH_MIDI) ? ACIA_RHTID : 0; 604 ((atari_switches & ATARI_SWITCH_MIDI) ?
605 ACIA_RHTID : 0);
603 606
604 /* make sure the interrupt line is up */ 607 /* make sure the interrupt line is up */
605 } while ((mfp.par_dt_reg & 0x10) == 0); 608 } while ((mfp.par_dt_reg & 0x10) == 0);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4da736e25333..49896a2a1d72 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1886,6 +1886,15 @@ config STACKTRACE_SUPPORT
1886 1886
1887source "init/Kconfig" 1887source "init/Kconfig"
1888 1888
1889config PROBE_INITRD_HEADER
1890 bool "Probe initrd header created by addinitrd"
1891 depends on BLK_DEV_INITRD
1892 help
1893 Probe initrd header at the last page of kernel image.
1894 Say Y here if you are using arch/mips/boot/addinitrd.c to
1895 add initrd or initramfs image to the kernel image.
1896 Otherwise, say N.
1897
1889menu "Bus options (PCI, PCMCIA, EISA, ISA, TC)" 1898menu "Bus options (PCI, PCMCIA, EISA, ISA, TC)"
1890 1899
1891config HW_HAS_EISA 1900config HW_HAS_EISA
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 2aae76bce293..16f8edfe5cdc 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -160,30 +160,33 @@ early_param("rd_size", rd_size_early);
160static unsigned long __init init_initrd(void) 160static unsigned long __init init_initrd(void)
161{ 161{
162 unsigned long end; 162 unsigned long end;
163 u32 *initrd_header;
164 163
165 /* 164 /*
166 * Board specific code or command line parser should have 165 * Board specific code or command line parser should have
167 * already set up initrd_start and initrd_end. In these cases 166 * already set up initrd_start and initrd_end. In these cases
168 * perfom sanity checks and use them if all looks good. 167 * perfom sanity checks and use them if all looks good.
169 */ 168 */
170 if (initrd_start && initrd_end > initrd_start) 169 if (!initrd_start || initrd_end <= initrd_start) {
171 goto sanitize; 170#ifdef CONFIG_PROBE_INITRD_HEADER
171 u32 *initrd_header;
172 172
173 /* 173 /*
174 * See if initrd has been added to the kernel image by 174 * See if initrd has been added to the kernel image by
175 * arch/mips/boot/addinitrd.c. In that case a header is 175 * arch/mips/boot/addinitrd.c. In that case a header is
176 * prepended to initrd and is made up by 8 bytes. The fisrt 176 * prepended to initrd and is made up by 8 bytes. The first
177 * word is a magic number and the second one is the size of 177 * word is a magic number and the second one is the size of
178 * initrd. Initrd start must be page aligned in any cases. 178 * initrd. Initrd start must be page aligned in any cases.
179 */ 179 */
180 initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8; 180 initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
181 if (initrd_header[0] != 0x494E5244) 181 if (initrd_header[0] != 0x494E5244)
182 goto disable;
183 initrd_start = (unsigned long)(initrd_header + 2);
184 initrd_end = initrd_start + initrd_header[1];
185#else
182 goto disable; 186 goto disable;
183 initrd_start = (unsigned long)(initrd_header + 2); 187#endif
184 initrd_end = initrd_start + initrd_header[1]; 188 }
185 189
186sanitize:
187 if (initrd_start & ~PAGE_MASK) { 190 if (initrd_start & ~PAGE_MASK) {
188 pr_err("initrd start must be page aligned\n"); 191 pr_err("initrd start must be page aligned\n");
189 goto disable; 192 goto disable;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 426cced1e9dc..6bee29097a56 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -373,8 +373,8 @@ void __noreturn die(const char * str, const struct pt_regs * regs)
373 do_exit(SIGSEGV); 373 do_exit(SIGSEGV);
374} 374}
375 375
376extern const struct exception_table_entry __start___dbe_table[]; 376extern struct exception_table_entry __start___dbe_table[];
377extern const struct exception_table_entry __stop___dbe_table[]; 377extern struct exception_table_entry __stop___dbe_table[];
378 378
379__asm__( 379__asm__(
380" .section __dbe_table, \"a\"\n" 380" .section __dbe_table, \"a\"\n"
@@ -1200,7 +1200,7 @@ void *set_except_vector(int n, void *addr)
1200 if (n == 0 && cpu_has_divec) { 1200 if (n == 0 && cpu_has_divec) {
1201 *(u32 *)(ebase + 0x200) = 0x08000000 | 1201 *(u32 *)(ebase + 0x200) = 0x08000000 |
1202 (0x03ffffff & (handler >> 2)); 1202 (0x03ffffff & (handler >> 2));
1203 flush_icache_range(ebase + 0x200, ebase + 0x204); 1203 local_flush_icache_range(ebase + 0x200, ebase + 0x204);
1204 } 1204 }
1205 return (void *)old_handler; 1205 return (void *)old_handler;
1206} 1206}
@@ -1283,7 +1283,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1283 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); 1283 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
1284 w = (u32 *)(b + ori_offset); 1284 w = (u32 *)(b + ori_offset);
1285 *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); 1285 *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
1286 flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len)); 1286 local_flush_icache_range((unsigned long)b,
1287 (unsigned long)(b+handler_len));
1287 } 1288 }
1288 else { 1289 else {
1289 /* 1290 /*
@@ -1295,7 +1296,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1295 w = (u32 *)b; 1296 w = (u32 *)b;
1296 *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ 1297 *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
1297 *w = 0; 1298 *w = 0;
1298 flush_icache_range((unsigned long)b, (unsigned long)(b+8)); 1299 local_flush_icache_range((unsigned long)b,
1300 (unsigned long)(b+8));
1299 } 1301 }
1300 1302
1301 return (void *)old_handler; 1303 return (void *)old_handler;
@@ -1515,7 +1517,7 @@ void __cpuinit per_cpu_trap_init(void)
1515void __init set_handler(unsigned long offset, void *addr, unsigned long size) 1517void __init set_handler(unsigned long offset, void *addr, unsigned long size)
1516{ 1518{
1517 memcpy((void *)(ebase + offset), addr, size); 1519 memcpy((void *)(ebase + offset), addr, size);
1518 flush_icache_range(ebase + offset, ebase + offset + size); 1520 local_flush_icache_range(ebase + offset, ebase + offset + size);
1519} 1521}
1520 1522
1521static char panic_null_cerr[] __cpuinitdata = 1523static char panic_null_cerr[] __cpuinitdata =
@@ -1680,6 +1682,8 @@ void __init trap_init(void)
1680 signal32_init(); 1682 signal32_init();
1681#endif 1683#endif
1682 1684
1683 flush_icache_range(ebase, ebase + 0x400); 1685 local_flush_icache_range(ebase, ebase + 0x400);
1684 flush_tlb_handlers(); 1686 flush_tlb_handlers();
1687
1688 sort_extable(__start___dbe_table, __stop___dbe_table);
1685} 1689}
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 27a5b466c85c..5500c20c79ae 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -320,6 +320,7 @@ void __cpuinit r3k_cache_init(void)
320 flush_cache_range = r3k_flush_cache_range; 320 flush_cache_range = r3k_flush_cache_range;
321 flush_cache_page = r3k_flush_cache_page; 321 flush_cache_page = r3k_flush_cache_page;
322 flush_icache_range = r3k_flush_icache_range; 322 flush_icache_range = r3k_flush_icache_range;
323 local_flush_icache_range = r3k_flush_icache_range;
323 324
324 flush_cache_sigtramp = r3k_flush_cache_sigtramp; 325 flush_cache_sigtramp = r3k_flush_cache_sigtramp;
325 local_flush_data_cache_page = local_r3k_flush_data_cache_page; 326 local_flush_data_cache_page = local_r3k_flush_data_cache_page;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 71df3390c07b..6e99665ae860 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -543,12 +543,8 @@ struct flush_icache_range_args {
543 unsigned long end; 543 unsigned long end;
544}; 544};
545 545
546static inline void local_r4k_flush_icache_range(void *args) 546static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
547{ 547{
548 struct flush_icache_range_args *fir_args = args;
549 unsigned long start = fir_args->start;
550 unsigned long end = fir_args->end;
551
552 if (!cpu_has_ic_fills_f_dc) { 548 if (!cpu_has_ic_fills_f_dc) {
553 if (end - start >= dcache_size) { 549 if (end - start >= dcache_size) {
554 r4k_blast_dcache(); 550 r4k_blast_dcache();
@@ -564,6 +560,15 @@ static inline void local_r4k_flush_icache_range(void *args)
564 protected_blast_icache_range(start, end); 560 protected_blast_icache_range(start, end);
565} 561}
566 562
563static inline void local_r4k_flush_icache_range_ipi(void *args)
564{
565 struct flush_icache_range_args *fir_args = args;
566 unsigned long start = fir_args->start;
567 unsigned long end = fir_args->end;
568
569 local_r4k_flush_icache_range(start, end);
570}
571
567static void r4k_flush_icache_range(unsigned long start, unsigned long end) 572static void r4k_flush_icache_range(unsigned long start, unsigned long end)
568{ 573{
569 struct flush_icache_range_args args; 574 struct flush_icache_range_args args;
@@ -571,7 +576,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
571 args.start = start; 576 args.start = start;
572 args.end = end; 577 args.end = end;
573 578
574 r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1); 579 r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
575 instruction_hazard(); 580 instruction_hazard();
576} 581}
577 582
@@ -1375,6 +1380,7 @@ void __cpuinit r4k_cache_init(void)
1375 local_flush_data_cache_page = local_r4k_flush_data_cache_page; 1380 local_flush_data_cache_page = local_r4k_flush_data_cache_page;
1376 flush_data_cache_page = r4k_flush_data_cache_page; 1381 flush_data_cache_page = r4k_flush_data_cache_page;
1377 flush_icache_range = r4k_flush_icache_range; 1382 flush_icache_range = r4k_flush_icache_range;
1383 local_flush_icache_range = local_r4k_flush_icache_range;
1378 1384
1379#if defined(CONFIG_DMA_NONCOHERENT) 1385#if defined(CONFIG_DMA_NONCOHERENT)
1380 if (coherentio) { 1386 if (coherentio) {
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index a9f7f1f5e9b4..f7c8f9ce39c1 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -362,6 +362,7 @@ void __cpuinit tx39_cache_init(void)
362 flush_cache_range = (void *) tx39h_flush_icache_all; 362 flush_cache_range = (void *) tx39h_flush_icache_all;
363 flush_cache_page = (void *) tx39h_flush_icache_all; 363 flush_cache_page = (void *) tx39h_flush_icache_all;
364 flush_icache_range = (void *) tx39h_flush_icache_all; 364 flush_icache_range = (void *) tx39h_flush_icache_all;
365 local_flush_icache_range = (void *) tx39h_flush_icache_all;
365 366
366 flush_cache_sigtramp = (void *) tx39h_flush_icache_all; 367 flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
367 local_flush_data_cache_page = (void *) tx39h_flush_icache_all; 368 local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
@@ -390,6 +391,7 @@ void __cpuinit tx39_cache_init(void)
390 flush_cache_range = tx39_flush_cache_range; 391 flush_cache_range = tx39_flush_cache_range;
391 flush_cache_page = tx39_flush_cache_page; 392 flush_cache_page = tx39_flush_cache_page;
392 flush_icache_range = tx39_flush_icache_range; 393 flush_icache_range = tx39_flush_icache_range;
394 local_flush_icache_range = tx39_flush_icache_range;
393 395
394 flush_cache_sigtramp = tx39_flush_cache_sigtramp; 396 flush_cache_sigtramp = tx39_flush_cache_sigtramp;
395 local_flush_data_cache_page = local_tx39_flush_data_cache_page; 397 local_flush_data_cache_page = local_tx39_flush_data_cache_page;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 034e8506f6ea..1eb7c71e3d6a 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -29,6 +29,7 @@ void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
29void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, 29void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
30 unsigned long pfn); 30 unsigned long pfn);
31void (*flush_icache_range)(unsigned long start, unsigned long end); 31void (*flush_icache_range)(unsigned long start, unsigned long end);
32void (*local_flush_icache_range)(unsigned long start, unsigned long end);
32 33
33void (*__flush_cache_vmap)(void); 34void (*__flush_cache_vmap)(void);
34void (*__flush_cache_vunmap)(void); 35void (*__flush_cache_vunmap)(void);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 76da73a5ab3c..979cf9197282 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1273,10 +1273,10 @@ void __cpuinit build_tlb_refill_handler(void)
1273 1273
1274void __cpuinit flush_tlb_handlers(void) 1274void __cpuinit flush_tlb_handlers(void)
1275{ 1275{
1276 flush_icache_range((unsigned long)handle_tlbl, 1276 local_flush_icache_range((unsigned long)handle_tlbl,
1277 (unsigned long)handle_tlbl + sizeof(handle_tlbl)); 1277 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
1278 flush_icache_range((unsigned long)handle_tlbs, 1278 local_flush_icache_range((unsigned long)handle_tlbs,
1279 (unsigned long)handle_tlbs + sizeof(handle_tlbs)); 1279 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
1280 flush_icache_range((unsigned long)handle_tlbm, 1280 local_flush_icache_range((unsigned long)handle_tlbm,
1281 (unsigned long)handle_tlbm + sizeof(handle_tlbm)); 1281 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
1282} 1282}
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 0afe94c48fb6..fe6bee09cece 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -53,6 +53,7 @@ txx9_reg_res_init(unsigned int pcode, unsigned long base, unsigned long size)
53 txx9_ce_res[i].name = txx9_ce_res_name[i]; 53 txx9_ce_res[i].name = txx9_ce_res_name[i];
54 } 54 }
55 55
56 txx9_pcode = pcode;
56 sprintf(txx9_pcode_str, "TX%x", pcode); 57 sprintf(txx9_pcode_str, "TX%x", pcode);
57 if (base) { 58 if (base) {
58 txx9_reg_res.start = base & 0xfffffffffULL; 59 txx9_reg_res.start = base & 0xfffffffffULL;
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 69ff671498e5..12c04c5e558b 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -104,7 +104,7 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
104 buf->count -= reclen; 104 buf->count -= reclen;
105 return 0; 105 return 0;
106Efault: 106Efault:
107 buffer->error = -EFAULT; 107 buf->error = -EFAULT;
108 return -EFAULT; 108 return -EFAULT;
109} 109}
110 110
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 9155c9312c1e..c6be19e9ceae 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -116,6 +116,11 @@ ifeq ($(CONFIG_6xx),y)
116KBUILD_CFLAGS += -mcpu=powerpc 116KBUILD_CFLAGS += -mcpu=powerpc
117endif 117endif
118 118
119# Work around a gcc code-gen bug with -fno-omit-frame-pointer.
120ifeq ($(CONFIG_FTRACE),y)
121KBUILD_CFLAGS += -mno-sched-epilog
122endif
123
119cpu-as-$(CONFIG_4xx) += -Wa,-m405 124cpu-as-$(CONFIG_4xx) += -Wa,-m405
120cpu-as-$(CONFIG_6xx) += -Wa,-maltivec 125cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
121cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec 126cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 14174aa24074..717a3bc1352e 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -49,7 +49,7 @@ zlib := inffast.c inflate.c inftrees.c
49zlibheader := inffast.h inffixed.h inflate.h inftrees.h infutil.h 49zlibheader := inffast.h inffixed.h inflate.h inftrees.h infutil.h
50zliblinuxheader := zlib.h zconf.h zutil.h 50zliblinuxheader := zlib.h zconf.h zutil.h
51 51
52$(addprefix $(obj)/,$(zlib) gunzip_util.o main.o): \ 52$(addprefix $(obj)/,$(zlib) cuboot-c2k.o gunzip_util.o main.o prpmc2800.o): \
53 $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader)) 53 $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
54 54
55src-libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c 55src-libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index db0b8f3b8807..4597c491e9b5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -153,12 +153,10 @@
153#define __S110 PAGE_SHARED_X 153#define __S110 PAGE_SHARED_X
154#define __S111 PAGE_SHARED_X 154#define __S111 PAGE_SHARED_X
155 155
156#ifdef CONFIG_HUGETLB_PAGE 156#ifdef CONFIG_PPC_MM_SLICES
157
158#define HAVE_ARCH_UNMAPPED_AREA 157#define HAVE_ARCH_UNMAPPED_AREA
159#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 158#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
160 159#endif /* CONFIG_PPC_MM_SLICES */
161#endif
162 160
163#ifndef __ASSEMBLY__ 161#ifndef __ASSEMBLY__
164 162
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 64f5948ebc9d..946daea780f1 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -14,12 +14,13 @@ endif
14 14
15ifdef CONFIG_FTRACE 15ifdef CONFIG_FTRACE
16# Do not trace early boot code 16# Do not trace early boot code
17CFLAGS_REMOVE_cputable.o = -pg 17CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
18CFLAGS_REMOVE_prom_init.o = -pg 18CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
19CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog
19 20
20ifdef CONFIG_DYNAMIC_FTRACE 21ifdef CONFIG_DYNAMIC_FTRACE
21# dynamic ftrace setup. 22# dynamic ftrace setup.
22CFLAGS_REMOVE_ftrace.o = -pg 23CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
23endif 24endif
24 25
25endif 26endif
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 367129789cc0..5af4e9b2dbe2 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -647,7 +647,7 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
647 unsigned int flags, unsigned int length) 647 unsigned int flags, unsigned int length)
648{ 648{
649 char *ptr = (char *) &current->thread.TS_FPR(reg); 649 char *ptr = (char *) &current->thread.TS_FPR(reg);
650 int ret; 650 int ret = 0;
651 651
652 flush_vsx_to_thread(current); 652 flush_vsx_to_thread(current);
653 653
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 019b02d8844f..15c611de1ee2 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -158,7 +158,7 @@ _GLOBAL(power_save_ppc32_restore)
158 stw r9,_NIP(r11) /* make it do a blr */ 158 stw r9,_NIP(r11) /* make it do a blr */
159 159
160#ifdef CONFIG_SMP 160#ifdef CONFIG_SMP
161 mfspr r12,SPRN_SPRG3 161 rlwinm r12,r11,0,0,31-THREAD_SHIFT
162 lwz r11,TI_CPU(r12) /* get cpu number * 4 */ 162 lwz r11,TI_CPU(r12) /* get cpu number * 4 */
163 slwi r11,r11,2 163 slwi r11,r11,2
164#else 164#else
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
index 06304034b393..47a1a983ff88 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -84,10 +84,11 @@ _GLOBAL(power_save_ppc32_restore)
84 stw r9,_NIP(r11) /* make it do a blr */ 84 stw r9,_NIP(r11) /* make it do a blr */
85 85
86#ifdef CONFIG_SMP 86#ifdef CONFIG_SMP
87 mfspr r12,SPRN_SPRG3 87 rlwinm r12,r1,0,0,31-THREAD_SHIFT
88 lwz r11,TI_CPU(r12) /* get cpu number * 4 */ 88 lwz r11,TI_CPU(r12) /* get cpu number * 4 */
89 slwi r11,r11,2 89 slwi r11,r11,2
90#else 90#else
91 li r11,0 91 li r11,0
92#endif 92#endif
93
93 b transfer_to_handler_cont 94 b transfer_to_handler_cont
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 4a8ce62fe112..9f6c1ca1739e 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -66,11 +66,12 @@ SECTIONS
66 __got2_end = .; 66 __got2_end = .;
67#endif /* CONFIG_PPC32 */ 67#endif /* CONFIG_PPC32 */
68 68
69 . = ALIGN(PAGE_SIZE);
70 _etext = .;
71 PROVIDE32 (etext = .);
72 } :kernel 69 } :kernel
73 70
71 . = ALIGN(PAGE_SIZE);
72 _etext = .;
73 PROVIDE32 (etext = .);
74
74 /* Read-only data */ 75 /* Read-only data */
75 RODATA 76 RODATA
76 77
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 14be408dfc9b..8920eea34528 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -191,12 +191,17 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
191 unsigned long hash, hpteg; 191 unsigned long hash, hpteg;
192 unsigned long vsid = get_kernel_vsid(vaddr, ssize); 192 unsigned long vsid = get_kernel_vsid(vaddr, ssize);
193 unsigned long va = hpt_va(vaddr, vsid, ssize); 193 unsigned long va = hpt_va(vaddr, vsid, ssize);
194 unsigned long tprot = prot;
195
196 /* Make kernel text executable */
197 if (in_kernel_text(vaddr))
198 tprot &= ~HPTE_R_N;
194 199
195 hash = hpt_hash(va, shift, ssize); 200 hash = hpt_hash(va, shift, ssize);
196 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 201 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
197 202
198 BUG_ON(!ppc_md.hpte_insert); 203 BUG_ON(!ppc_md.hpte_insert);
199 ret = ppc_md.hpte_insert(hpteg, va, paddr, prot, 204 ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
200 HPTE_V_BOLTED, psize, ssize); 205 HPTE_V_BOLTED, psize, ssize);
201 206
202 if (ret < 0) 207 if (ret < 0)
@@ -584,7 +589,7 @@ void __init htab_initialize(void)
584{ 589{
585 unsigned long table; 590 unsigned long table;
586 unsigned long pteg_count; 591 unsigned long pteg_count;
587 unsigned long prot, tprot; 592 unsigned long prot;
588 unsigned long base = 0, size = 0, limit; 593 unsigned long base = 0, size = 0, limit;
589 int i; 594 int i;
590 595
@@ -660,10 +665,9 @@ void __init htab_initialize(void)
660 for (i=0; i < lmb.memory.cnt; i++) { 665 for (i=0; i < lmb.memory.cnt; i++) {
661 base = (unsigned long)__va(lmb.memory.region[i].base); 666 base = (unsigned long)__va(lmb.memory.region[i].base);
662 size = lmb.memory.region[i].size; 667 size = lmb.memory.region[i].size;
663 tprot = prot | (in_kernel_text(base) ? _PAGE_EXEC : 0);
664 668
665 DBG("creating mapping for region: %lx..%lx (prot: %x)\n", 669 DBG("creating mapping for region: %lx..%lx (prot: %x)\n",
666 base, size, tprot); 670 base, size, prot);
667 671
668#ifdef CONFIG_U3_DART 672#ifdef CONFIG_U3_DART
669 /* Do not map the DART space. Fortunately, it will be aligned 673 /* Do not map the DART space. Fortunately, it will be aligned
@@ -680,21 +684,21 @@ void __init htab_initialize(void)
680 unsigned long dart_table_end = dart_tablebase + 16 * MB; 684 unsigned long dart_table_end = dart_tablebase + 16 * MB;
681 if (base != dart_tablebase) 685 if (base != dart_tablebase)
682 BUG_ON(htab_bolt_mapping(base, dart_tablebase, 686 BUG_ON(htab_bolt_mapping(base, dart_tablebase,
683 __pa(base), tprot, 687 __pa(base), prot,
684 mmu_linear_psize, 688 mmu_linear_psize,
685 mmu_kernel_ssize)); 689 mmu_kernel_ssize));
686 if ((base + size) > dart_table_end) 690 if ((base + size) > dart_table_end)
687 BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB, 691 BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
688 base + size, 692 base + size,
689 __pa(dart_table_end), 693 __pa(dart_table_end),
690 tprot, 694 prot,
691 mmu_linear_psize, 695 mmu_linear_psize,
692 mmu_kernel_ssize)); 696 mmu_kernel_ssize));
693 continue; 697 continue;
694 } 698 }
695#endif /* CONFIG_U3_DART */ 699#endif /* CONFIG_U3_DART */
696 BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), 700 BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
697 tprot, mmu_linear_psize, mmu_kernel_ssize)); 701 prot, mmu_linear_psize, mmu_kernel_ssize));
698 } 702 }
699 703
700 /* 704 /*
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 1c1b627ee843..67595bc380dc 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -643,9 +643,10 @@ static struct spu *find_victim(struct spu_context *ctx)
643 !(tmp->flags & SPU_CREATE_NOSCHED) && 643 !(tmp->flags & SPU_CREATE_NOSCHED) &&
644 (!victim || tmp->prio > victim->prio)) { 644 (!victim || tmp->prio > victim->prio)) {
645 victim = spu->ctx; 645 victim = spu->ctx;
646 get_spu_context(victim);
647 } 646 }
648 } 647 }
648 if (victim)
649 get_spu_context(victim);
649 mutex_unlock(&cbe_spu_info[node].list_mutex); 650 mutex_unlock(&cbe_spu_info[node].list_mutex);
650 651
651 if (victim) { 652 if (victim) {
@@ -727,17 +728,33 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
727 /* not a candidate for interruptible because it's called either 728 /* not a candidate for interruptible because it's called either
728 from the scheduler thread or from spu_deactivate */ 729 from the scheduler thread or from spu_deactivate */
729 mutex_lock(&ctx->state_mutex); 730 mutex_lock(&ctx->state_mutex);
730 __spu_schedule(spu, ctx); 731 if (ctx->state == SPU_STATE_SAVED)
732 __spu_schedule(spu, ctx);
731 spu_release(ctx); 733 spu_release(ctx);
732} 734}
733 735
734static void spu_unschedule(struct spu *spu, struct spu_context *ctx) 736/**
737 * spu_unschedule - remove a context from a spu, and possibly release it.
738 * @spu: The SPU to unschedule from
739 * @ctx: The context currently scheduled on the SPU
740 * @free_spu Whether to free the SPU for other contexts
741 *
742 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
743 * SPU is made available for other contexts (ie, may be returned by
744 * spu_get_idle). If this is zero, the caller is expected to schedule another
745 * context to this spu.
746 *
747 * Should be called with ctx->state_mutex held.
748 */
749static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
750 int free_spu)
735{ 751{
736 int node = spu->node; 752 int node = spu->node;
737 753
738 mutex_lock(&cbe_spu_info[node].list_mutex); 754 mutex_lock(&cbe_spu_info[node].list_mutex);
739 cbe_spu_info[node].nr_active--; 755 cbe_spu_info[node].nr_active--;
740 spu->alloc_state = SPU_FREE; 756 if (free_spu)
757 spu->alloc_state = SPU_FREE;
741 spu_unbind_context(spu, ctx); 758 spu_unbind_context(spu, ctx);
742 ctx->stats.invol_ctx_switch++; 759 ctx->stats.invol_ctx_switch++;
743 spu->stats.invol_ctx_switch++; 760 spu->stats.invol_ctx_switch++;
@@ -837,7 +854,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
837 if (spu) { 854 if (spu) {
838 new = grab_runnable_context(max_prio, spu->node); 855 new = grab_runnable_context(max_prio, spu->node);
839 if (new || force) { 856 if (new || force) {
840 spu_unschedule(spu, ctx); 857 spu_unschedule(spu, ctx, new == NULL);
841 if (new) { 858 if (new) {
842 if (new->flags & SPU_CREATE_NOSCHED) 859 if (new->flags & SPU_CREATE_NOSCHED)
843 wake_up(&new->stop_wq); 860 wake_up(&new->stop_wq);
@@ -910,7 +927,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
910 927
911 new = grab_runnable_context(ctx->prio + 1, spu->node); 928 new = grab_runnable_context(ctx->prio + 1, spu->node);
912 if (new) { 929 if (new) {
913 spu_unschedule(spu, ctx); 930 spu_unschedule(spu, ctx, 0);
914 if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) 931 if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
915 spu_add_to_rq(ctx); 932 spu_add_to_rq(ctx);
916 } else { 933 } else {
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index 58ecdd72630f..be60d64be7ad 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -2,7 +2,7 @@ CFLAGS_bootx_init.o += -fPIC
2 2
3ifdef CONFIG_FTRACE 3ifdef CONFIG_FTRACE
4# Do not trace early boot code 4# Do not trace early boot code
5CFLAGS_REMOVE_bootx_init.o = -pg 5CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
6endif 6endif
7 7
8obj-y += pic.o setup.o time.o feature.o pci.o \ 8obj-y += pic.o setup.o time.o feature.o pci.o \
diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig
index 29926a9b9ce2..851c870adf3b 100644
--- a/arch/sh/configs/ap325rxa_defconfig
+++ b/arch/sh/configs/ap325rxa_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26 3# Linux kernel version: 2.6.27-rc4
4# Wed Jul 30 01:18:59 2008 4# Tue Aug 26 14:21:17 2008
5# 5#
6CONFIG_SUPERH=y 6CONFIG_SUPERH=y
7CONFIG_SUPERH32=y 7CONFIG_SUPERH32=y
@@ -11,6 +11,7 @@ CONFIG_GENERIC_BUG=y
11CONFIG_GENERIC_FIND_NEXT_BIT=y 11CONFIG_GENERIC_FIND_NEXT_BIT=y
12CONFIG_GENERIC_HWEIGHT=y 12CONFIG_GENERIC_HWEIGHT=y
13CONFIG_GENERIC_HARDIRQS=y 13CONFIG_GENERIC_HARDIRQS=y
14CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
14CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
15CONFIG_GENERIC_CALIBRATE_DELAY=y 16CONFIG_GENERIC_CALIBRATE_DELAY=y
16CONFIG_GENERIC_TIME=y 17CONFIG_GENERIC_TIME=y
@@ -20,7 +21,6 @@ CONFIG_LOCKDEP_SUPPORT=y
20# CONFIG_ARCH_HAS_ILOG2_U32 is not set 21# CONFIG_ARCH_HAS_ILOG2_U32 is not set
21# CONFIG_ARCH_HAS_ILOG2_U64 is not set 22# CONFIG_ARCH_HAS_ILOG2_U64 is not set
22CONFIG_ARCH_NO_VIRT_TO_BUS=y 23CONFIG_ARCH_NO_VIRT_TO_BUS=y
23CONFIG_ARCH_SUPPORTS_AOUT=y
24CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 24CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
25 25
26# 26#
@@ -58,7 +58,6 @@ CONFIG_SYSCTL=y
58CONFIG_EMBEDDED=y 58CONFIG_EMBEDDED=y
59CONFIG_UID16=y 59CONFIG_UID16=y
60CONFIG_SYSCTL_SYSCALL=y 60CONFIG_SYSCTL_SYSCALL=y
61CONFIG_SYSCTL_SYSCALL_CHECK=y
62# CONFIG_KALLSYMS is not set 61# CONFIG_KALLSYMS is not set
63CONFIG_HOTPLUG=y 62CONFIG_HOTPLUG=y
64CONFIG_PRINTK=y 63CONFIG_PRINTK=y
@@ -89,6 +88,7 @@ CONFIG_HAVE_OPROFILE=y
89# CONFIG_USE_GENERIC_SMP_HELPERS is not set 88# CONFIG_USE_GENERIC_SMP_HELPERS is not set
90CONFIG_HAVE_CLK=y 89CONFIG_HAVE_CLK=y
91CONFIG_PROC_PAGE_MONITOR=y 90CONFIG_PROC_PAGE_MONITOR=y
91CONFIG_HAVE_GENERIC_DMA_COHERENT=y
92CONFIG_SLABINFO=y 92CONFIG_SLABINFO=y
93CONFIG_RT_MUTEXES=y 93CONFIG_RT_MUTEXES=y
94# CONFIG_TINY_SHMEM is not set 94# CONFIG_TINY_SHMEM is not set
@@ -261,9 +261,10 @@ CONFIG_HZ_250=y
261# CONFIG_HZ_300 is not set 261# CONFIG_HZ_300 is not set
262# CONFIG_HZ_1000 is not set 262# CONFIG_HZ_1000 is not set
263CONFIG_HZ=250 263CONFIG_HZ=250
264# CONFIG_SCHED_HRTICK is not set 264CONFIG_SCHED_HRTICK=y
265# CONFIG_KEXEC is not set 265# CONFIG_KEXEC is not set
266# CONFIG_CRASH_DUMP is not set 266# CONFIG_CRASH_DUMP is not set
267CONFIG_SECCOMP=y
267# CONFIG_PREEMPT_NONE is not set 268# CONFIG_PREEMPT_NONE is not set
268# CONFIG_PREEMPT_VOLUNTARY is not set 269# CONFIG_PREEMPT_VOLUNTARY is not set
269CONFIG_PREEMPT=y 270CONFIG_PREEMPT=y
@@ -289,10 +290,6 @@ CONFIG_CMDLINE="console=tty1 console=ttySC5,38400 root=/dev/nfs ip=dhcp"
289# 290#
290CONFIG_BINFMT_ELF=y 291CONFIG_BINFMT_ELF=y
291# CONFIG_BINFMT_MISC is not set 292# CONFIG_BINFMT_MISC is not set
292
293#
294# Networking
295#
296CONFIG_NET=y 293CONFIG_NET=y
297 294
298# 295#
@@ -647,6 +644,7 @@ CONFIG_SSB_POSSIBLE=y
647# CONFIG_MFD_CORE is not set 644# CONFIG_MFD_CORE is not set
648# CONFIG_MFD_SM501 is not set 645# CONFIG_MFD_SM501 is not set
649# CONFIG_HTC_PASIC3 is not set 646# CONFIG_HTC_PASIC3 is not set
647# CONFIG_MFD_TMIO is not set
650 648
651# 649#
652# Multimedia devices 650# Multimedia devices
@@ -690,7 +688,10 @@ CONFIG_DUMMY_CONSOLE=y
690# CONFIG_ACCESSIBILITY is not set 688# CONFIG_ACCESSIBILITY is not set
691# CONFIG_RTC_CLASS is not set 689# CONFIG_RTC_CLASS is not set
692# CONFIG_DMADEVICES is not set 690# CONFIG_DMADEVICES is not set
693# CONFIG_UIO is not set 691CONFIG_UIO=y
692# CONFIG_UIO_PDRV is not set
693CONFIG_UIO_PDRV_GENIRQ=y
694# CONFIG_UIO_SMX is not set
694 695
695# 696#
696# File systems 697# File systems
@@ -854,6 +855,7 @@ CONFIG_FRAME_WARN=1024
854# CONFIG_DEBUG_KERNEL is not set 855# CONFIG_DEBUG_KERNEL is not set
855# CONFIG_DEBUG_BUGVERBOSE is not set 856# CONFIG_DEBUG_BUGVERBOSE is not set
856# CONFIG_DEBUG_MEMORY_INIT is not set 857# CONFIG_DEBUG_MEMORY_INIT is not set
858CONFIG_SYSCTL_SYSCALL_CHECK=y
857# CONFIG_SAMPLES is not set 859# CONFIG_SAMPLES is not set
858# CONFIG_SH_STANDARD_BIOS is not set 860# CONFIG_SH_STANDARD_BIOS is not set
859# CONFIG_EARLY_SCIF_CONSOLE is not set 861# CONFIG_EARLY_SCIF_CONSOLE is not set
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index c4b3e1d8950d..4f8b1974f2c7 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26 3# Linux kernel version: 2.6.27-rc4
4# Wed Jul 30 01:44:41 2008 4# Tue Aug 26 14:18:17 2008
5# 5#
6CONFIG_SUPERH=y 6CONFIG_SUPERH=y
7CONFIG_SUPERH32=y 7CONFIG_SUPERH32=y
@@ -11,6 +11,7 @@ CONFIG_GENERIC_BUG=y
11CONFIG_GENERIC_FIND_NEXT_BIT=y 11CONFIG_GENERIC_FIND_NEXT_BIT=y
12CONFIG_GENERIC_HWEIGHT=y 12CONFIG_GENERIC_HWEIGHT=y
13CONFIG_GENERIC_HARDIRQS=y 13CONFIG_GENERIC_HARDIRQS=y
14CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
14CONFIG_GENERIC_IRQ_PROBE=y 15CONFIG_GENERIC_IRQ_PROBE=y
15CONFIG_GENERIC_CALIBRATE_DELAY=y 16CONFIG_GENERIC_CALIBRATE_DELAY=y
16CONFIG_GENERIC_TIME=y 17CONFIG_GENERIC_TIME=y
@@ -21,7 +22,6 @@ CONFIG_LOCKDEP_SUPPORT=y
21# CONFIG_ARCH_HAS_ILOG2_U32 is not set 22# CONFIG_ARCH_HAS_ILOG2_U32 is not set
22# CONFIG_ARCH_HAS_ILOG2_U64 is not set 23# CONFIG_ARCH_HAS_ILOG2_U64 is not set
23CONFIG_ARCH_NO_VIRT_TO_BUS=y 24CONFIG_ARCH_NO_VIRT_TO_BUS=y
24CONFIG_ARCH_SUPPORTS_AOUT=y
25CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 25CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
26 26
27# 27#
@@ -87,6 +87,7 @@ CONFIG_HAVE_OPROFILE=y
87# CONFIG_USE_GENERIC_SMP_HELPERS is not set 87# CONFIG_USE_GENERIC_SMP_HELPERS is not set
88CONFIG_HAVE_CLK=y 88CONFIG_HAVE_CLK=y
89CONFIG_PROC_PAGE_MONITOR=y 89CONFIG_PROC_PAGE_MONITOR=y
90CONFIG_HAVE_GENERIC_DMA_COHERENT=y
90CONFIG_SLABINFO=y 91CONFIG_SLABINFO=y
91CONFIG_RT_MUTEXES=y 92CONFIG_RT_MUTEXES=y
92# CONFIG_TINY_SHMEM is not set 93# CONFIG_TINY_SHMEM is not set
@@ -270,6 +271,7 @@ CONFIG_HZ=250
270# CONFIG_SCHED_HRTICK is not set 271# CONFIG_SCHED_HRTICK is not set
271# CONFIG_KEXEC is not set 272# CONFIG_KEXEC is not set
272# CONFIG_CRASH_DUMP is not set 273# CONFIG_CRASH_DUMP is not set
274CONFIG_SECCOMP=y
273CONFIG_PREEMPT_NONE=y 275CONFIG_PREEMPT_NONE=y
274# CONFIG_PREEMPT_VOLUNTARY is not set 276# CONFIG_PREEMPT_VOLUNTARY is not set
275# CONFIG_PREEMPT is not set 277# CONFIG_PREEMPT is not set
@@ -294,10 +296,6 @@ CONFIG_CMDLINE="console=ttySC0,115200 earlyprintk=serial ip=on"
294# 296#
295CONFIG_BINFMT_ELF=y 297CONFIG_BINFMT_ELF=y
296# CONFIG_BINFMT_MISC is not set 298# CONFIG_BINFMT_MISC is not set
297
298#
299# Networking
300#
301CONFIG_NET=y 299CONFIG_NET=y
302 300
303# 301#
@@ -649,6 +647,7 @@ CONFIG_HW_RANDOM=y
649CONFIG_I2C=y 647CONFIG_I2C=y
650CONFIG_I2C_BOARDINFO=y 648CONFIG_I2C_BOARDINFO=y
651# CONFIG_I2C_CHARDEV is not set 649# CONFIG_I2C_CHARDEV is not set
650CONFIG_I2C_HELPER_AUTO=y
652 651
653# 652#
654# I2C Hardware Bus support 653# I2C Hardware Bus support
@@ -709,6 +708,7 @@ CONFIG_SSB_POSSIBLE=y
709# CONFIG_MFD_CORE is not set 708# CONFIG_MFD_CORE is not set
710# CONFIG_MFD_SM501 is not set 709# CONFIG_MFD_SM501 is not set
711# CONFIG_HTC_PASIC3 is not set 710# CONFIG_HTC_PASIC3 is not set
711# CONFIG_MFD_TMIO is not set
712 712
713# 713#
714# Multimedia devices 714# Multimedia devices
@@ -755,6 +755,8 @@ CONFIG_USB_ARCH_HAS_HCD=y
755# CONFIG_USB is not set 755# CONFIG_USB is not set
756# CONFIG_USB_OTG_WHITELIST is not set 756# CONFIG_USB_OTG_WHITELIST is not set
757# CONFIG_USB_OTG_BLACKLIST_HUB is not set 757# CONFIG_USB_OTG_BLACKLIST_HUB is not set
758# CONFIG_USB_MUSB_HDRC is not set
759# CONFIG_USB_GADGET_MUSB_HDRC is not set
758 760
759# 761#
760# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 762# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
@@ -842,7 +844,10 @@ CONFIG_RTC_DRV_RS5C372=y
842# 844#
843CONFIG_RTC_DRV_SH=y 845CONFIG_RTC_DRV_SH=y
844# CONFIG_DMADEVICES is not set 846# CONFIG_DMADEVICES is not set
845# CONFIG_UIO is not set 847CONFIG_UIO=y
848# CONFIG_UIO_PDRV is not set
849CONFIG_UIO_PDRV_GENIRQ=y
850# CONFIG_UIO_SMX is not set
846 851
847# 852#
848# File systems 853# File systems
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
index 81b3d515fcb3..5580fd471003 100644
--- a/arch/sh/include/asm/uaccess_64.h
+++ b/arch/sh/include/asm/uaccess_64.h
@@ -76,4 +76,6 @@ extern long __put_user_asm_l(void *, long);
76extern long __put_user_asm_q(void *, long); 76extern long __put_user_asm_q(void *, long);
77extern void __put_user_unknown(void); 77extern void __put_user_unknown(void);
78 78
79extern long __strnlen_user(const char *__s, long __n);
80
79#endif /* __ASM_SH_UACCESS_64_H */ 81#endif /* __ASM_SH_UACCESS_64_H */
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index 04c7da968146..e640c63d5811 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -2,7 +2,7 @@
2 * arch/sh/kernel/cpu/sh5/entry.S 2 * arch/sh/kernel/cpu/sh5/entry.S
3 * 3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli 4 * Copyright (C) 2000, 2001 Paolo Alberelli
5 * Copyright (C) 2004 - 2007 Paul Mundt 5 * Copyright (C) 2004 - 2008 Paul Mundt
6 * Copyright (C) 2003, 2004 Richard Curnow 6 * Copyright (C) 2003, 2004 Richard Curnow
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General Public 8 * This file is subject to the terms and conditions of the GNU General Public
@@ -923,6 +923,8 @@ ret_from_exception:
923 blink tr0, ZERO 923 blink tr0, ZERO
924 924
925resume_kernel: 925resume_kernel:
926 CLI()
927
926 pta restore_all, tr0 928 pta restore_all, tr0
927 929
928 getcon KCR0, r6 930 getcon KCR0, r6
@@ -939,19 +941,11 @@ need_resched:
939 andi r7, 0xf0, r7 941 andi r7, 0xf0, r7
940 bne r7, ZERO, tr0 942 bne r7, ZERO, tr0
941 943
942 movi ((PREEMPT_ACTIVE >> 16) & 65535), r8 944 movi preempt_schedule_irq, r7
943 shori (PREEMPT_ACTIVE & 65535), r8
944 st.l r6, TI_PRE_COUNT, r8
945
946 STI()
947 movi schedule, r7
948 ori r7, 1, r7 945 ori r7, 1, r7
949 ptabs r7, tr1 946 ptabs r7, tr1
950 blink tr1, LINK 947 blink tr1, LINK
951 948
952 st.l r6, TI_PRE_COUNT, ZERO
953 CLI()
954
955 pta need_resched, tr1 949 pta need_resched, tr1
956 blink tr1, ZERO 950 blink tr1, ZERO
957#endif 951#endif
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 0bc17def55a7..efbb4268875e 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -92,6 +92,7 @@ ENTRY(ret_from_irq)
92 bra resume_userspace 92 bra resume_userspace
93 nop 93 nop
94ENTRY(resume_kernel) 94ENTRY(resume_kernel)
95 cli
95 mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count 96 mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count
96 tst r0, r0 97 tst r0, r0
97 bf noresched 98 bf noresched
@@ -105,28 +106,9 @@ need_resched:
105 and #0xf0, r0 ! interrupts off (exception path)? 106 and #0xf0, r0 ! interrupts off (exception path)?
106 cmp/eq #0xf0, r0 107 cmp/eq #0xf0, r0
107 bt noresched 108 bt noresched
108
109 mov.l 1f, r0
110 mov.l r0, @(TI_PRE_COUNT,r8)
111
112#ifdef CONFIG_TRACE_IRQFLAGS
113 mov.l 3f, r0 109 mov.l 3f, r0
114 jsr @r0 110 jsr @r0 ! call preempt_schedule_irq
115 nop
116#endif
117 sti
118 mov.l 2f, r0
119 jsr @r0
120 nop
121 mov #0, r0
122 mov.l r0, @(TI_PRE_COUNT,r8)
123 cli
124#ifdef CONFIG_TRACE_IRQFLAGS
125 mov.l 4f, r0
126 jsr @r0
127 nop 111 nop
128#endif
129
130 bra need_resched 112 bra need_resched
131 nop 113 nop
132 114
@@ -137,10 +119,7 @@ noresched:
137 .align 2 119 .align 2
1381: .long PREEMPT_ACTIVE 1201: .long PREEMPT_ACTIVE
1392: .long schedule 1212: .long schedule
140#ifdef CONFIG_TRACE_IRQFLAGS 1223: .long preempt_schedule_irq
1413: .long trace_hardirqs_on
1424: .long trace_hardirqs_off
143#endif
144#endif 123#endif
145 124
146ENTRY(resume_userspace) 125ENTRY(resume_userspace)
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 4703dff174d5..94df56b0d1f6 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -102,7 +102,7 @@ void machine_kexec(struct kimage *image)
102 102
103 /* now call it */ 103 /* now call it */
104 rnk = (relocate_new_kernel_t) reboot_code_buffer; 104 rnk = (relocate_new_kernel_t) reboot_code_buffer;
105 (*rnk)(page_list, reboot_code_buffer, image->start, vbr_reg); 105 (*rnk)(page_list, reboot_code_buffer, P2SEGADDR(image->start), vbr_reg);
106} 106}
107 107
108void arch_crash_save_vmcoreinfo(void) 108void arch_crash_save_vmcoreinfo(void)
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 5922edd416db..9c6424892bd3 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -131,6 +131,8 @@ void user_enable_single_step(struct task_struct *child)
131 131
132void user_disable_single_step(struct task_struct *child) 132void user_disable_single_step(struct task_struct *child)
133{ 133{
134 struct pt_regs *regs = child->thread.uregs;
135
134 regs->sr &= ~SR_SSTEP; 136 regs->sr &= ~SR_SSTEP;
135} 137}
136 138
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index a35207655e7b..de832056bf1b 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -171,6 +171,7 @@ static void __init reserve_crashkernel(void)
171 (unsigned long)(free_mem >> 20)); 171 (unsigned long)(free_mem >> 20));
172 crashk_res.start = crash_base; 172 crashk_res.start = crash_base;
173 crashk_res.end = crash_base + crash_size - 1; 173 crashk_res.end = crash_base + crash_size - 1;
174 insert_resource(&iomem_resource, &crashk_res);
174 } 175 }
175} 176}
176#else 177#else
@@ -204,11 +205,6 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
204 request_resource(res, &data_resource); 205 request_resource(res, &data_resource);
205 request_resource(res, &bss_resource); 206 request_resource(res, &bss_resource);
206 207
207#ifdef CONFIG_KEXEC
208 if (crashk_res.start != crashk_res.end)
209 request_resource(res, &crashk_res);
210#endif
211
212 add_active_range(nid, start_pfn, end_pfn); 208 add_active_range(nid, start_pfn, end_pfn);
213} 209}
214 210
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 59cd2859ce9b..9061b86d73fa 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -170,7 +170,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
170 version = call >> 16; /* hack for backward compatibility */ 170 version = call >> 16; /* hack for backward compatibility */
171 call &= 0xffff; 171 call &= 0xffff;
172 172
173 if (call <= SEMCTL) 173 if (call <= SEMTIMEDOP)
174 switch (call) { 174 switch (call) {
175 case SEMOP: 175 case SEMOP:
176 return sys_semtimedop(first, 176 return sys_semtimedop(first,
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 895bb3f335c7..64b8f7f96f9a 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -101,7 +101,7 @@ static int __init memchunk_setup(char *str)
101} 101}
102__setup("memchunk.", memchunk_setup); 102__setup("memchunk.", memchunk_setup);
103 103
104static void memchunk_cmdline_override(char *name, unsigned long *sizep) 104static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
105{ 105{
106 char *p = boot_command_line; 106 char *p = boot_command_line;
107 int k = strlen(name); 107 int k = strlen(name);
@@ -118,8 +118,8 @@ static void memchunk_cmdline_override(char *name, unsigned long *sizep)
118 } 118 }
119} 119}
120 120
121int platform_resource_setup_memory(struct platform_device *pdev, 121int __init platform_resource_setup_memory(struct platform_device *pdev,
122 char *name, unsigned long memsize) 122 char *name, unsigned long memsize)
123{ 123{
124 struct resource *r; 124 struct resource *r;
125 dma_addr_t dma_handle; 125 dma_addr_t dma_handle;
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index 7201752cf934..a8180e546a48 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -50,27 +50,24 @@ struct seq_file;
50void smp_bogo(struct seq_file *); 50void smp_bogo(struct seq_file *);
51void smp_info(struct seq_file *); 51void smp_info(struct seq_file *);
52 52
53BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) 53BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, cpumask_t, unsigned long, unsigned long, unsigned long, unsigned long)
54BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void) 54BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
55BTFIXUPDEF_BLACKBOX(hard_smp_processor_id) 55BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
56BTFIXUPDEF_BLACKBOX(load_current) 56BTFIXUPDEF_BLACKBOX(load_current)
57 57
58#define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5) 58#define smp_cross_call(func,mask,arg1,arg2,arg3,arg4) BTFIXUP_CALL(smp_cross_call)(func,mask,arg1,arg2,arg3,arg4)
59 59
60static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); } 60static inline void xc0(smpfunc_t func) { smp_cross_call(func, cpu_online_map, 0, 0, 0, 0); }
61static inline void xc1(smpfunc_t func, unsigned long arg1) 61static inline void xc1(smpfunc_t func, unsigned long arg1)
62{ smp_cross_call(func, arg1, 0, 0, 0, 0); } 62{ smp_cross_call(func, cpu_online_map, arg1, 0, 0, 0); }
63static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2) 63static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
64{ smp_cross_call(func, arg1, arg2, 0, 0, 0); } 64{ smp_cross_call(func, cpu_online_map, arg1, arg2, 0, 0); }
65static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2, 65static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
66 unsigned long arg3) 66 unsigned long arg3)
67{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); } 67{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, 0); }
68static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2, 68static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
69 unsigned long arg3, unsigned long arg4) 69 unsigned long arg3, unsigned long arg4)
70{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); } 70{ smp_cross_call(func, cpu_online_map, arg1, arg2, arg3, arg4); }
71static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
72 unsigned long arg3, unsigned long arg4, unsigned long arg5)
73{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
74 71
75static inline int smp_call_function(void (*func)(void *info), void *info, int wait) 72static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
76{ 73{
@@ -78,6 +75,14 @@ static inline int smp_call_function(void (*func)(void *info), void *info, int wa
78 return 0; 75 return 0;
79} 76}
80 77
78static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
79 void *info, int wait)
80{
81 smp_cross_call((smpfunc_t)func, cpumask_of_cpu(cpuid),
82 (unsigned long) info, 0, 0, 0);
83 return 0;
84}
85
81static inline int cpu_logical_map(int cpu) 86static inline int cpu_logical_map(int cpu)
82{ 87{
83 return cpu; 88 return cpu;
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index cc4c235c4f59..c481d45f97b7 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -70,7 +70,7 @@ struct of_bus {
70 int *addrc, int *sizec); 70 int *addrc, int *sizec);
71 int (*map)(u32 *addr, const u32 *range, 71 int (*map)(u32 *addr, const u32 *range,
72 int na, int ns, int pna); 72 int na, int ns, int pna);
73 unsigned int (*get_flags)(const u32 *addr); 73 unsigned long (*get_flags)(const u32 *addr, unsigned long);
74}; 74};
75 75
76/* 76/*
@@ -130,8 +130,10 @@ static int of_bus_default_map(u32 *addr, const u32 *range,
130 return 0; 130 return 0;
131} 131}
132 132
133static unsigned int of_bus_default_get_flags(const u32 *addr) 133static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
134{ 134{
135 if (flags)
136 return flags;
135 return IORESOURCE_MEM; 137 return IORESOURCE_MEM;
136} 138}
137 139
@@ -194,17 +196,21 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
194 return 0; 196 return 0;
195} 197}
196 198
197static unsigned int of_bus_pci_get_flags(const u32 *addr) 199static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
198{ 200{
199 unsigned int flags = 0;
200 u32 w = addr[0]; 201 u32 w = addr[0];
201 202
203 /* For PCI, we override whatever child busses may have used. */
204 flags = 0;
202 switch((w >> 24) & 0x03) { 205 switch((w >> 24) & 0x03) {
203 case 0x01: 206 case 0x01:
204 flags |= IORESOURCE_IO; 207 flags |= IORESOURCE_IO;
208 break;
209
205 case 0x02: /* 32 bits */ 210 case 0x02: /* 32 bits */
206 case 0x03: /* 64 bits */ 211 case 0x03: /* 64 bits */
207 flags |= IORESOURCE_MEM; 212 flags |= IORESOURCE_MEM;
213 break;
208 } 214 }
209 if (w & 0x40000000) 215 if (w & 0x40000000)
210 flags |= IORESOURCE_PREFETCH; 216 flags |= IORESOURCE_PREFETCH;
@@ -362,10 +368,11 @@ static void __init build_device_resources(struct of_device *op,
362 int pna, pns; 368 int pna, pns;
363 369
364 size = of_read_addr(reg + na, ns); 370 size = of_read_addr(reg + na, ns);
365 flags = bus->get_flags(reg);
366 371
367 memcpy(addr, reg, na * 4); 372 memcpy(addr, reg, na * 4);
368 373
374 flags = bus->get_flags(reg, 0);
375
369 /* If the immediate parent has no ranges property to apply, 376 /* If the immediate parent has no ranges property to apply,
370 * just use a 1<->1 mapping. 377 * just use a 1<->1 mapping.
371 */ 378 */
@@ -393,6 +400,8 @@ static void __init build_device_resources(struct of_device *op,
393 dna, dns, pna)) 400 dna, dns, pna))
394 break; 401 break;
395 402
403 flags = pbus->get_flags(addr, flags);
404
396 dna = pna; 405 dna = pna;
397 dns = pns; 406 dns = pns;
398 dbus = pbus; 407 dbus = pbus;
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index dfde77ff0848..69596402a500 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -262,8 +262,9 @@ static struct smp_funcall {
262static DEFINE_SPINLOCK(cross_call_lock); 262static DEFINE_SPINLOCK(cross_call_lock);
263 263
264/* Cross calls must be serialized, at least currently. */ 264/* Cross calls must be serialized, at least currently. */
265void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2, 265static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
266 unsigned long arg3, unsigned long arg4, unsigned long arg5) 266 unsigned long arg2, unsigned long arg3,
267 unsigned long arg4)
267{ 268{
268 if(smp_processors_ready) { 269 if(smp_processors_ready) {
269 register int high = smp_highest_cpu; 270 register int high = smp_highest_cpu;
@@ -278,7 +279,7 @@ void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
278 register unsigned long a2 asm("i2") = arg2; 279 register unsigned long a2 asm("i2") = arg2;
279 register unsigned long a3 asm("i3") = arg3; 280 register unsigned long a3 asm("i3") = arg3;
280 register unsigned long a4 asm("i4") = arg4; 281 register unsigned long a4 asm("i4") = arg4;
281 register unsigned long a5 asm("i5") = arg5; 282 register unsigned long a5 asm("i5") = 0;
282 283
283 __asm__ __volatile__( 284 __asm__ __volatile__(
284 "std %0, [%6]\n\t" 285 "std %0, [%6]\n\t"
@@ -290,11 +291,10 @@ void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
290 291
291 /* Init receive/complete mapping, plus fire the IPI's off. */ 292 /* Init receive/complete mapping, plus fire the IPI's off. */
292 { 293 {
293 cpumask_t mask;
294 register int i; 294 register int i;
295 295
296 mask = cpumask_of_cpu(hard_smp4d_processor_id()); 296 cpu_clear(smp_processor_id(), mask);
297 cpus_andnot(mask, cpu_online_map, mask); 297 cpus_and(mask, cpu_online_map, mask);
298 for(i = 0; i <= high; i++) { 298 for(i = 0; i <= high; i++) {
299 if (cpu_isset(i, mask)) { 299 if (cpu_isset(i, mask)) {
300 ccall_info.processors_in[i] = 0; 300 ccall_info.processors_in[i] = 0;
@@ -309,12 +309,16 @@ void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
309 309
310 i = 0; 310 i = 0;
311 do { 311 do {
312 if (!cpu_isset(i, mask))
313 continue;
312 while(!ccall_info.processors_in[i]) 314 while(!ccall_info.processors_in[i])
313 barrier(); 315 barrier();
314 } while(++i <= high); 316 } while(++i <= high);
315 317
316 i = 0; 318 i = 0;
317 do { 319 do {
320 if (!cpu_isset(i, mask))
321 continue;
318 while(!ccall_info.processors_out[i]) 322 while(!ccall_info.processors_out[i])
319 barrier(); 323 barrier();
320 } while(++i <= high); 324 } while(++i <= high);
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 406ac1abc83a..a14a76ac7f36 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -244,9 +244,9 @@ static struct smp_funcall {
244static DEFINE_SPINLOCK(cross_call_lock); 244static DEFINE_SPINLOCK(cross_call_lock);
245 245
246/* Cross calls must be serialized, at least currently. */ 246/* Cross calls must be serialized, at least currently. */
247static void smp4m_cross_call(smpfunc_t func, unsigned long arg1, 247static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
248 unsigned long arg2, unsigned long arg3, 248 unsigned long arg2, unsigned long arg3,
249 unsigned long arg4, unsigned long arg5) 249 unsigned long arg4)
250{ 250{
251 register int ncpus = SUN4M_NCPUS; 251 register int ncpus = SUN4M_NCPUS;
252 unsigned long flags; 252 unsigned long flags;
@@ -259,14 +259,14 @@ static void smp4m_cross_call(smpfunc_t func, unsigned long arg1,
259 ccall_info.arg2 = arg2; 259 ccall_info.arg2 = arg2;
260 ccall_info.arg3 = arg3; 260 ccall_info.arg3 = arg3;
261 ccall_info.arg4 = arg4; 261 ccall_info.arg4 = arg4;
262 ccall_info.arg5 = arg5; 262 ccall_info.arg5 = 0;
263 263
264 /* Init receive/complete mapping, plus fire the IPI's off. */ 264 /* Init receive/complete mapping, plus fire the IPI's off. */
265 { 265 {
266 cpumask_t mask = cpu_online_map;
267 register int i; 266 register int i;
268 267
269 cpu_clear(smp_processor_id(), mask); 268 cpu_clear(smp_processor_id(), mask);
269 cpus_and(mask, cpu_online_map, mask);
270 for(i = 0; i < ncpus; i++) { 270 for(i = 0; i < ncpus; i++) {
271 if (cpu_isset(i, mask)) { 271 if (cpu_isset(i, mask)) {
272 ccall_info.processors_in[i] = 0; 272 ccall_info.processors_in[i] = 0;
@@ -284,12 +284,16 @@ static void smp4m_cross_call(smpfunc_t func, unsigned long arg1,
284 284
285 i = 0; 285 i = 0;
286 do { 286 do {
287 if (!cpu_isset(i, mask))
288 continue;
287 while(!ccall_info.processors_in[i]) 289 while(!ccall_info.processors_in[i])
288 barrier(); 290 barrier();
289 } while(++i < ncpus); 291 } while(++i < ncpus);
290 292
291 i = 0; 293 i = 0;
292 do { 294 do {
295 if (!cpu_isset(i, mask))
296 continue;
293 while(!ccall_info.processors_out[i]) 297 while(!ccall_info.processors_out[i])
294 barrier(); 298 barrier();
295 } while(++i < ncpus); 299 } while(++i < ncpus);
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index f8b50cbf4bf7..f845f150f565 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -96,7 +96,7 @@ struct of_bus {
96 int *addrc, int *sizec); 96 int *addrc, int *sizec);
97 int (*map)(u32 *addr, const u32 *range, 97 int (*map)(u32 *addr, const u32 *range,
98 int na, int ns, int pna); 98 int na, int ns, int pna);
99 unsigned int (*get_flags)(const u32 *addr); 99 unsigned long (*get_flags)(const u32 *addr, unsigned long);
100}; 100};
101 101
102/* 102/*
@@ -156,8 +156,10 @@ static int of_bus_default_map(u32 *addr, const u32 *range,
156 return 0; 156 return 0;
157} 157}
158 158
159static unsigned int of_bus_default_get_flags(const u32 *addr) 159static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
160{ 160{
161 if (flags)
162 return flags;
161 return IORESOURCE_MEM; 163 return IORESOURCE_MEM;
162} 164}
163 165
@@ -249,17 +251,21 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
249 return 0; 251 return 0;
250} 252}
251 253
252static unsigned int of_bus_pci_get_flags(const u32 *addr) 254static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
253{ 255{
254 unsigned int flags = 0;
255 u32 w = addr[0]; 256 u32 w = addr[0];
256 257
258 /* For PCI, we override whatever child busses may have used. */
259 flags = 0;
257 switch((w >> 24) & 0x03) { 260 switch((w >> 24) & 0x03) {
258 case 0x01: 261 case 0x01:
259 flags |= IORESOURCE_IO; 262 flags |= IORESOURCE_IO;
263 break;
264
260 case 0x02: /* 32 bits */ 265 case 0x02: /* 32 bits */
261 case 0x03: /* 64 bits */ 266 case 0x03: /* 64 bits */
262 flags |= IORESOURCE_MEM; 267 flags |= IORESOURCE_MEM;
268 break;
263 } 269 }
264 if (w & 0x40000000) 270 if (w & 0x40000000)
265 flags |= IORESOURCE_PREFETCH; 271 flags |= IORESOURCE_PREFETCH;
@@ -478,10 +484,10 @@ static void __init build_device_resources(struct of_device *op,
478 int pna, pns; 484 int pna, pns;
479 485
480 size = of_read_addr(reg + na, ns); 486 size = of_read_addr(reg + na, ns);
481 flags = bus->get_flags(reg);
482
483 memcpy(addr, reg, na * 4); 487 memcpy(addr, reg, na * 4);
484 488
489 flags = bus->get_flags(addr, 0);
490
485 if (use_1to1_mapping(pp)) { 491 if (use_1to1_mapping(pp)) {
486 result = of_read_addr(addr, na); 492 result = of_read_addr(addr, na);
487 goto build_res; 493 goto build_res;
@@ -506,6 +512,8 @@ static void __init build_device_resources(struct of_device *op,
506 dna, dns, pna)) 512 dna, dns, pna))
507 break; 513 break;
508 514
515 flags = pbus->get_flags(addr, flags);
516
509 dna = pna; 517 dna = pna;
510 dns = pns; 518 dns = pns;
511 dbus = pbus; 519 dbus = pbus;
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 743ccad61c60..2be166c544ca 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -80,8 +80,6 @@ void smp_bogo(struct seq_file *m)
80 i, cpu_data(i).clock_tick); 80 i, cpu_data(i).clock_tick);
81} 81}
82 82
83static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
84
85extern void setup_sparc64_timer(void); 83extern void setup_sparc64_timer(void);
86 84
87static volatile unsigned long callin_flag = 0; 85static volatile unsigned long callin_flag = 0;
@@ -120,9 +118,9 @@ void __cpuinit smp_callin(void)
120 while (!cpu_isset(cpuid, smp_commenced_mask)) 118 while (!cpu_isset(cpuid, smp_commenced_mask))
121 rmb(); 119 rmb();
122 120
123 spin_lock(&call_lock); 121 ipi_call_lock();
124 cpu_set(cpuid, cpu_online_map); 122 cpu_set(cpuid, cpu_online_map);
125 spin_unlock(&call_lock); 123 ipi_call_unlock();
126 124
127 /* idle thread is expected to have preempt disabled */ 125 /* idle thread is expected to have preempt disabled */
128 preempt_disable(); 126 preempt_disable();
@@ -1305,10 +1303,6 @@ int __cpu_disable(void)
1305 c->core_id = 0; 1303 c->core_id = 0;
1306 c->proc_id = -1; 1304 c->proc_id = -1;
1307 1305
1308 spin_lock(&call_lock);
1309 cpu_clear(cpu, cpu_online_map);
1310 spin_unlock(&call_lock);
1311
1312 smp_wmb(); 1306 smp_wmb();
1313 1307
1314 /* Make sure no interrupts point to this cpu. */ 1308 /* Make sure no interrupts point to this cpu. */
@@ -1318,6 +1312,10 @@ int __cpu_disable(void)
1318 mdelay(1); 1312 mdelay(1);
1319 local_irq_disable(); 1313 local_irq_disable();
1320 1314
1315 ipi_call_lock();
1316 cpu_clear(cpu, cpu_online_map);
1317 ipi_call_unlock();
1318
1321 return 0; 1319 return 0;
1322} 1320}
1323 1321
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b4aeb0f696dc..a41df7bef035 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1843,7 +1843,7 @@ static int pavail_rescan_ents __initdata;
1843 * memory list again, and make sure it provides at least as much 1843 * memory list again, and make sure it provides at least as much
1844 * memory as 'pavail' does. 1844 * memory as 'pavail' does.
1845 */ 1845 */
1846static void setup_valid_addr_bitmap_from_pavail(void) 1846static void __init setup_valid_addr_bitmap_from_pavail(void)
1847{ 1847{
1848 int i; 1848 int i;
1849 1849
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 4b9ae7c56748..4d3ff037201f 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -38,12 +38,12 @@ static const u32 req_flags[NCAPINTS] =
38{ 38{
39 REQUIRED_MASK0, 39 REQUIRED_MASK0,
40 REQUIRED_MASK1, 40 REQUIRED_MASK1,
41 REQUIRED_MASK2, 41 0, /* REQUIRED_MASK2 not implemented in this file */
42 REQUIRED_MASK3, 42 0, /* REQUIRED_MASK3 not implemented in this file */
43 REQUIRED_MASK4, 43 REQUIRED_MASK4,
44 REQUIRED_MASK5, 44 0, /* REQUIRED_MASK5 not implemented in this file */
45 REQUIRED_MASK6, 45 REQUIRED_MASK6,
46 REQUIRED_MASK7, 46 0, /* REQUIRED_MASK7 not implemented in this file */
47}; 47};
48 48
49#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a)) 49#define A32(a, b, c, d) (((d) << 24)+((c) << 16)+((b) << 8)+(a))
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 2763cb37b553..65a0c1b48696 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -145,35 +145,25 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
145extern char __vsyscall_0; 145extern char __vsyscall_0;
146const unsigned char *const *find_nop_table(void) 146const unsigned char *const *find_nop_table(void)
147{ 147{
148 return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || 148 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
149 boot_cpu_data.x86 < 6 ? k8_nops : p6_nops; 149 boot_cpu_has(X86_FEATURE_NOPL))
150 return p6_nops;
151 else
152 return k8_nops;
150} 153}
151 154
152#else /* CONFIG_X86_64 */ 155#else /* CONFIG_X86_64 */
153 156
154static const struct nop {
155 int cpuid;
156 const unsigned char *const *noptable;
157} noptypes[] = {
158 { X86_FEATURE_K8, k8_nops },
159 { X86_FEATURE_K7, k7_nops },
160 { X86_FEATURE_P4, p6_nops },
161 { X86_FEATURE_P3, p6_nops },
162 { -1, NULL }
163};
164
165const unsigned char *const *find_nop_table(void) 157const unsigned char *const *find_nop_table(void)
166{ 158{
167 const unsigned char *const *noptable = intel_nops; 159 if (boot_cpu_has(X86_FEATURE_K8))
168 int i; 160 return k8_nops;
169 161 else if (boot_cpu_has(X86_FEATURE_K7))
170 for (i = 0; noptypes[i].cpuid >= 0; i++) { 162 return k7_nops;
171 if (boot_cpu_has(noptypes[i].cpuid)) { 163 else if (boot_cpu_has(X86_FEATURE_NOPL))
172 noptable = noptypes[i].noptable; 164 return p6_nops;
173 break; 165 else
174 } 166 return intel_nops;
175 }
176 return noptable;
177} 167}
178 168
179#endif /* CONFIG_X86_64 */ 169#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index cae9cabc3031..18514ed26104 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
31 if (c->x86_power & (1<<8)) 31 if (c->x86_power & (1<<8))
32 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 32 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
33 } 33 }
34
35 /* Set MTRR capability flag if appropriate */
36 if (c->x86_model == 13 || c->x86_model == 9 ||
37 (c->x86_model == 8 && c->x86_mask >= 8))
38 set_cpu_cap(c, X86_FEATURE_K6_MTRR);
34} 39}
35 40
36static void __cpuinit init_amd(struct cpuinfo_x86 *c) 41static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
166 mbytes); 171 mbytes);
167 } 172 }
168 173
169 /* Set MTRR capability flag if appropriate */
170 if (c->x86_model == 13 || c->x86_model == 9 ||
171 (c->x86_model == 8 && c->x86_mask >= 8))
172 set_cpu_cap(c, X86_FEATURE_K6_MTRR);
173 break; 174 break;
174 } 175 }
175 176
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index e0f45edd6a55..a0534c04d38a 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -314,6 +314,16 @@ enum {
314 EAMD3D = 1<<20, 314 EAMD3D = 1<<20,
315}; 315};
316 316
317static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
318{
319 switch (c->x86) {
320 case 5:
321 /* Emulate MTRRs using Centaur's MCR. */
322 set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
323 break;
324 }
325}
326
317static void __cpuinit init_centaur(struct cpuinfo_x86 *c) 327static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
318{ 328{
319 329
@@ -462,6 +472,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
462static struct cpu_dev centaur_cpu_dev __cpuinitdata = { 472static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
463 .c_vendor = "Centaur", 473 .c_vendor = "Centaur",
464 .c_ident = { "CentaurHauls" }, 474 .c_ident = { "CentaurHauls" },
475 .c_early_init = early_init_centaur,
465 .c_init = init_centaur, 476 .c_init = init_centaur,
466 .c_size_cache = centaur_size_cache, 477 .c_size_cache = centaur_size_cache,
467}; 478};
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 80ab20d4fa39..8aab8517642e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,6 +13,7 @@
13#include <asm/mtrr.h> 13#include <asm/mtrr.h>
14#include <asm/mce.h> 14#include <asm/mce.h>
15#include <asm/pat.h> 15#include <asm/pat.h>
16#include <asm/asm.h>
16#ifdef CONFIG_X86_LOCAL_APIC 17#ifdef CONFIG_X86_LOCAL_APIC
17#include <asm/mpspec.h> 18#include <asm/mpspec.h>
18#include <asm/apic.h> 19#include <asm/apic.h>
@@ -334,11 +335,40 @@ static void __init early_cpu_detect(void)
334 335
335 get_cpu_vendor(c, 1); 336 get_cpu_vendor(c, 1);
336 337
338 early_get_cap(c);
339
337 if (c->x86_vendor != X86_VENDOR_UNKNOWN && 340 if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
338 cpu_devs[c->x86_vendor]->c_early_init) 341 cpu_devs[c->x86_vendor]->c_early_init)
339 cpu_devs[c->x86_vendor]->c_early_init(c); 342 cpu_devs[c->x86_vendor]->c_early_init(c);
343}
340 344
341 early_get_cap(c); 345/*
346 * The NOPL instruction is supposed to exist on all CPUs with
347 * family >= 6, unfortunately, that's not true in practice because
348 * of early VIA chips and (more importantly) broken virtualizers that
349 * are not easy to detect. Hence, probe for it based on first
350 * principles.
351 */
352static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
353{
354 const u32 nopl_signature = 0x888c53b1; /* Random number */
355 u32 has_nopl = nopl_signature;
356
357 clear_cpu_cap(c, X86_FEATURE_NOPL);
358 if (c->x86 >= 6) {
359 asm volatile("\n"
360 "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
361 "2:\n"
362 " .section .fixup,\"ax\"\n"
363 "3: xor %0,%0\n"
364 " jmp 2b\n"
365 " .previous\n"
366 _ASM_EXTABLE(1b,3b)
367 : "+a" (has_nopl));
368
369 if (has_nopl == nopl_signature)
370 set_cpu_cap(c, X86_FEATURE_NOPL);
371 }
342} 372}
343 373
344static void __cpuinit generic_identify(struct cpuinfo_x86 *c) 374static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
@@ -395,8 +425,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
395 } 425 }
396 426
397 init_scattered_cpuid_features(c); 427 init_scattered_cpuid_features(c);
428 detect_nopl(c);
398 } 429 }
399
400} 430}
401 431
402static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 432static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index dd6e3f15017e..a11f5d4477cd 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -18,6 +18,7 @@
18#include <asm/mtrr.h> 18#include <asm/mtrr.h>
19#include <asm/mce.h> 19#include <asm/mce.h>
20#include <asm/pat.h> 20#include <asm/pat.h>
21#include <asm/asm.h>
21#include <asm/numa.h> 22#include <asm/numa.h>
22#ifdef CONFIG_X86_LOCAL_APIC 23#ifdef CONFIG_X86_LOCAL_APIC
23#include <asm/mpspec.h> 24#include <asm/mpspec.h>
@@ -215,6 +216,39 @@ static void __init early_cpu_support_print(void)
215 } 216 }
216} 217}
217 218
219/*
220 * The NOPL instruction is supposed to exist on all CPUs with
221 * family >= 6, unfortunately, that's not true in practice because
222 * of early VIA chips and (more importantly) broken virtualizers that
223 * are not easy to detect. Hence, probe for it based on first
224 * principles.
225 *
226 * Note: no 64-bit chip is known to lack these, but put the code here
227 * for consistency with 32 bits, and to make it utterly trivial to
228 * diagnose the problem should it ever surface.
229 */
230static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
231{
232 const u32 nopl_signature = 0x888c53b1; /* Random number */
233 u32 has_nopl = nopl_signature;
234
235 clear_cpu_cap(c, X86_FEATURE_NOPL);
236 if (c->x86 >= 6) {
237 asm volatile("\n"
238 "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
239 "2:\n"
240 " .section .fixup,\"ax\"\n"
241 "3: xor %0,%0\n"
242 " jmp 2b\n"
243 " .previous\n"
244 _ASM_EXTABLE(1b,3b)
245 : "+a" (has_nopl));
246
247 if (has_nopl == nopl_signature)
248 set_cpu_cap(c, X86_FEATURE_NOPL);
249 }
250}
251
218static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c); 252static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
219 253
220void __init early_cpu_init(void) 254void __init early_cpu_init(void)
@@ -313,6 +347,8 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
313 c->x86_phys_bits = eax & 0xff; 347 c->x86_phys_bits = eax & 0xff;
314 } 348 }
315 349
350 detect_nopl(c);
351
316 if (c->x86_vendor != X86_VENDOR_UNKNOWN && 352 if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
317 cpu_devs[c->x86_vendor]->c_early_init) 353 cpu_devs[c->x86_vendor]->c_early_init)
318 cpu_devs[c->x86_vendor]->c_early_init(c); 354 cpu_devs[c->x86_vendor]->c_early_init(c);
@@ -493,17 +529,20 @@ void pda_init(int cpu)
493 /* others are initialized in smpboot.c */ 529 /* others are initialized in smpboot.c */
494 pda->pcurrent = &init_task; 530 pda->pcurrent = &init_task;
495 pda->irqstackptr = boot_cpu_stack; 531 pda->irqstackptr = boot_cpu_stack;
532 pda->irqstackptr += IRQSTACKSIZE - 64;
496 } else { 533 } else {
497 pda->irqstackptr = (char *) 534 if (!pda->irqstackptr) {
498 __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER); 535 pda->irqstackptr = (char *)
499 if (!pda->irqstackptr) 536 __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
500 panic("cannot allocate irqstack for cpu %d", cpu); 537 if (!pda->irqstackptr)
538 panic("cannot allocate irqstack for cpu %d",
539 cpu);
540 pda->irqstackptr += IRQSTACKSIZE - 64;
541 }
501 542
502 if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE) 543 if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
503 pda->nodenumber = cpu_to_node(cpu); 544 pda->nodenumber = cpu_to_node(cpu);
504 } 545 }
505
506 pda->irqstackptr += IRQSTACKSIZE-64;
507} 546}
508 547
509char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + 548char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
@@ -601,19 +640,22 @@ void __cpuinit cpu_init(void)
601 /* 640 /*
602 * set up and load the per-CPU TSS 641 * set up and load the per-CPU TSS
603 */ 642 */
604 for (v = 0; v < N_EXCEPTION_STACKS; v++) { 643 if (!orig_ist->ist[0]) {
605 static const unsigned int order[N_EXCEPTION_STACKS] = { 644 static const unsigned int order[N_EXCEPTION_STACKS] = {
606 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER, 645 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
607 [DEBUG_STACK - 1] = DEBUG_STACK_ORDER 646 [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
608 }; 647 };
609 if (cpu) { 648 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
610 estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]); 649 if (cpu) {
611 if (!estacks) 650 estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
612 panic("Cannot allocate exception stack %ld %d\n", 651 if (!estacks)
613 v, cpu); 652 panic("Cannot allocate exception "
653 "stack %ld %d\n", v, cpu);
654 }
655 estacks += PAGE_SIZE << order[v];
656 orig_ist->ist[v] = t->x86_tss.ist[v] =
657 (unsigned long)estacks;
614 } 658 }
615 estacks += PAGE_SIZE << order[v];
616 orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
617 } 659 }
618 660
619 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 661 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index e710a21bb6e8..898a5a2002ed 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -15,13 +15,11 @@
15/* 15/*
16 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU 16 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
17 */ 17 */
18static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) 18static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
19{ 19{
20 unsigned char ccr2, ccr3; 20 unsigned char ccr2, ccr3;
21 unsigned long flags;
22 21
23 /* we test for DEVID by checking whether CCR3 is writable */ 22 /* we test for DEVID by checking whether CCR3 is writable */
24 local_irq_save(flags);
25 ccr3 = getCx86(CX86_CCR3); 23 ccr3 = getCx86(CX86_CCR3);
26 setCx86(CX86_CCR3, ccr3 ^ 0x80); 24 setCx86(CX86_CCR3, ccr3 ^ 0x80);
27 getCx86(0xc0); /* dummy to change bus */ 25 getCx86(0xc0); /* dummy to change bus */
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
44 *dir0 = getCx86(CX86_DIR0); 42 *dir0 = getCx86(CX86_DIR0);
45 *dir1 = getCx86(CX86_DIR1); 43 *dir1 = getCx86(CX86_DIR1);
46 } 44 }
47 local_irq_restore(flags);
48} 45}
49 46
47static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
48{
49 unsigned long flags;
50
51 local_irq_save(flags);
52 __do_cyrix_devid(dir0, dir1);
53 local_irq_restore(flags);
54}
50/* 55/*
51 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in 56 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
52 * order to identify the Cyrix CPU model after we're out of setup.c 57 * order to identify the Cyrix CPU model after we're out of setup.c
@@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void)
161 local_irq_restore(flags); 166 local_irq_restore(flags);
162} 167}
163 168
169static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
170{
171 unsigned char dir0, dir0_msn, dir1 = 0;
172
173 __do_cyrix_devid(&dir0, &dir1);
174 dir0_msn = dir0 >> 4; /* identifies CPU "family" */
175
176 switch (dir0_msn) {
177 case 3: /* 6x86/6x86L */
178 /* Emulate MTRRs using Cyrix's ARRs. */
179 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
180 break;
181 case 5: /* 6x86MX/M II */
182 /* Emulate MTRRs using Cyrix's ARRs. */
183 set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
184 break;
185 }
186}
164 187
165static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) 188static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
166{ 189{
@@ -416,6 +439,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
416static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { 439static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
417 .c_vendor = "Cyrix", 440 .c_vendor = "Cyrix",
418 .c_ident = { "CyrixInstead" }, 441 .c_ident = { "CyrixInstead" },
442 .c_early_init = early_init_cyrix,
419 .c_init = init_cyrix, 443 .c_init = init_cyrix,
420 .c_identify = cyrix_identify, 444 .c_identify = cyrix_identify,
421}; 445};
diff --git a/arch/x86/kernel/cpu/feature_names.c b/arch/x86/kernel/cpu/feature_names.c
index e43ad4ad4cba..c9017799497c 100644
--- a/arch/x86/kernel/cpu/feature_names.c
+++ b/arch/x86/kernel/cpu/feature_names.c
@@ -39,7 +39,8 @@ const char * const x86_cap_flags[NCAPINTS*32] = {
39 NULL, NULL, NULL, NULL, 39 NULL, NULL, NULL, NULL,
40 "constant_tsc", "up", NULL, "arch_perfmon", 40 "constant_tsc", "up", NULL, "arch_perfmon",
41 "pebs", "bts", NULL, NULL, 41 "pebs", "bts", NULL, NULL,
42 "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL, 42 "rep_good", NULL, NULL, NULL,
43 "nopl", NULL, NULL, NULL,
43 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
44 45
45 /* Intel-defined (#2) */ 46 /* Intel-defined (#2) */
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 59fd3b6b1303..73deaffadd03 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -210,8 +210,8 @@ static void hpet_legacy_clockevent_register(void)
210 /* Calculate the min / max delta */ 210 /* Calculate the min / max delta */
211 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, 211 hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
212 &hpet_clockevent); 212 &hpet_clockevent);
213 hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30, 213 /* 5 usec minimum reprogramming delta. */
214 &hpet_clockevent); 214 hpet_clockevent.min_delta_ns = 5000;
215 215
216 /* 216 /*
217 * Start hpet with the boot cpu mask and make it 217 * Start hpet with the boot cpu mask and make it
@@ -270,15 +270,22 @@ static void hpet_legacy_set_mode(enum clock_event_mode mode,
270} 270}
271 271
272static int hpet_legacy_next_event(unsigned long delta, 272static int hpet_legacy_next_event(unsigned long delta,
273 struct clock_event_device *evt) 273 struct clock_event_device *evt)
274{ 274{
275 unsigned long cnt; 275 u32 cnt;
276 276
277 cnt = hpet_readl(HPET_COUNTER); 277 cnt = hpet_readl(HPET_COUNTER);
278 cnt += delta; 278 cnt += (u32) delta;
279 hpet_writel(cnt, HPET_T0_CMP); 279 hpet_writel(cnt, HPET_T0_CMP);
280 280
281 return ((long)(hpet_readl(HPET_COUNTER) - cnt ) > 0) ? -ETIME : 0; 281 /*
282 * We need to read back the CMP register to make sure that
283 * what we wrote hit the chip before we compare it to the
284 * counter.
285 */
286 WARN_ON((u32)hpet_readl(HPET_T0_CMP) != cnt);
287
288 return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
282} 289}
283 290
284/* 291/*
diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
index 1c3a66a67f83..720d2607aacb 100644
--- a/arch/x86/kernel/io_delay.c
+++ b/arch/x86/kernel/io_delay.c
@@ -92,6 +92,14 @@ static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
92 DMI_MATCH(DMI_BOARD_NAME, "30BF") 92 DMI_MATCH(DMI_BOARD_NAME, "30BF")
93 } 93 }
94 }, 94 },
95 {
96 .callback = dmi_io_delay_0xed_port,
97 .ident = "Presario F700",
98 .matches = {
99 DMI_MATCH(DMI_BOARD_VENDOR, "Quanta"),
100 DMI_MATCH(DMI_BOARD_NAME, "30D3")
101 }
102 },
95 { } 103 { }
96}; 104};
97 105
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 8e786b0d665a..8f98e9de1b82 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -122,80 +122,216 @@ static u64 tsc_read_refs(u64 *pm, u64 *hpet)
122 return ULLONG_MAX; 122 return ULLONG_MAX;
123} 123}
124 124
125/** 125/*
126 * native_calibrate_tsc - calibrate the tsc on boot 126 * Try to calibrate the TSC against the Programmable
127 * Interrupt Timer and return the frequency of the TSC
128 * in kHz.
129 *
130 * Return ULONG_MAX on failure to calibrate.
127 */ 131 */
128unsigned long native_calibrate_tsc(void) 132static unsigned long pit_calibrate_tsc(void)
129{ 133{
130 unsigned long flags; 134 u64 tsc, t1, t2, delta;
131 u64 tsc1, tsc2, tr1, tr2, delta, pm1, pm2, hpet1, hpet2; 135 unsigned long tscmin, tscmax;
132 int hpet = is_hpet_enabled(); 136 int pitcnt;
133 unsigned int tsc_khz_val = 0;
134
135 local_irq_save(flags);
136
137 tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
138 137
138 /* Set the Gate high, disable speaker */
139 outb((inb(0x61) & ~0x02) | 0x01, 0x61); 139 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
140 140
141 /*
142 * Setup CTC channel 2* for mode 0, (interrupt on terminal
143 * count mode), binary count. Set the latch register to 50ms
144 * (LSB then MSB) to begin countdown.
145 */
141 outb(0xb0, 0x43); 146 outb(0xb0, 0x43);
142 outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42); 147 outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
143 outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42); 148 outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
144 tr1 = get_cycles();
145 while ((inb(0x61) & 0x20) == 0);
146 tr2 = get_cycles();
147 149
148 tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL); 150 tsc = t1 = t2 = get_cycles();
149 151
150 local_irq_restore(flags); 152 pitcnt = 0;
153 tscmax = 0;
154 tscmin = ULONG_MAX;
155 while ((inb(0x61) & 0x20) == 0) {
156 t2 = get_cycles();
157 delta = t2 - tsc;
158 tsc = t2;
159 if ((unsigned long) delta < tscmin)
160 tscmin = (unsigned int) delta;
161 if ((unsigned long) delta > tscmax)
162 tscmax = (unsigned int) delta;
163 pitcnt++;
164 }
151 165
152 /* 166 /*
153 * Preset the result with the raw and inaccurate PIT 167 * Sanity checks:
154 * calibration value 168 *
169 * If we were not able to read the PIT more than 5000
170 * times, then we have been hit by a massive SMI
171 *
172 * If the maximum is 10 times larger than the minimum,
173 * then we got hit by an SMI as well.
155 */ 174 */
156 delta = (tr2 - tr1); 175 if (pitcnt < 5000 || tscmax > 10 * tscmin)
176 return ULONG_MAX;
177
178 /* Calculate the PIT value */
179 delta = t2 - t1;
157 do_div(delta, 50); 180 do_div(delta, 50);
158 tsc_khz_val = delta; 181 return delta;
182}
183
184
185/**
186 * native_calibrate_tsc - calibrate the tsc on boot
187 */
188unsigned long native_calibrate_tsc(void)
189{
190 u64 tsc1, tsc2, delta, pm1, pm2, hpet1, hpet2;
191 unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
192 unsigned long flags;
193 int hpet = is_hpet_enabled(), i;
159 194
160 /* hpet or pmtimer available ? */ 195 /*
196 * Run 5 calibration loops to get the lowest frequency value
197 * (the best estimate). We use two different calibration modes
198 * here:
199 *
200 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
201 * load a timeout of 50ms. We read the time right after we
202 * started the timer and wait until the PIT count down reaches
203 * zero. In each wait loop iteration we read the TSC and check
204 * the delta to the previous read. We keep track of the min
205 * and max values of that delta. The delta is mostly defined
206 * by the IO time of the PIT access, so we can detect when a
207 * SMI/SMM disturbance happend between the two reads. If the
208 * maximum time is significantly larger than the minimum time,
209 * then we discard the result and have another try.
210 *
211 * 2) Reference counter. If available we use the HPET or the
212 * PMTIMER as a reference to check the sanity of that value.
213 * We use separate TSC readouts and check inside of the
214 * reference read for a SMI/SMM disturbance. We dicard
215 * disturbed values here as well. We do that around the PIT
216 * calibration delay loop as we have to wait for a certain
217 * amount of time anyway.
218 */
219 for (i = 0; i < 5; i++) {
220 unsigned long tsc_pit_khz;
221
222 /*
223 * Read the start value and the reference count of
224 * hpet/pmtimer when available. Then do the PIT
225 * calibration, which will take at least 50ms, and
226 * read the end value.
227 */
228 local_irq_save(flags);
229 tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
230 tsc_pit_khz = pit_calibrate_tsc();
231 tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
232 local_irq_restore(flags);
233
234 /* Pick the lowest PIT TSC calibration so far */
235 tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
236
237 /* hpet or pmtimer available ? */
238 if (!hpet && !pm1 && !pm2)
239 continue;
240
241 /* Check, whether the sampling was disturbed by an SMI */
242 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
243 continue;
244
245 tsc2 = (tsc2 - tsc1) * 1000000LL;
246
247 if (hpet) {
248 if (hpet2 < hpet1)
249 hpet2 += 0x100000000ULL;
250 hpet2 -= hpet1;
251 tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
252 do_div(tsc1, 1000000);
253 } else {
254 if (pm2 < pm1)
255 pm2 += (u64)ACPI_PM_OVRRUN;
256 pm2 -= pm1;
257 tsc1 = pm2 * 1000000000LL;
258 do_div(tsc1, PMTMR_TICKS_PER_SEC);
259 }
260
261 do_div(tsc2, tsc1);
262 tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);
263 }
264
265 /*
266 * Now check the results.
267 */
268 if (tsc_pit_min == ULONG_MAX) {
269 /* PIT gave no useful value */
270 printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
271
272 /* We don't have an alternative source, disable TSC */
273 if (!hpet && !pm1 && !pm2) {
274 printk("TSC: No reference (HPET/PMTIMER) available\n");
275 return 0;
276 }
277
278 /* The alternative source failed as well, disable TSC */
279 if (tsc_ref_min == ULONG_MAX) {
280 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
281 "failed due to SMI disturbance.\n");
282 return 0;
283 }
284
285 /* Use the alternative source */
286 printk(KERN_INFO "TSC: using %s reference calibration\n",
287 hpet ? "HPET" : "PMTIMER");
288
289 return tsc_ref_min;
290 }
291
292 /* We don't have an alternative source, use the PIT calibration value */
161 if (!hpet && !pm1 && !pm2) { 293 if (!hpet && !pm1 && !pm2) {
162 printk(KERN_INFO "TSC calibrated against PIT\n"); 294 printk(KERN_INFO "TSC: Using PIT calibration value\n");
163 goto out; 295 return tsc_pit_min;
164 } 296 }
165 297
166 /* Check, whether the sampling was disturbed by an SMI */ 298 /* The alternative source failed, use the PIT calibration value */
167 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) { 299 if (tsc_ref_min == ULONG_MAX) {
168 printk(KERN_WARNING "TSC calibration disturbed by SMI, " 300 printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed due "
169 "using PIT calibration result\n"); 301 "to SMI disturbance. Using PIT calibration\n");
170 goto out; 302 return tsc_pit_min;
171 } 303 }
172 304
173 tsc2 = (tsc2 - tsc1) * 1000000LL; 305 /* Check the reference deviation */
174 306 delta = ((u64) tsc_pit_min) * 100;
175 if (hpet) { 307 do_div(delta, tsc_ref_min);
176 printk(KERN_INFO "TSC calibrated against HPET\n"); 308
177 if (hpet2 < hpet1) 309 /*
178 hpet2 += 0x100000000ULL; 310 * If both calibration results are inside a 5% window, the we
179 hpet2 -= hpet1; 311 * use the lower frequency of those as it is probably the
180 tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); 312 * closest estimate.
181 do_div(tsc1, 1000000); 313 */
182 } else { 314 if (delta >= 95 && delta <= 105) {
183 printk(KERN_INFO "TSC calibrated against PM_TIMER\n"); 315 printk(KERN_INFO "TSC: PIT calibration confirmed by %s.\n",
184 if (pm2 < pm1) 316 hpet ? "HPET" : "PMTIMER");
185 pm2 += (u64)ACPI_PM_OVRRUN; 317 printk(KERN_INFO "TSC: using %s calibration value\n",
186 pm2 -= pm1; 318 tsc_pit_min <= tsc_ref_min ? "PIT" :
187 tsc1 = pm2 * 1000000000LL; 319 hpet ? "HPET" : "PMTIMER");
188 do_div(tsc1, PMTMR_TICKS_PER_SEC); 320 return tsc_pit_min <= tsc_ref_min ? tsc_pit_min : tsc_ref_min;
189 } 321 }
190 322
191 do_div(tsc2, tsc1); 323 printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
192 tsc_khz_val = tsc2; 324 hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
193 325
194out: 326 /*
195 return tsc_khz_val; 327 * The calibration values differ too much. In doubt, we use
328 * the PIT value as we know that there are PMTIMERs around
329 * running at double speed.
330 */
331 printk(KERN_INFO "TSC: Using PIT calibration value\n");
332 return tsc_pit_min;
196} 333}
197 334
198
199#ifdef CONFIG_X86_32 335#ifdef CONFIG_X86_32
200/* Only called from the Powernow K7 cpu freq driver */ 336/* Only called from the Powernow K7 cpu freq driver */
201int recalibrate_cpu_khz(void) 337int recalibrate_cpu_khz(void)
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index d765da913842..8791fc55e715 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -31,11 +31,8 @@
31#include <linux/ioport.h> 31#include <linux/ioport.h>
32#include <linux/errno.h> 32#include <linux/errno.h>
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/acpi.h>
35 34
36#include <asm/pat.h> 35#include <asm/pat.h>
37#include <asm/hpet.h>
38#include <asm/io_apic.h>
39 36
40#include "pci.h" 37#include "pci.h"
41 38
@@ -80,77 +77,6 @@ pcibios_align_resource(void *data, struct resource *res,
80} 77}
81EXPORT_SYMBOL(pcibios_align_resource); 78EXPORT_SYMBOL(pcibios_align_resource);
82 79
83static int check_res_with_valid(struct pci_dev *dev, struct resource *res)
84{
85 unsigned long base;
86 unsigned long size;
87 int i;
88
89 base = res->start;
90 size = (res->start == 0 && res->end == res->start) ? 0 :
91 (res->end - res->start + 1);
92
93 if (!base || !size)
94 return 0;
95
96#ifdef CONFIG_HPET_TIMER
97 /* for hpet */
98 if (base == hpet_address && (res->flags & IORESOURCE_MEM)) {
99 dev_info(&dev->dev, "BAR has HPET at %08lx-%08lx\n",
100 base, base + size - 1);
101 return 1;
102 }
103#endif
104
105#ifdef CONFIG_X86_IO_APIC
106 for (i = 0; i < nr_ioapics; i++) {
107 unsigned long ioapic_phys = mp_ioapics[i].mp_apicaddr;
108
109 if (base == ioapic_phys && (res->flags & IORESOURCE_MEM)) {
110 dev_info(&dev->dev, "BAR has ioapic at %08lx-%08lx\n",
111 base, base + size - 1);
112 return 1;
113 }
114 }
115#endif
116
117#ifdef CONFIG_PCI_MMCONFIG
118 for (i = 0; i < pci_mmcfg_config_num; i++) {
119 unsigned long addr;
120
121 addr = pci_mmcfg_config[i].address;
122 if (base == addr && (res->flags & IORESOURCE_MEM)) {
123 dev_info(&dev->dev, "BAR has MMCONFIG at %08lx-%08lx\n",
124 base, base + size - 1);
125 return 1;
126 }
127 }
128#endif
129
130 return 0;
131}
132
133static int check_platform(struct pci_dev *dev, struct resource *res)
134{
135 struct resource *root = NULL;
136
137 /*
138 * forcibly insert it into the
139 * resource tree
140 */
141 if (res->flags & IORESOURCE_MEM)
142 root = &iomem_resource;
143 else if (res->flags & IORESOURCE_IO)
144 root = &ioport_resource;
145
146 if (root && check_res_with_valid(dev, res)) {
147 insert_resource(root, res);
148
149 return 1;
150 }
151
152 return 0;
153}
154/* 80/*
155 * Handle resources of PCI devices. If the world were perfect, we could 81 * Handle resources of PCI devices. If the world were perfect, we could
156 * just allocate all the resource regions and do nothing more. It isn't. 82 * just allocate all the resource regions and do nothing more. It isn't.
@@ -202,10 +128,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
202 pr = pci_find_parent_resource(dev, r); 128 pr = pci_find_parent_resource(dev, r);
203 if (!r->start || !pr || 129 if (!r->start || !pr ||
204 request_resource(pr, r) < 0) { 130 request_resource(pr, r) < 0) {
205 if (check_platform(dev, r)) 131 dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
206 continue;
207 dev_err(&dev->dev, "BAR %d: can't "
208 "allocate resource\n", idx);
209 /* 132 /*
210 * Something is wrong with the region. 133 * Something is wrong with the region.
211 * Invalidate the resource to prevent 134 * Invalidate the resource to prevent
@@ -240,17 +163,13 @@ static void __init pcibios_allocate_resources(int pass)
240 else 163 else
241 disabled = !(command & PCI_COMMAND_MEMORY); 164 disabled = !(command & PCI_COMMAND_MEMORY);
242 if (pass == disabled) { 165 if (pass == disabled) {
243 dev_dbg(&dev->dev, "resource %#08llx-%#08llx " 166 dev_dbg(&dev->dev, "resource %#08llx-%#08llx (f=%lx, d=%d, p=%d)\n",
244 "(f=%lx, d=%d, p=%d)\n",
245 (unsigned long long) r->start, 167 (unsigned long long) r->start,
246 (unsigned long long) r->end, 168 (unsigned long long) r->end,
247 r->flags, disabled, pass); 169 r->flags, disabled, pass);
248 pr = pci_find_parent_resource(dev, r); 170 pr = pci_find_parent_resource(dev, r);
249 if (!pr || request_resource(pr, r) < 0) { 171 if (!pr || request_resource(pr, r) < 0) {
250 if (check_platform(dev, r)) 172 dev_err(&dev->dev, "BAR %d: can't allocate resource\n", idx);
251 continue;
252 dev_err(&dev->dev, "BAR %d: can't "
253 "allocate resource\n", idx);
254 /* We'll assign a new address later */ 173 /* We'll assign a new address later */
255 r->end -= r->start; 174 r->end -= r->start;
256 r->start = 0; 175 r->start = 0;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 9ff6e3cbf08f..a4e201b47f64 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1324,7 +1324,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
1324 .ptep_modify_prot_commit = __ptep_modify_prot_commit, 1324 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
1325 1325
1326 .pte_val = xen_pte_val, 1326 .pte_val = xen_pte_val,
1327 .pte_flags = native_pte_val, 1327 .pte_flags = native_pte_flags,
1328 .pgd_val = xen_pgd_val, 1328 .pgd_val = xen_pgd_val,
1329 1329
1330 .make_pte = xen_make_pte, 1330 .make_pte = xen_make_pte,
diff --git a/block/cmd-filter.c b/block/cmd-filter.c
index 1d4026206ac2..228b6447e89f 100644
--- a/block/cmd-filter.c
+++ b/block/cmd-filter.c
@@ -223,6 +223,7 @@ int blk_register_filter(struct gendisk *disk)
223 223
224 return 0; 224 return 0;
225} 225}
226EXPORT_SYMBOL(blk_register_filter);
226 227
227void blk_unregister_filter(struct gendisk *disk) 228void blk_unregister_filter(struct gendisk *disk)
228{ 229{
@@ -231,4 +232,4 @@ void blk_unregister_filter(struct gendisk *disk)
231 kobject_put(&filter->kobj); 232 kobject_put(&filter->kobj);
232 kobject_put(disk->holder_dir->parent); 233 kobject_put(disk->holder_dir->parent);
233} 234}
234 235EXPORT_SYMBOL(blk_unregister_filter);
diff --git a/block/genhd.c b/block/genhd.c
index 656c2c7abf99..e0ce23ac2ece 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -190,7 +190,6 @@ void add_disk(struct gendisk *disk)
190 disk->minors, NULL, exact_match, exact_lock, disk); 190 disk->minors, NULL, exact_match, exact_lock, disk);
191 register_disk(disk); 191 register_disk(disk);
192 blk_register_queue(disk); 192 blk_register_queue(disk);
193 blk_register_filter(disk);
194 193
195 bdi = &disk->queue->backing_dev_info; 194 bdi = &disk->queue->backing_dev_info;
196 bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor)); 195 bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor));
@@ -203,7 +202,6 @@ EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */
203 202
204void unlink_gendisk(struct gendisk *disk) 203void unlink_gendisk(struct gendisk *disk)
205{ 204{
206 blk_unregister_filter(disk);
207 sysfs_remove_link(&disk->dev.kobj, "bdi"); 205 sysfs_remove_link(&disk->dev.kobj, "bdi");
208 bdi_unregister(&disk->queue->backing_dev_info); 206 bdi_unregister(&disk->queue->backing_dev_info);
209 blk_unregister_queue(disk); 207 blk_unregister_queue(disk);
@@ -309,7 +307,7 @@ static void *part_start(struct seq_file *part, loff_t *pos)
309 loff_t k = *pos; 307 loff_t k = *pos;
310 308
311 if (!k) 309 if (!k)
312 seq_puts(part, "major minor #blocks name\n\n"); 310 part->private = (void *)1LU; /* tell show to print header */
313 311
314 mutex_lock(&block_class_lock); 312 mutex_lock(&block_class_lock);
315 dev = class_find_device(&block_class, NULL, &k, find_start); 313 dev = class_find_device(&block_class, NULL, &k, find_start);
@@ -351,6 +349,17 @@ static int show_partition(struct seq_file *part, void *v)
351 int n; 349 int n;
352 char buf[BDEVNAME_SIZE]; 350 char buf[BDEVNAME_SIZE];
353 351
352 /*
353 * Print header if start told us to do. This is to preserve
354 * the original behavior of not printing header if no
355 * partition exists. This hackery will be removed later with
356 * class iteration clean up.
357 */
358 if (part->private) {
359 seq_puts(part, "major minor #blocks name\n\n");
360 part->private = NULL;
361 }
362
354 /* Don't show non-partitionable removeable devices or empty devices */ 363 /* Don't show non-partitionable removeable devices or empty devices */
355 if (!get_capacity(sgp) || 364 if (!get_capacity(sgp) ||
356 (sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE))) 365 (sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE)))
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 85eaf7b1c531..e8362c1efa30 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -137,7 +137,8 @@ async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
137 spin_lock_bh(&next->lock); 137 spin_lock_bh(&next->lock);
138 next->parent = NULL; 138 next->parent = NULL;
139 _next = next->next; 139 _next = next->next;
140 next->next = NULL; 140 if (_next && _next->chan == chan)
141 next->next = NULL;
141 spin_unlock_bh(&next->lock); 142 spin_unlock_bh(&next->lock);
142 143
143 next->tx_submit(next); 144 next->tx_submit(next);
diff --git a/crypto/camellia.c b/crypto/camellia.c
index b1cc4de6493c..493fee7e0a8b 100644
--- a/crypto/camellia.c
+++ b/crypto/camellia.c
@@ -35,8 +35,6 @@
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/bitops.h>
39#include <asm/unaligned.h>
40 38
41static const u32 camellia_sp1110[256] = { 39static const u32 camellia_sp1110[256] = {
42 0x70707000,0x82828200,0x2c2c2c00,0xececec00, 40 0x70707000,0x82828200,0x2c2c2c00,0xececec00,
@@ -337,6 +335,20 @@ static const u32 camellia_sp4404[256] = {
337/* 335/*
338 * macros 336 * macros
339 */ 337 */
338#define GETU32(v, pt) \
339 do { \
340 /* latest breed of gcc is clever enough to use move */ \
341 memcpy(&(v), (pt), 4); \
342 (v) = be32_to_cpu(v); \
343 } while(0)
344
345/* rotation right shift 1byte */
346#define ROR8(x) (((x) >> 8) + ((x) << 24))
347/* rotation left shift 1bit */
348#define ROL1(x) (((x) << 1) + ((x) >> 31))
349/* rotation left shift 1byte */
350#define ROL8(x) (((x) << 8) + ((x) >> 24))
351
340#define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ 352#define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
341 do { \ 353 do { \
342 w0 = ll; \ 354 w0 = ll; \
@@ -371,7 +383,7 @@ static const u32 camellia_sp4404[256] = {
371 ^ camellia_sp3033[(u8)(il >> 8)] \ 383 ^ camellia_sp3033[(u8)(il >> 8)] \
372 ^ camellia_sp4404[(u8)(il )]; \ 384 ^ camellia_sp4404[(u8)(il )]; \
373 yl ^= yr; \ 385 yl ^= yr; \
374 yr = ror32(yr, 8); \ 386 yr = ROR8(yr); \
375 yr ^= yl; \ 387 yr ^= yl; \
376 } while(0) 388 } while(0)
377 389
@@ -393,7 +405,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
393 subL[7] ^= subL[1]; subR[7] ^= subR[1]; 405 subL[7] ^= subL[1]; subR[7] ^= subR[1];
394 subL[1] ^= subR[1] & ~subR[9]; 406 subL[1] ^= subR[1] & ~subR[9];
395 dw = subL[1] & subL[9], 407 dw = subL[1] & subL[9],
396 subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */ 408 subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */
397 /* round 8 */ 409 /* round 8 */
398 subL[11] ^= subL[1]; subR[11] ^= subR[1]; 410 subL[11] ^= subL[1]; subR[11] ^= subR[1];
399 /* round 10 */ 411 /* round 10 */
@@ -402,7 +414,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
402 subL[15] ^= subL[1]; subR[15] ^= subR[1]; 414 subL[15] ^= subL[1]; subR[15] ^= subR[1];
403 subL[1] ^= subR[1] & ~subR[17]; 415 subL[1] ^= subR[1] & ~subR[17];
404 dw = subL[1] & subL[17], 416 dw = subL[1] & subL[17],
405 subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */ 417 subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */
406 /* round 14 */ 418 /* round 14 */
407 subL[19] ^= subL[1]; subR[19] ^= subR[1]; 419 subL[19] ^= subL[1]; subR[19] ^= subR[1];
408 /* round 16 */ 420 /* round 16 */
@@ -418,7 +430,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
418 } else { 430 } else {
419 subL[1] ^= subR[1] & ~subR[25]; 431 subL[1] ^= subR[1] & ~subR[25];
420 dw = subL[1] & subL[25], 432 dw = subL[1] & subL[25],
421 subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */ 433 subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */
422 /* round 20 */ 434 /* round 20 */
423 subL[27] ^= subL[1]; subR[27] ^= subR[1]; 435 subL[27] ^= subL[1]; subR[27] ^= subR[1];
424 /* round 22 */ 436 /* round 22 */
@@ -438,7 +450,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
438 subL[26] ^= kw4l; subR[26] ^= kw4r; 450 subL[26] ^= kw4l; subR[26] ^= kw4r;
439 kw4l ^= kw4r & ~subR[24]; 451 kw4l ^= kw4r & ~subR[24];
440 dw = kw4l & subL[24], 452 dw = kw4l & subL[24],
441 kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */ 453 kw4r ^= ROL1(dw); /* modified for FL(kl5) */
442 } 454 }
443 /* round 17 */ 455 /* round 17 */
444 subL[22] ^= kw4l; subR[22] ^= kw4r; 456 subL[22] ^= kw4l; subR[22] ^= kw4r;
@@ -448,7 +460,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
448 subL[18] ^= kw4l; subR[18] ^= kw4r; 460 subL[18] ^= kw4l; subR[18] ^= kw4r;
449 kw4l ^= kw4r & ~subR[16]; 461 kw4l ^= kw4r & ~subR[16];
450 dw = kw4l & subL[16], 462 dw = kw4l & subL[16],
451 kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */ 463 kw4r ^= ROL1(dw); /* modified for FL(kl3) */
452 /* round 11 */ 464 /* round 11 */
453 subL[14] ^= kw4l; subR[14] ^= kw4r; 465 subL[14] ^= kw4l; subR[14] ^= kw4r;
454 /* round 9 */ 466 /* round 9 */
@@ -457,7 +469,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
457 subL[10] ^= kw4l; subR[10] ^= kw4r; 469 subL[10] ^= kw4l; subR[10] ^= kw4r;
458 kw4l ^= kw4r & ~subR[8]; 470 kw4l ^= kw4r & ~subR[8];
459 dw = kw4l & subL[8], 471 dw = kw4l & subL[8],
460 kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */ 472 kw4r ^= ROL1(dw); /* modified for FL(kl1) */
461 /* round 5 */ 473 /* round 5 */
462 subL[6] ^= kw4l; subR[6] ^= kw4r; 474 subL[6] ^= kw4l; subR[6] ^= kw4r;
463 /* round 3 */ 475 /* round 3 */
@@ -482,7 +494,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
482 SUBKEY_R(6) = subR[5] ^ subR[7]; 494 SUBKEY_R(6) = subR[5] ^ subR[7];
483 tl = subL[10] ^ (subR[10] & ~subR[8]); 495 tl = subL[10] ^ (subR[10] & ~subR[8]);
484 dw = tl & subL[8], /* FL(kl1) */ 496 dw = tl & subL[8], /* FL(kl1) */
485 tr = subR[10] ^ rol32(dw, 1); 497 tr = subR[10] ^ ROL1(dw);
486 SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ 498 SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
487 SUBKEY_R(7) = subR[6] ^ tr; 499 SUBKEY_R(7) = subR[6] ^ tr;
488 SUBKEY_L(8) = subL[8]; /* FL(kl1) */ 500 SUBKEY_L(8) = subL[8]; /* FL(kl1) */
@@ -491,7 +503,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
491 SUBKEY_R(9) = subR[9]; 503 SUBKEY_R(9) = subR[9];
492 tl = subL[7] ^ (subR[7] & ~subR[9]); 504 tl = subL[7] ^ (subR[7] & ~subR[9]);
493 dw = tl & subL[9], /* FLinv(kl2) */ 505 dw = tl & subL[9], /* FLinv(kl2) */
494 tr = subR[7] ^ rol32(dw, 1); 506 tr = subR[7] ^ ROL1(dw);
495 SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ 507 SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
496 SUBKEY_R(10) = tr ^ subR[11]; 508 SUBKEY_R(10) = tr ^ subR[11];
497 SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ 509 SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
@@ -504,7 +516,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
504 SUBKEY_R(14) = subR[13] ^ subR[15]; 516 SUBKEY_R(14) = subR[13] ^ subR[15];
505 tl = subL[18] ^ (subR[18] & ~subR[16]); 517 tl = subL[18] ^ (subR[18] & ~subR[16]);
506 dw = tl & subL[16], /* FL(kl3) */ 518 dw = tl & subL[16], /* FL(kl3) */
507 tr = subR[18] ^ rol32(dw, 1); 519 tr = subR[18] ^ ROL1(dw);
508 SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ 520 SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
509 SUBKEY_R(15) = subR[14] ^ tr; 521 SUBKEY_R(15) = subR[14] ^ tr;
510 SUBKEY_L(16) = subL[16]; /* FL(kl3) */ 522 SUBKEY_L(16) = subL[16]; /* FL(kl3) */
@@ -513,7 +525,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
513 SUBKEY_R(17) = subR[17]; 525 SUBKEY_R(17) = subR[17];
514 tl = subL[15] ^ (subR[15] & ~subR[17]); 526 tl = subL[15] ^ (subR[15] & ~subR[17]);
515 dw = tl & subL[17], /* FLinv(kl4) */ 527 dw = tl & subL[17], /* FLinv(kl4) */
516 tr = subR[15] ^ rol32(dw, 1); 528 tr = subR[15] ^ ROL1(dw);
517 SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ 529 SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
518 SUBKEY_R(18) = tr ^ subR[19]; 530 SUBKEY_R(18) = tr ^ subR[19];
519 SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ 531 SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
@@ -532,7 +544,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
532 } else { 544 } else {
533 tl = subL[26] ^ (subR[26] & ~subR[24]); 545 tl = subL[26] ^ (subR[26] & ~subR[24]);
534 dw = tl & subL[24], /* FL(kl5) */ 546 dw = tl & subL[24], /* FL(kl5) */
535 tr = subR[26] ^ rol32(dw, 1); 547 tr = subR[26] ^ ROL1(dw);
536 SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ 548 SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
537 SUBKEY_R(23) = subR[22] ^ tr; 549 SUBKEY_R(23) = subR[22] ^ tr;
538 SUBKEY_L(24) = subL[24]; /* FL(kl5) */ 550 SUBKEY_L(24) = subL[24]; /* FL(kl5) */
@@ -541,7 +553,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
541 SUBKEY_R(25) = subR[25]; 553 SUBKEY_R(25) = subR[25];
542 tl = subL[23] ^ (subR[23] & ~subR[25]); 554 tl = subL[23] ^ (subR[23] & ~subR[25]);
543 dw = tl & subL[25], /* FLinv(kl6) */ 555 dw = tl & subL[25], /* FLinv(kl6) */
544 tr = subR[23] ^ rol32(dw, 1); 556 tr = subR[23] ^ ROL1(dw);
545 SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ 557 SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
546 SUBKEY_R(26) = tr ^ subR[27]; 558 SUBKEY_R(26) = tr ^ subR[27];
547 SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ 559 SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
@@ -561,17 +573,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
561 /* apply the inverse of the last half of P-function */ 573 /* apply the inverse of the last half of P-function */
562 i = 2; 574 i = 2;
563 do { 575 do {
564 dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */ 576 dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */
565 SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw; 577 SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
566 dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */ 578 dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */
567 SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw; 579 SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
568 dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */ 580 dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */
569 SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw; 581 SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
570 dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */ 582 dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */
571 SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw; 583 SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
572 dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 9);/* round 5 */ 584 dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */
573 SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw; 585 SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
574 dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */ 586 dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */
575 SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw; 587 SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
576 i += 8; 588 i += 8;
577 } while (i < max); 589 } while (i < max);
@@ -587,10 +599,10 @@ static void camellia_setup128(const unsigned char *key, u32 *subkey)
587 /** 599 /**
588 * k == kll || klr || krl || krr (|| is concatenation) 600 * k == kll || klr || krl || krr (|| is concatenation)
589 */ 601 */
590 kll = get_unaligned_be32(key); 602 GETU32(kll, key );
591 klr = get_unaligned_be32(key + 4); 603 GETU32(klr, key + 4);
592 krl = get_unaligned_be32(key + 8); 604 GETU32(krl, key + 8);
593 krr = get_unaligned_be32(key + 12); 605 GETU32(krr, key + 12);
594 606
595 /* generate KL dependent subkeys */ 607 /* generate KL dependent subkeys */
596 /* kw1 */ 608 /* kw1 */
@@ -695,14 +707,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey)
695 * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr) 707 * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
696 * (|| is concatenation) 708 * (|| is concatenation)
697 */ 709 */
698 kll = get_unaligned_be32(key); 710 GETU32(kll, key );
699 klr = get_unaligned_be32(key + 4); 711 GETU32(klr, key + 4);
700 krl = get_unaligned_be32(key + 8); 712 GETU32(krl, key + 8);
701 krr = get_unaligned_be32(key + 12); 713 GETU32(krr, key + 12);
702 krll = get_unaligned_be32(key + 16); 714 GETU32(krll, key + 16);
703 krlr = get_unaligned_be32(key + 20); 715 GETU32(krlr, key + 20);
704 krrl = get_unaligned_be32(key + 24); 716 GETU32(krrl, key + 24);
705 krrr = get_unaligned_be32(key + 28); 717 GETU32(krrr, key + 28);
706 718
707 /* generate KL dependent subkeys */ 719 /* generate KL dependent subkeys */
708 /* kw1 */ 720 /* kw1 */
@@ -858,13 +870,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
858 t0 &= ll; \ 870 t0 &= ll; \
859 t2 |= rr; \ 871 t2 |= rr; \
860 rl ^= t2; \ 872 rl ^= t2; \
861 lr ^= rol32(t0, 1); \ 873 lr ^= ROL1(t0); \
862 t3 = krl; \ 874 t3 = krl; \
863 t1 = klr; \ 875 t1 = klr; \
864 t3 &= rl; \ 876 t3 &= rl; \
865 t1 |= lr; \ 877 t1 |= lr; \
866 ll ^= t1; \ 878 ll ^= t1; \
867 rr ^= rol32(t3, 1); \ 879 rr ^= ROL1(t3); \
868 } while(0) 880 } while(0)
869 881
870#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ 882#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \
@@ -880,7 +892,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey)
880 il ^= kl; \ 892 il ^= kl; \
881 ir ^= il ^ kr; \ 893 ir ^= il ^ kr; \
882 yl ^= ir; \ 894 yl ^= ir; \
883 yr ^= ror32(il, 8) ^ ir; \ 895 yr ^= ROR8(il) ^ ir; \
884 } while(0) 896 } while(0)
885 897
886/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ 898/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 735f5ea17473..12cf5d491f0d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -260,6 +260,9 @@ config ACPI_ASUS
260config ACPI_TOSHIBA 260config ACPI_TOSHIBA
261 tristate "Toshiba Laptop Extras" 261 tristate "Toshiba Laptop Extras"
262 depends on X86 262 depends on X86
263 select INPUT_POLLDEV
264 select NET
265 select RFKILL
263 select BACKLIGHT_CLASS_DEVICE 266 select BACKLIGHT_CLASS_DEVICE
264 ---help--- 267 ---help---
265 This driver adds support for access to certain system settings 268 This driver adds support for access to certain system settings
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index 1022e38994c2..0f2805899210 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -496,7 +496,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
496 arg = arg->common.next; 496 arg = arg->common.next;
497 } 497 }
498 498
499 ACPI_ERROR((AE_INFO, 499 ACPI_WARNING((AE_INFO,
500 "Package List length (%X) larger than NumElements count (%X), truncated\n", 500 "Package List length (%X) larger than NumElements count (%X), truncated\n",
501 i, element_count)); 501 i, element_count));
502 } else if (i < element_count) { 502 } else if (i < element_count) {
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index d3f0a62efcc1..ee68ac54c0d4 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -138,7 +138,7 @@ static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
138 { 138 {
139 set_no_mwait, "Extensa 5220", { 139 set_no_mwait, "Extensa 5220", {
140 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), 140 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
141 DMI_MATCH(DMI_SYS_VENDOR, "ACER"), 141 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
142 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), 142 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
143 DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL}, 143 DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
144 {}, 144 {},
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 80e32093e977..80c251ec6d2a 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -71,7 +71,7 @@ static DEFINE_MUTEX(performance_mutex);
71 * 1 -> ignore _PPC totally -> forced by user through boot param 71 * 1 -> ignore _PPC totally -> forced by user through boot param
72 */ 72 */
73static int ignore_ppc = -1; 73static int ignore_ppc = -1;
74module_param(ignore_ppc, uint, 0644); 74module_param(ignore_ppc, int, 0644);
75MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \ 75MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
76 "limited by BIOS, this should help"); 76 "limited by BIOS, this should help");
77 77
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index bcf2c70fca87..a4e3767b8c64 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -107,6 +107,13 @@ static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
107 if (wait_event_timeout(hc->wait, smb_check_done(hc), 107 if (wait_event_timeout(hc->wait, smb_check_done(hc),
108 msecs_to_jiffies(timeout))) 108 msecs_to_jiffies(timeout)))
109 return 0; 109 return 0;
110 /*
111 * After the timeout happens, OS will try to check the status of SMbus.
112 * If the status is what OS expected, it will be regarded as the bogus
113 * timeout.
114 */
115 if (smb_check_done(hc))
116 return 0;
110 else 117 else
111 return -ETIME; 118 return -ETIME;
112} 119}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index c3419182c9a7..775c97a282bd 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -300,6 +300,8 @@ int __init acpi_table_init(void)
300 300
301static int __init acpi_parse_apic_instance(char *str) 301static int __init acpi_parse_apic_instance(char *str)
302{ 302{
303 if (!str)
304 return -EINVAL;
303 305
304 acpi_apic_instance = simple_strtoul(str, NULL, 0); 306 acpi_apic_instance = simple_strtoul(str, NULL, 0);
305 307
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index 0a43c8e0eff3..8a649f40d162 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * 4 *
5 * Copyright (C) 2002-2004 John Belmonte 5 * Copyright (C) 2002-2004 John Belmonte
6 * Copyright (C) 2008 Philip Langdale
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -33,7 +34,7 @@
33 * 34 *
34 */ 35 */
35 36
36#define TOSHIBA_ACPI_VERSION "0.18" 37#define TOSHIBA_ACPI_VERSION "0.19"
37#define PROC_INTERFACE_VERSION 1 38#define PROC_INTERFACE_VERSION 1
38 39
39#include <linux/kernel.h> 40#include <linux/kernel.h>
@@ -42,6 +43,9 @@
42#include <linux/types.h> 43#include <linux/types.h>
43#include <linux/proc_fs.h> 44#include <linux/proc_fs.h>
44#include <linux/backlight.h> 45#include <linux/backlight.h>
46#include <linux/platform_device.h>
47#include <linux/rfkill.h>
48#include <linux/input-polldev.h>
45 49
46#include <asm/uaccess.h> 50#include <asm/uaccess.h>
47 51
@@ -90,6 +94,7 @@ MODULE_LICENSE("GPL");
90#define HCI_VIDEO_OUT 0x001c 94#define HCI_VIDEO_OUT 0x001c
91#define HCI_HOTKEY_EVENT 0x001e 95#define HCI_HOTKEY_EVENT 0x001e
92#define HCI_LCD_BRIGHTNESS 0x002a 96#define HCI_LCD_BRIGHTNESS 0x002a
97#define HCI_WIRELESS 0x0056
93 98
94/* field definitions */ 99/* field definitions */
95#define HCI_LCD_BRIGHTNESS_BITS 3 100#define HCI_LCD_BRIGHTNESS_BITS 3
@@ -98,9 +103,14 @@ MODULE_LICENSE("GPL");
98#define HCI_VIDEO_OUT_LCD 0x1 103#define HCI_VIDEO_OUT_LCD 0x1
99#define HCI_VIDEO_OUT_CRT 0x2 104#define HCI_VIDEO_OUT_CRT 0x2
100#define HCI_VIDEO_OUT_TV 0x4 105#define HCI_VIDEO_OUT_TV 0x4
106#define HCI_WIRELESS_KILL_SWITCH 0x01
107#define HCI_WIRELESS_BT_PRESENT 0x0f
108#define HCI_WIRELESS_BT_ATTACH 0x40
109#define HCI_WIRELESS_BT_POWER 0x80
101 110
102static const struct acpi_device_id toshiba_device_ids[] = { 111static const struct acpi_device_id toshiba_device_ids[] = {
103 {"TOS6200", 0}, 112 {"TOS6200", 0},
113 {"TOS6208", 0},
104 {"TOS1900", 0}, 114 {"TOS1900", 0},
105 {"", 0}, 115 {"", 0},
106}; 116};
@@ -193,7 +203,7 @@ static acpi_status hci_raw(const u32 in[HCI_WORDS], u32 out[HCI_WORDS])
193 return status; 203 return status;
194} 204}
195 205
196/* common hci tasks (get or set one value) 206/* common hci tasks (get or set one or two value)
197 * 207 *
198 * In addition to the ACPI status, the HCI system returns a result which 208 * In addition to the ACPI status, the HCI system returns a result which
199 * may be useful (such as "not supported"). 209 * may be useful (such as "not supported").
@@ -218,6 +228,152 @@ static acpi_status hci_read1(u32 reg, u32 * out1, u32 * result)
218 return status; 228 return status;
219} 229}
220 230
231static acpi_status hci_write2(u32 reg, u32 in1, u32 in2, u32 *result)
232{
233 u32 in[HCI_WORDS] = { HCI_SET, reg, in1, in2, 0, 0 };
234 u32 out[HCI_WORDS];
235 acpi_status status = hci_raw(in, out);
236 *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
237 return status;
238}
239
240static acpi_status hci_read2(u32 reg, u32 *out1, u32 *out2, u32 *result)
241{
242 u32 in[HCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
243 u32 out[HCI_WORDS];
244 acpi_status status = hci_raw(in, out);
245 *out1 = out[2];
246 *out2 = out[3];
247 *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
248 return status;
249}
250
251struct toshiba_acpi_dev {
252 struct platform_device *p_dev;
253 struct rfkill *rfk_dev;
254 struct input_polled_dev *poll_dev;
255
256 const char *bt_name;
257 const char *rfk_name;
258
259 bool last_rfk_state;
260
261 struct mutex mutex;
262};
263
264static struct toshiba_acpi_dev toshiba_acpi = {
265 .bt_name = "Toshiba Bluetooth",
266 .rfk_name = "Toshiba RFKill Switch",
267 .last_rfk_state = false,
268};
269
270/* Bluetooth rfkill handlers */
271
272static u32 hci_get_bt_present(bool *present)
273{
274 u32 hci_result;
275 u32 value, value2;
276
277 value = 0;
278 value2 = 0;
279 hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
280 if (hci_result == HCI_SUCCESS)
281 *present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
282
283 return hci_result;
284}
285
286static u32 hci_get_bt_on(bool *on)
287{
288 u32 hci_result;
289 u32 value, value2;
290
291 value = 0;
292 value2 = 0x0001;
293 hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
294 if (hci_result == HCI_SUCCESS)
295 *on = (value & HCI_WIRELESS_BT_POWER) &&
296 (value & HCI_WIRELESS_BT_ATTACH);
297
298 return hci_result;
299}
300
301static u32 hci_get_radio_state(bool *radio_state)
302{
303 u32 hci_result;
304 u32 value, value2;
305
306 value = 0;
307 value2 = 0x0001;
308 hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
309
310 *radio_state = value & HCI_WIRELESS_KILL_SWITCH;
311 return hci_result;
312}
313
314static int bt_rfkill_toggle_radio(void *data, enum rfkill_state state)
315{
316 u32 result1, result2;
317 u32 value;
318 bool radio_state;
319 struct toshiba_acpi_dev *dev = data;
320
321 value = (state == RFKILL_STATE_UNBLOCKED);
322
323 if (hci_get_radio_state(&radio_state) != HCI_SUCCESS)
324 return -EFAULT;
325
326 switch (state) {
327 case RFKILL_STATE_UNBLOCKED:
328 if (!radio_state)
329 return -EPERM;
330 break;
331 case RFKILL_STATE_SOFT_BLOCKED:
332 break;
333 default:
334 return -EINVAL;
335 }
336
337 mutex_lock(&dev->mutex);
338 hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER, &result1);
339 hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH, &result2);
340 mutex_unlock(&dev->mutex);
341
342 if (result1 != HCI_SUCCESS || result2 != HCI_SUCCESS)
343 return -EFAULT;
344
345 return 0;
346}
347
348static void bt_poll_rfkill(struct input_polled_dev *poll_dev)
349{
350 bool state_changed;
351 bool new_rfk_state;
352 bool value;
353 u32 hci_result;
354 struct toshiba_acpi_dev *dev = poll_dev->private;
355
356 hci_result = hci_get_radio_state(&value);
357 if (hci_result != HCI_SUCCESS)
358 return; /* Can't do anything useful */
359
360 new_rfk_state = value;
361
362 mutex_lock(&dev->mutex);
363 state_changed = new_rfk_state != dev->last_rfk_state;
364 dev->last_rfk_state = new_rfk_state;
365 mutex_unlock(&dev->mutex);
366
367 if (unlikely(state_changed)) {
368 rfkill_force_state(dev->rfk_dev,
369 new_rfk_state ?
370 RFKILL_STATE_SOFT_BLOCKED :
371 RFKILL_STATE_HARD_BLOCKED);
372 input_report_switch(poll_dev->input, SW_RFKILL_ALL,
373 new_rfk_state);
374 }
375}
376
221static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ; 377static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
222static struct backlight_device *toshiba_backlight_device; 378static struct backlight_device *toshiba_backlight_device;
223static int force_fan; 379static int force_fan;
@@ -547,6 +703,14 @@ static struct backlight_ops toshiba_backlight_data = {
547 703
548static void toshiba_acpi_exit(void) 704static void toshiba_acpi_exit(void)
549{ 705{
706 if (toshiba_acpi.poll_dev) {
707 input_unregister_polled_device(toshiba_acpi.poll_dev);
708 input_free_polled_device(toshiba_acpi.poll_dev);
709 }
710
711 if (toshiba_acpi.rfk_dev)
712 rfkill_unregister(toshiba_acpi.rfk_dev);
713
550 if (toshiba_backlight_device) 714 if (toshiba_backlight_device)
551 backlight_device_unregister(toshiba_backlight_device); 715 backlight_device_unregister(toshiba_backlight_device);
552 716
@@ -555,6 +719,8 @@ static void toshiba_acpi_exit(void)
555 if (toshiba_proc_dir) 719 if (toshiba_proc_dir)
556 remove_proc_entry(PROC_TOSHIBA, acpi_root_dir); 720 remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
557 721
722 platform_device_unregister(toshiba_acpi.p_dev);
723
558 return; 724 return;
559} 725}
560 726
@@ -562,6 +728,10 @@ static int __init toshiba_acpi_init(void)
562{ 728{
563 acpi_status status = AE_OK; 729 acpi_status status = AE_OK;
564 u32 hci_result; 730 u32 hci_result;
731 bool bt_present;
732 bool bt_on;
733 bool radio_on;
734 int ret = 0;
565 735
566 if (acpi_disabled) 736 if (acpi_disabled)
567 return -ENODEV; 737 return -ENODEV;
@@ -578,6 +748,18 @@ static int __init toshiba_acpi_init(void)
578 TOSHIBA_ACPI_VERSION); 748 TOSHIBA_ACPI_VERSION);
579 printk(MY_INFO " HCI method: %s\n", method_hci); 749 printk(MY_INFO " HCI method: %s\n", method_hci);
580 750
751 mutex_init(&toshiba_acpi.mutex);
752
753 toshiba_acpi.p_dev = platform_device_register_simple("toshiba_acpi",
754 -1, NULL, 0);
755 if (IS_ERR(toshiba_acpi.p_dev)) {
756 ret = PTR_ERR(toshiba_acpi.p_dev);
757 printk(MY_ERR "unable to register platform device\n");
758 toshiba_acpi.p_dev = NULL;
759 toshiba_acpi_exit();
760 return ret;
761 }
762
581 force_fan = 0; 763 force_fan = 0;
582 key_event_valid = 0; 764 key_event_valid = 0;
583 765
@@ -586,19 +768,23 @@ static int __init toshiba_acpi_init(void)
586 768
587 toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir); 769 toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
588 if (!toshiba_proc_dir) { 770 if (!toshiba_proc_dir) {
589 status = AE_ERROR; 771 toshiba_acpi_exit();
772 return -ENODEV;
590 } else { 773 } else {
591 toshiba_proc_dir->owner = THIS_MODULE; 774 toshiba_proc_dir->owner = THIS_MODULE;
592 status = add_device(); 775 status = add_device();
593 if (ACPI_FAILURE(status)) 776 if (ACPI_FAILURE(status)) {
594 remove_proc_entry(PROC_TOSHIBA, acpi_root_dir); 777 toshiba_acpi_exit();
778 return -ENODEV;
779 }
595 } 780 }
596 781
597 toshiba_backlight_device = backlight_device_register("toshiba",NULL, 782 toshiba_backlight_device = backlight_device_register("toshiba",
783 &toshiba_acpi.p_dev->dev,
598 NULL, 784 NULL,
599 &toshiba_backlight_data); 785 &toshiba_backlight_data);
600 if (IS_ERR(toshiba_backlight_device)) { 786 if (IS_ERR(toshiba_backlight_device)) {
601 int ret = PTR_ERR(toshiba_backlight_device); 787 ret = PTR_ERR(toshiba_backlight_device);
602 788
603 printk(KERN_ERR "Could not register toshiba backlight device\n"); 789 printk(KERN_ERR "Could not register toshiba backlight device\n");
604 toshiba_backlight_device = NULL; 790 toshiba_backlight_device = NULL;
@@ -607,7 +793,66 @@ static int __init toshiba_acpi_init(void)
607 } 793 }
608 toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; 794 toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
609 795
610 return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; 796 /* Register rfkill switch for Bluetooth */
797 if (hci_get_bt_present(&bt_present) == HCI_SUCCESS && bt_present) {
798 toshiba_acpi.rfk_dev = rfkill_allocate(&toshiba_acpi.p_dev->dev,
799 RFKILL_TYPE_BLUETOOTH);
800 if (!toshiba_acpi.rfk_dev) {
801 printk(MY_ERR "unable to allocate rfkill device\n");
802 toshiba_acpi_exit();
803 return -ENOMEM;
804 }
805
806 toshiba_acpi.rfk_dev->name = toshiba_acpi.bt_name;
807 toshiba_acpi.rfk_dev->toggle_radio = bt_rfkill_toggle_radio;
808 toshiba_acpi.rfk_dev->user_claim_unsupported = 1;
809 toshiba_acpi.rfk_dev->data = &toshiba_acpi;
810
811 if (hci_get_bt_on(&bt_on) == HCI_SUCCESS && bt_on) {
812 toshiba_acpi.rfk_dev->state = RFKILL_STATE_UNBLOCKED;
813 } else if (hci_get_radio_state(&radio_on) == HCI_SUCCESS &&
814 radio_on) {
815 toshiba_acpi.rfk_dev->state = RFKILL_STATE_SOFT_BLOCKED;
816 } else {
817 toshiba_acpi.rfk_dev->state = RFKILL_STATE_HARD_BLOCKED;
818 }
819
820 ret = rfkill_register(toshiba_acpi.rfk_dev);
821 if (ret) {
822 printk(MY_ERR "unable to register rfkill device\n");
823 toshiba_acpi_exit();
824 return -ENOMEM;
825 }
826 }
827
828 /* Register input device for kill switch */
829 toshiba_acpi.poll_dev = input_allocate_polled_device();
830 if (!toshiba_acpi.poll_dev) {
831 printk(MY_ERR "unable to allocate kill-switch input device\n");
832 toshiba_acpi_exit();
833 return -ENOMEM;
834 }
835 toshiba_acpi.poll_dev->private = &toshiba_acpi;
836 toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
837 toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
838
839 toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
840 toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
841 toshiba_acpi.poll_dev->input->id.vendor = 0x0930; /* Toshiba USB ID */
842 set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
843 set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
844 input_report_switch(toshiba_acpi.poll_dev->input, SW_RFKILL_ALL, TRUE);
845
846 ret = input_register_polled_device(toshiba_acpi.poll_dev);
847 if (ret) {
848 printk(MY_ERR "unable to register kill-switch input device\n");
849 rfkill_free(toshiba_acpi.rfk_dev);
850 toshiba_acpi.rfk_dev = NULL;
851 toshiba_acpi_exit();
852 return ret;
853 }
854
855 return 0;
611} 856}
612 857
613module_init(toshiba_acpi_init); 858module_init(toshiba_acpi_init);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ae8494944c45..11c8c19f0fb7 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -448,8 +448,10 @@ config PATA_MARVELL
448 tristate "Marvell PATA support via legacy mode" 448 tristate "Marvell PATA support via legacy mode"
449 depends on PCI 449 depends on PCI
450 help 450 help
451 This option enables limited support for the Marvell 88SE6145 ATA 451 This option enables limited support for the Marvell 88SE61xx ATA
452 controller. 452 controllers. If you wish to use only the SATA ports then select
453 the AHCI driver alone. If you wish to the use the PATA port or
454 both SATA and PATA include this driver.
453 455
454 If unsure, say N. 456 If unsure, say N.
455 457
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index c729e6988bbb..2e1a7cb2ed5f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -420,7 +420,7 @@ static const struct ata_port_info ahci_port_info[] = {
420 /* board_ahci_mv */ 420 /* board_ahci_mv */
421 { 421 {
422 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | 422 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
423 AHCI_HFLAG_MV_PATA), 423 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
424 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 424 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
425 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, 425 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
426 .pio_mask = 0x1f, /* pio0-4 */ 426 .pio_mask = 0x1f, /* pio0-4 */
@@ -487,7 +487,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
487 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ 487 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
488 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ 488 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
489 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ 489 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
490 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
490 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ 491 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
492 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
491 493
492 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 494 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
493 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 495 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -610,6 +612,15 @@ module_param(ahci_em_messages, int, 0444);
610MODULE_PARM_DESC(ahci_em_messages, 612MODULE_PARM_DESC(ahci_em_messages,
611 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED"); 613 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
612 614
615#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
616static int marvell_enable;
617#else
618static int marvell_enable = 1;
619#endif
620module_param(marvell_enable, int, 0644);
621MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
622
623
613static inline int ahci_nr_ports(u32 cap) 624static inline int ahci_nr_ports(u32 cap)
614{ 625{
615 return (cap & 0x1f) + 1; 626 return (cap & 0x1f) + 1;
@@ -732,6 +743,8 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
732 "MV_AHCI HACK: port_map %x -> %x\n", 743 "MV_AHCI HACK: port_map %x -> %x\n",
733 port_map, 744 port_map,
734 port_map & mv); 745 port_map & mv);
746 dev_printk(KERN_ERR, &pdev->dev,
747 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
735 748
736 port_map &= mv; 749 port_map &= mv;
737 } 750 }
@@ -2533,6 +2546,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2533 if (!printed_version++) 2546 if (!printed_version++)
2534 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 2547 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2535 2548
2549 /* The AHCI driver can only drive the SATA ports, the PATA driver
2550 can drive them all so if both drivers are selected make sure
2551 AHCI stays out of the way */
2552 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2553 return -ENODEV;
2554
2536 /* acquire resources */ 2555 /* acquire resources */
2537 rc = pcim_enable_device(pdev); 2556 rc = pcim_enable_device(pdev);
2538 if (rc) 2557 if (rc)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 304fdc6f1dc2..2a4c516894f0 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1315,11 +1315,6 @@ fsm_start:
1315 break; 1315 break;
1316 1316
1317 case HSM_ST_ERR: 1317 case HSM_ST_ERR:
1318 /* make sure qc->err_mask is available to
1319 * know what's wrong and recover
1320 */
1321 WARN_ON(!(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)));
1322
1323 ap->hsm_task_state = HSM_ST_IDLE; 1318 ap->hsm_task_state = HSM_ST_IDLE;
1324 1319
1325 /* complete taskfile transaction */ 1320 /* complete taskfile transaction */
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 24a011b25024..0d87eec84966 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -20,29 +20,30 @@
20#include <linux/ata.h> 20#include <linux/ata.h>
21 21
22#define DRV_NAME "pata_marvell" 22#define DRV_NAME "pata_marvell"
23#define DRV_VERSION "0.1.4" 23#define DRV_VERSION "0.1.6"
24 24
25/** 25/**
26 * marvell_pre_reset - check for 40/80 pin 26 * marvell_pata_active - check if PATA is active
27 * @link: link 27 * @pdev: PCI device
28 * @deadline: deadline jiffies for the operation
29 * 28 *
30 * Perform the PATA port setup we need. 29 * Returns 1 if the PATA port may be active. We know how to check this
30 * for the 6145 but not the other devices
31 */ 31 */
32 32
33static int marvell_pre_reset(struct ata_link *link, unsigned long deadline) 33static int marvell_pata_active(struct pci_dev *pdev)
34{ 34{
35 struct ata_port *ap = link->ap; 35 int i;
36 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
37 u32 devices; 36 u32 devices;
38 void __iomem *barp; 37 void __iomem *barp;
39 int i;
40 38
41 /* Check if our port is enabled */ 39 /* We don't yet know how to do this for other devices */
40 if (pdev->device != 0x6145)
41 return 1;
42 42
43 barp = pci_iomap(pdev, 5, 0x10); 43 barp = pci_iomap(pdev, 5, 0x10);
44 if (barp == NULL) 44 if (barp == NULL)
45 return -ENOMEM; 45 return -ENOMEM;
46
46 printk("BAR5:"); 47 printk("BAR5:");
47 for(i = 0; i <= 0x0F; i++) 48 for(i = 0; i <= 0x0F; i++)
48 printk("%02X:%02X ", i, ioread8(barp + i)); 49 printk("%02X:%02X ", i, ioread8(barp + i));
@@ -51,9 +52,27 @@ static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
51 devices = ioread32(barp + 0x0C); 52 devices = ioread32(barp + 0x0C);
52 pci_iounmap(pdev, barp); 53 pci_iounmap(pdev, barp);
53 54
54 if ((pdev->device == 0x6145) && (ap->port_no == 0) && 55 if (devices & 0x10)
55 (!(devices & 0x10))) /* PATA enable ? */ 56 return 1;
56 return -ENOENT; 57 return 0;
58}
59
60/**
61 * marvell_pre_reset - check for 40/80 pin
62 * @link: link
63 * @deadline: deadline jiffies for the operation
64 *
65 * Perform the PATA port setup we need.
66 */
67
68static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
69{
70 struct ata_port *ap = link->ap;
71 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
72
73 if (pdev->device == 0x6145 && ap->port_no == 0 &&
74 !marvell_pata_active(pdev)) /* PATA enable ? */
75 return -ENOENT;
57 76
58 return ata_sff_prereset(link, deadline); 77 return ata_sff_prereset(link, deadline);
59} 78}
@@ -128,6 +147,12 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
128 if (pdev->device == 0x6101) 147 if (pdev->device == 0x6101)
129 ppi[1] = &ata_dummy_port_info; 148 ppi[1] = &ata_dummy_port_info;
130 149
150#if defined(CONFIG_AHCI) || defined(CONFIG_AHCI_MODULE)
151 if (!marvell_pata_active(pdev)) {
152 printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n");
153 return -ENODEV;
154 }
155#endif
131 return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL); 156 return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL);
132} 157}
133 158
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 720b8645f58a..e970b227fbce 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -322,9 +322,6 @@ static int __devinit sil680_init_one(struct pci_dev *pdev,
322 /* Try to acquire MMIO resources and fallback to PIO if 322 /* Try to acquire MMIO resources and fallback to PIO if
323 * that fails 323 * that fails
324 */ 324 */
325 rc = pcim_enable_device(pdev);
326 if (rc)
327 return rc;
328 rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME); 325 rc = pcim_iomap_regions(pdev, 1 << SIL680_MMIO_BAR, DRV_NAME);
329 if (rc) 326 if (rc)
330 goto use_ioports; 327 goto use_ioports;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 13c1d2af18ac..c815f8ecf6e6 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -667,7 +667,8 @@ static const struct pci_device_id mv_pci_tbl[] = {
667 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, 667 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
668 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, 668 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
669 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, 669 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
670 /* RocketRAID 1740/174x have different identifiers */ 670 /* RocketRAID 1720/174x have different identifiers */
671 { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
671 { PCI_VDEVICE(TTI, 0x1740), chip_508x }, 672 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
672 { PCI_VDEVICE(TTI, 0x1742), chip_508x }, 673 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
673 674
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 858f70610eda..1e1f3f3757ae 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -309,8 +309,6 @@ static void nv_nf2_freeze(struct ata_port *ap);
309static void nv_nf2_thaw(struct ata_port *ap); 309static void nv_nf2_thaw(struct ata_port *ap);
310static void nv_ck804_freeze(struct ata_port *ap); 310static void nv_ck804_freeze(struct ata_port *ap);
311static void nv_ck804_thaw(struct ata_port *ap); 311static void nv_ck804_thaw(struct ata_port *ap);
312static int nv_hardreset(struct ata_link *link, unsigned int *class,
313 unsigned long deadline);
314static int nv_adma_slave_config(struct scsi_device *sdev); 312static int nv_adma_slave_config(struct scsi_device *sdev);
315static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); 313static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316static void nv_adma_qc_prep(struct ata_queued_cmd *qc); 314static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
@@ -407,7 +405,7 @@ static struct scsi_host_template nv_swncq_sht = {
407 405
408static struct ata_port_operations nv_generic_ops = { 406static struct ata_port_operations nv_generic_ops = {
409 .inherits = &ata_bmdma_port_ops, 407 .inherits = &ata_bmdma_port_ops,
410 .hardreset = nv_hardreset, 408 .hardreset = ATA_OP_NULL,
411 .scr_read = nv_scr_read, 409 .scr_read = nv_scr_read,
412 .scr_write = nv_scr_write, 410 .scr_write = nv_scr_write,
413}; 411};
@@ -1588,21 +1586,6 @@ static void nv_mcp55_thaw(struct ata_port *ap)
1588 ata_sff_thaw(ap); 1586 ata_sff_thaw(ap);
1589} 1587}
1590 1588
1591static int nv_hardreset(struct ata_link *link, unsigned int *class,
1592 unsigned long deadline)
1593{
1594 int rc;
1595
1596 /* SATA hardreset fails to retrieve proper device signature on
1597 * some controllers. Request follow up SRST. For more info,
1598 * see http://bugzilla.kernel.org/show_bug.cgi?id=3352
1599 */
1600 rc = sata_sff_hardreset(link, class, deadline);
1601 if (rc)
1602 return rc;
1603 return -EAGAIN;
1604}
1605
1606static void nv_adma_error_handler(struct ata_port *ap) 1589static void nv_adma_error_handler(struct ata_port *ap)
1607{ 1590{
1608 struct nv_adma_port_priv *pp = ap->private_data; 1591 struct nv_adma_port_priv *pp = ap->private_data;
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 41b2204ebc6e..5503bfc8e132 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1270,7 +1270,7 @@ static int comp_tx(struct eni_dev *eni_dev,int *pcr,int reserved,int *pre,
1270 if (*pre < 3) (*pre)++; /* else fail later */ 1270 if (*pre < 3) (*pre)++; /* else fail later */
1271 div = pre_div[*pre]*-*pcr; 1271 div = pre_div[*pre]*-*pcr;
1272 DPRINTK("max div %d\n",div); 1272 DPRINTK("max div %d\n",div);
1273 *res = (TS_CLOCK+div-1)/div-1; 1273 *res = DIV_ROUND_UP(TS_CLOCK, div)-1;
1274 } 1274 }
1275 if (*res < 0) *res = 0; 1275 if (*res < 0) *res = 0;
1276 if (*res > MID_SEG_MAX_RATE) *res = MID_SEG_MAX_RATE; 1276 if (*res > MID_SEG_MAX_RATE) *res = MID_SEG_MAX_RATE;
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index c0ac728dc564..615412364e99 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -635,7 +635,7 @@ static int make_rate (const hrz_dev * dev, u32 c, rounding r,
635 // take care of rounding 635 // take care of rounding
636 switch (r) { 636 switch (r) {
637 case round_down: 637 case round_down:
638 pre = (br+(c<<div)-1)/(c<<div); 638 pre = DIV_ROUND_UP(br, c<<div);
639 // but p must be non-zero 639 // but p must be non-zero
640 if (!pre) 640 if (!pre)
641 pre = 1; 641 pre = 1;
@@ -668,7 +668,7 @@ static int make_rate (const hrz_dev * dev, u32 c, rounding r,
668 // take care of rounding 668 // take care of rounding
669 switch (r) { 669 switch (r) {
670 case round_down: 670 case round_down:
671 pre = (br+(c<<div)-1)/(c<<div); 671 pre = DIV_ROUND_UP(br, c<<div);
672 break; 672 break;
673 case round_nearest: 673 case round_nearest:
674 pre = (br+(c<<div)/2)/(c<<div); 674 pre = (br+(c<<div)/2)/(c<<div);
@@ -698,7 +698,7 @@ got_it:
698 if (bits) 698 if (bits)
699 *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1); 699 *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1);
700 if (actual) { 700 if (actual) {
701 *actual = (br + (pre<<div) - 1) / (pre<<div); 701 *actual = DIV_ROUND_UP(br, pre<<div);
702 PRINTD (DBG_QOS, "actual rate: %u", *actual); 702 PRINTD (DBG_QOS, "actual rate: %u", *actual);
703 } 703 }
704 return 0; 704 return 0;
@@ -1967,7 +1967,7 @@ static int __devinit hrz_init (hrz_dev * dev) {
1967 // Set the max AAL5 cell count to be just enough to contain the 1967 // Set the max AAL5 cell count to be just enough to contain the
1968 // largest AAL5 frame that the user wants to receive 1968 // largest AAL5 frame that the user wants to receive
1969 wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF, 1969 wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF,
1970 (max_rx_size + ATM_AAL5_TRAILER + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD); 1970 DIV_ROUND_UP(max_rx_size + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD));
1971 1971
1972 // Enable receive 1972 // Enable receive
1973 wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE); 1973 wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 3a504e94a4d9..e33ae0025b12 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1114,11 +1114,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1114 1114
1115 rpp = &vc->rcv.rx_pool; 1115 rpp = &vc->rcv.rx_pool;
1116 1116
1117 __skb_queue_tail(&rpp->queue, skb);
1117 rpp->len += skb->len; 1118 rpp->len += skb->len;
1118 if (!rpp->count++)
1119 rpp->first = skb;
1120 *rpp->last = skb;
1121 rpp->last = &skb->next;
1122 1119
1123 if (stat & SAR_RSQE_EPDU) { 1120 if (stat & SAR_RSQE_EPDU) {
1124 unsigned char *l1l2; 1121 unsigned char *l1l2;
@@ -1145,7 +1142,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1145 atomic_inc(&vcc->stats->rx_err); 1142 atomic_inc(&vcc->stats->rx_err);
1146 return; 1143 return;
1147 } 1144 }
1148 if (rpp->count > 1) { 1145 if (skb_queue_len(&rpp->queue) > 1) {
1149 struct sk_buff *sb; 1146 struct sk_buff *sb;
1150 1147
1151 skb = dev_alloc_skb(rpp->len); 1148 skb = dev_alloc_skb(rpp->len);
@@ -1161,12 +1158,9 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1161 dev_kfree_skb(skb); 1158 dev_kfree_skb(skb);
1162 return; 1159 return;
1163 } 1160 }
1164 sb = rpp->first; 1161 skb_queue_walk(&rpp->queue, sb)
1165 for (i = 0; i < rpp->count; i++) {
1166 memcpy(skb_put(skb, sb->len), 1162 memcpy(skb_put(skb, sb->len),
1167 sb->data, sb->len); 1163 sb->data, sb->len);
1168 sb = sb->next;
1169 }
1170 1164
1171 recycle_rx_pool_skb(card, rpp); 1165 recycle_rx_pool_skb(card, rpp);
1172 1166
@@ -1180,7 +1174,6 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1180 return; 1174 return;
1181 } 1175 }
1182 1176
1183 skb->next = NULL;
1184 flush_rx_pool(card, rpp); 1177 flush_rx_pool(card, rpp);
1185 1178
1186 if (!atm_charge(vcc, skb->truesize)) { 1179 if (!atm_charge(vcc, skb->truesize)) {
@@ -1918,25 +1911,18 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
1918static void 1911static void
1919flush_rx_pool(struct idt77252_dev *card, struct rx_pool *rpp) 1912flush_rx_pool(struct idt77252_dev *card, struct rx_pool *rpp)
1920{ 1913{
1914 skb_queue_head_init(&rpp->queue);
1921 rpp->len = 0; 1915 rpp->len = 0;
1922 rpp->count = 0;
1923 rpp->first = NULL;
1924 rpp->last = &rpp->first;
1925} 1916}
1926 1917
1927static void 1918static void
1928recycle_rx_pool_skb(struct idt77252_dev *card, struct rx_pool *rpp) 1919recycle_rx_pool_skb(struct idt77252_dev *card, struct rx_pool *rpp)
1929{ 1920{
1930 struct sk_buff *skb, *next; 1921 struct sk_buff *skb, *tmp;
1931 int i;
1932 1922
1933 skb = rpp->first; 1923 skb_queue_walk_safe(&rpp->queue, skb, tmp)
1934 for (i = 0; i < rpp->count; i++) {
1935 next = skb->next;
1936 skb->next = NULL;
1937 recycle_rx_skb(card, skb); 1924 recycle_rx_skb(card, skb);
1938 skb = next; 1925
1939 }
1940 flush_rx_pool(card, rpp); 1926 flush_rx_pool(card, rpp);
1941} 1927}
1942 1928
@@ -2537,7 +2523,7 @@ idt77252_close(struct atm_vcc *vcc)
2537 waitfor_idle(card); 2523 waitfor_idle(card);
2538 spin_unlock_irqrestore(&card->cmd_lock, flags); 2524 spin_unlock_irqrestore(&card->cmd_lock, flags);
2539 2525
2540 if (vc->rcv.rx_pool.count) { 2526 if (skb_queue_len(&vc->rcv.rx_pool.queue) != 0) {
2541 DPRINTK("%s: closing a VC with pending rx buffers.\n", 2527 DPRINTK("%s: closing a VC with pending rx buffers.\n",
2542 card->name); 2528 card->name);
2543 2529
@@ -2970,7 +2956,7 @@ close_card_oam(struct idt77252_dev *card)
2970 waitfor_idle(card); 2956 waitfor_idle(card);
2971 spin_unlock_irqrestore(&card->cmd_lock, flags); 2957 spin_unlock_irqrestore(&card->cmd_lock, flags);
2972 2958
2973 if (vc->rcv.rx_pool.count) { 2959 if (skb_queue_len(&vc->rcv.rx_pool.queue) != 0) {
2974 DPRINTK("%s: closing a VC " 2960 DPRINTK("%s: closing a VC "
2975 "with pending rx buffers.\n", 2961 "with pending rx buffers.\n",
2976 card->name); 2962 card->name);
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index e83eaf120da0..5042bb2dab15 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -173,10 +173,8 @@ struct scq_info
173}; 173};
174 174
175struct rx_pool { 175struct rx_pool {
176 struct sk_buff *first; 176 struct sk_buff_head queue;
177 struct sk_buff **last;
178 unsigned int len; 177 unsigned int len;
179 unsigned int count;
180}; 178};
181 179
182struct aal1 { 180struct aal1 {
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 58583c6ac5be..752b1ba81f7e 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -496,8 +496,8 @@ static int open_rx_first(struct atm_vcc *vcc)
496 vcc->qos.rxtp.max_sdu = 65464; 496 vcc->qos.rxtp.max_sdu = 65464;
497 /* fix this - we may want to receive 64kB SDUs 497 /* fix this - we may want to receive 64kB SDUs
498 later */ 498 later */
499 cells = (vcc->qos.rxtp.max_sdu+ATM_AAL5_TRAILER+ 499 cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER,
500 ATM_CELL_PAYLOAD-1)/ATM_CELL_PAYLOAD; 500 ATM_CELL_PAYLOAD);
501 zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD); 501 zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD);
502 } 502 }
503 else { 503 else {
@@ -820,7 +820,7 @@ static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr)
820 } 820 }
821 else { 821 else {
822 i = 255; 822 i = 255;
823 m = (ATM_OC3_PCR*255+max-1)/max; 823 m = DIV_ROUND_UP(ATM_OC3_PCR*255, max);
824 } 824 }
825 } 825 }
826 if (i > m) { 826 if (i > m) {
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 5b4c6e649c11..93f3690396a5 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -159,11 +159,8 @@ struct aoedev {
159 sector_t ssize; 159 sector_t ssize;
160 struct timer_list timer; 160 struct timer_list timer;
161 spinlock_t lock; 161 spinlock_t lock;
162 struct sk_buff *sendq_hd; /* packets needing to be sent, list head */ 162 struct sk_buff_head sendq;
163 struct sk_buff *sendq_tl; 163 struct sk_buff_head skbpool;
164 struct sk_buff *skbpool_hd;
165 struct sk_buff *skbpool_tl;
166 int nskbpool;
167 mempool_t *bufpool; /* for deadlock-free Buf allocation */ 164 mempool_t *bufpool; /* for deadlock-free Buf allocation */
168 struct list_head bufq; /* queue of bios to work on */ 165 struct list_head bufq; /* queue of bios to work on */
169 struct buf *inprocess; /* the one we're currently working on */ 166 struct buf *inprocess; /* the one we're currently working on */
@@ -199,7 +196,7 @@ int aoedev_flush(const char __user *str, size_t size);
199 196
200int aoenet_init(void); 197int aoenet_init(void);
201void aoenet_exit(void); 198void aoenet_exit(void);
202void aoenet_xmit(struct sk_buff *); 199void aoenet_xmit(struct sk_buff_head *);
203int is_aoe_netif(struct net_device *ifp); 200int is_aoe_netif(struct net_device *ifp);
204int set_aoe_iflist(const char __user *str, size_t size); 201int set_aoe_iflist(const char __user *str, size_t size);
205 202
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 0c39782b2660..fd2cf5439a1c 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -158,9 +158,9 @@ aoeblk_release(struct inode *inode, struct file *filp)
158static int 158static int
159aoeblk_make_request(struct request_queue *q, struct bio *bio) 159aoeblk_make_request(struct request_queue *q, struct bio *bio)
160{ 160{
161 struct sk_buff_head queue;
161 struct aoedev *d; 162 struct aoedev *d;
162 struct buf *buf; 163 struct buf *buf;
163 struct sk_buff *sl;
164 ulong flags; 164 ulong flags;
165 165
166 blk_queue_bounce(q, &bio); 166 blk_queue_bounce(q, &bio);
@@ -213,11 +213,11 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
213 list_add_tail(&buf->bufs, &d->bufq); 213 list_add_tail(&buf->bufs, &d->bufq);
214 214
215 aoecmd_work(d); 215 aoecmd_work(d);
216 sl = d->sendq_hd; 216 __skb_queue_head_init(&queue);
217 d->sendq_hd = d->sendq_tl = NULL; 217 skb_queue_splice_init(&d->sendq, &queue);
218 218
219 spin_unlock_irqrestore(&d->lock, flags); 219 spin_unlock_irqrestore(&d->lock, flags);
220 aoenet_xmit(sl); 220 aoenet_xmit(&queue);
221 221
222 return 0; 222 return 0;
223} 223}
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 181ebb85f0be..1f56d2c5b7fc 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -9,6 +9,7 @@
9#include <linux/completion.h> 9#include <linux/completion.h>
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/smp_lock.h> 11#include <linux/smp_lock.h>
12#include <linux/skbuff.h>
12#include "aoe.h" 13#include "aoe.h"
13 14
14enum { 15enum {
@@ -103,7 +104,12 @@ loop:
103 spin_lock_irqsave(&d->lock, flags); 104 spin_lock_irqsave(&d->lock, flags);
104 goto loop; 105 goto loop;
105 } 106 }
106 aoenet_xmit(skb); 107 if (skb) {
108 struct sk_buff_head queue;
109 __skb_queue_head_init(&queue);
110 __skb_queue_tail(&queue, skb);
111 aoenet_xmit(&queue);
112 }
107 aoecmd_cfg(major, minor); 113 aoecmd_cfg(major, minor);
108 return 0; 114 return 0;
109} 115}
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 2f1746295d06..e33da30be4c4 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -114,29 +114,22 @@ ifrotate(struct aoetgt *t)
114static void 114static void
115skb_pool_put(struct aoedev *d, struct sk_buff *skb) 115skb_pool_put(struct aoedev *d, struct sk_buff *skb)
116{ 116{
117 if (!d->skbpool_hd) 117 __skb_queue_tail(&d->skbpool, skb);
118 d->skbpool_hd = skb;
119 else
120 d->skbpool_tl->next = skb;
121 d->skbpool_tl = skb;
122} 118}
123 119
124static struct sk_buff * 120static struct sk_buff *
125skb_pool_get(struct aoedev *d) 121skb_pool_get(struct aoedev *d)
126{ 122{
127 struct sk_buff *skb; 123 struct sk_buff *skb = skb_peek(&d->skbpool);
128 124
129 skb = d->skbpool_hd;
130 if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) { 125 if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
131 d->skbpool_hd = skb->next; 126 __skb_unlink(skb, &d->skbpool);
132 skb->next = NULL;
133 return skb; 127 return skb;
134 } 128 }
135 if (d->nskbpool < NSKBPOOLMAX 129 if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
136 && (skb = new_skb(ETH_ZLEN))) { 130 (skb = new_skb(ETH_ZLEN)))
137 d->nskbpool++;
138 return skb; 131 return skb;
139 } 132
140 return NULL; 133 return NULL;
141} 134}
142 135
@@ -293,29 +286,22 @@ aoecmd_ata_rw(struct aoedev *d)
293 286
294 skb->dev = t->ifp->nd; 287 skb->dev = t->ifp->nd;
295 skb = skb_clone(skb, GFP_ATOMIC); 288 skb = skb_clone(skb, GFP_ATOMIC);
296 if (skb) { 289 if (skb)
297 if (d->sendq_hd) 290 __skb_queue_tail(&d->sendq, skb);
298 d->sendq_tl->next = skb;
299 else
300 d->sendq_hd = skb;
301 d->sendq_tl = skb;
302 }
303 return 1; 291 return 1;
304} 292}
305 293
306/* some callers cannot sleep, and they can call this function, 294/* some callers cannot sleep, and they can call this function,
307 * transmitting the packets later, when interrupts are on 295 * transmitting the packets later, when interrupts are on
308 */ 296 */
309static struct sk_buff * 297static void
310aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail) 298aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
311{ 299{
312 struct aoe_hdr *h; 300 struct aoe_hdr *h;
313 struct aoe_cfghdr *ch; 301 struct aoe_cfghdr *ch;
314 struct sk_buff *skb, *sl, *sl_tail; 302 struct sk_buff *skb;
315 struct net_device *ifp; 303 struct net_device *ifp;
316 304
317 sl = sl_tail = NULL;
318
319 read_lock(&dev_base_lock); 305 read_lock(&dev_base_lock);
320 for_each_netdev(&init_net, ifp) { 306 for_each_netdev(&init_net, ifp) {
321 dev_hold(ifp); 307 dev_hold(ifp);
@@ -329,8 +315,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
329 } 315 }
330 skb_put(skb, sizeof *h + sizeof *ch); 316 skb_put(skb, sizeof *h + sizeof *ch);
331 skb->dev = ifp; 317 skb->dev = ifp;
332 if (sl_tail == NULL) 318 __skb_queue_tail(queue, skb);
333 sl_tail = skb;
334 h = (struct aoe_hdr *) skb_mac_header(skb); 319 h = (struct aoe_hdr *) skb_mac_header(skb);
335 memset(h, 0, sizeof *h + sizeof *ch); 320 memset(h, 0, sizeof *h + sizeof *ch);
336 321
@@ -342,16 +327,10 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
342 h->minor = aoeminor; 327 h->minor = aoeminor;
343 h->cmd = AOECMD_CFG; 328 h->cmd = AOECMD_CFG;
344 329
345 skb->next = sl;
346 sl = skb;
347cont: 330cont:
348 dev_put(ifp); 331 dev_put(ifp);
349 } 332 }
350 read_unlock(&dev_base_lock); 333 read_unlock(&dev_base_lock);
351
352 if (tail != NULL)
353 *tail = sl_tail;
354 return sl;
355} 334}
356 335
357static void 336static void
@@ -406,11 +385,7 @@ resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
406 skb = skb_clone(skb, GFP_ATOMIC); 385 skb = skb_clone(skb, GFP_ATOMIC);
407 if (skb == NULL) 386 if (skb == NULL)
408 return; 387 return;
409 if (d->sendq_hd) 388 __skb_queue_tail(&d->sendq, skb);
410 d->sendq_tl->next = skb;
411 else
412 d->sendq_hd = skb;
413 d->sendq_tl = skb;
414} 389}
415 390
416static int 391static int
@@ -508,16 +483,15 @@ ata_scnt(unsigned char *packet) {
508static void 483static void
509rexmit_timer(ulong vp) 484rexmit_timer(ulong vp)
510{ 485{
486 struct sk_buff_head queue;
511 struct aoedev *d; 487 struct aoedev *d;
512 struct aoetgt *t, **tt, **te; 488 struct aoetgt *t, **tt, **te;
513 struct aoeif *ifp; 489 struct aoeif *ifp;
514 struct frame *f, *e; 490 struct frame *f, *e;
515 struct sk_buff *sl;
516 register long timeout; 491 register long timeout;
517 ulong flags, n; 492 ulong flags, n;
518 493
519 d = (struct aoedev *) vp; 494 d = (struct aoedev *) vp;
520 sl = NULL;
521 495
522 /* timeout is always ~150% of the moving average */ 496 /* timeout is always ~150% of the moving average */
523 timeout = d->rttavg; 497 timeout = d->rttavg;
@@ -589,7 +563,7 @@ rexmit_timer(ulong vp)
589 } 563 }
590 } 564 }
591 565
592 if (d->sendq_hd) { 566 if (!skb_queue_empty(&d->sendq)) {
593 n = d->rttavg <<= 1; 567 n = d->rttavg <<= 1;
594 if (n > MAXTIMER) 568 if (n > MAXTIMER)
595 d->rttavg = MAXTIMER; 569 d->rttavg = MAXTIMER;
@@ -600,15 +574,15 @@ rexmit_timer(ulong vp)
600 aoecmd_work(d); 574 aoecmd_work(d);
601 } 575 }
602 576
603 sl = d->sendq_hd; 577 __skb_queue_head_init(&queue);
604 d->sendq_hd = d->sendq_tl = NULL; 578 skb_queue_splice_init(&d->sendq, &queue);
605 579
606 d->timer.expires = jiffies + TIMERTICK; 580 d->timer.expires = jiffies + TIMERTICK;
607 add_timer(&d->timer); 581 add_timer(&d->timer);
608 582
609 spin_unlock_irqrestore(&d->lock, flags); 583 spin_unlock_irqrestore(&d->lock, flags);
610 584
611 aoenet_xmit(sl); 585 aoenet_xmit(&queue);
612} 586}
613 587
614/* enters with d->lock held */ 588/* enters with d->lock held */
@@ -767,12 +741,12 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
767void 741void
768aoecmd_ata_rsp(struct sk_buff *skb) 742aoecmd_ata_rsp(struct sk_buff *skb)
769{ 743{
744 struct sk_buff_head queue;
770 struct aoedev *d; 745 struct aoedev *d;
771 struct aoe_hdr *hin, *hout; 746 struct aoe_hdr *hin, *hout;
772 struct aoe_atahdr *ahin, *ahout; 747 struct aoe_atahdr *ahin, *ahout;
773 struct frame *f; 748 struct frame *f;
774 struct buf *buf; 749 struct buf *buf;
775 struct sk_buff *sl;
776 struct aoetgt *t; 750 struct aoetgt *t;
777 struct aoeif *ifp; 751 struct aoeif *ifp;
778 register long n; 752 register long n;
@@ -893,21 +867,21 @@ aoecmd_ata_rsp(struct sk_buff *skb)
893 867
894 aoecmd_work(d); 868 aoecmd_work(d);
895xmit: 869xmit:
896 sl = d->sendq_hd; 870 __skb_queue_head_init(&queue);
897 d->sendq_hd = d->sendq_tl = NULL; 871 skb_queue_splice_init(&d->sendq, &queue);
898 872
899 spin_unlock_irqrestore(&d->lock, flags); 873 spin_unlock_irqrestore(&d->lock, flags);
900 aoenet_xmit(sl); 874 aoenet_xmit(&queue);
901} 875}
902 876
903void 877void
904aoecmd_cfg(ushort aoemajor, unsigned char aoeminor) 878aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
905{ 879{
906 struct sk_buff *sl; 880 struct sk_buff_head queue;
907
908 sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
909 881
910 aoenet_xmit(sl); 882 __skb_queue_head_init(&queue);
883 aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
884 aoenet_xmit(&queue);
911} 885}
912 886
913struct sk_buff * 887struct sk_buff *
@@ -1076,7 +1050,12 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
1076 1050
1077 spin_unlock_irqrestore(&d->lock, flags); 1051 spin_unlock_irqrestore(&d->lock, flags);
1078 1052
1079 aoenet_xmit(sl); 1053 if (sl) {
1054 struct sk_buff_head queue;
1055 __skb_queue_head_init(&queue);
1056 __skb_queue_tail(&queue, sl);
1057 aoenet_xmit(&queue);
1058 }
1080} 1059}
1081 1060
1082void 1061void
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index a1d813ab0d6b..75a610adf515 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -188,14 +188,12 @@ skbfree(struct sk_buff *skb)
188static void 188static void
189skbpoolfree(struct aoedev *d) 189skbpoolfree(struct aoedev *d)
190{ 190{
191 struct sk_buff *skb; 191 struct sk_buff *skb, *tmp;
192 192
193 while ((skb = d->skbpool_hd)) { 193 skb_queue_walk_safe(&d->skbpool, skb, tmp)
194 d->skbpool_hd = skb->next;
195 skb->next = NULL;
196 skbfree(skb); 194 skbfree(skb);
197 } 195
198 d->skbpool_tl = NULL; 196 __skb_queue_head_init(&d->skbpool);
199} 197}
200 198
201/* find it or malloc it */ 199/* find it or malloc it */
@@ -217,6 +215,8 @@ aoedev_by_sysminor_m(ulong sysminor)
217 goto out; 215 goto out;
218 INIT_WORK(&d->work, aoecmd_sleepwork); 216 INIT_WORK(&d->work, aoecmd_sleepwork);
219 spin_lock_init(&d->lock); 217 spin_lock_init(&d->lock);
218 skb_queue_head_init(&d->sendq);
219 skb_queue_head_init(&d->skbpool);
220 init_timer(&d->timer); 220 init_timer(&d->timer);
221 d->timer.data = (ulong) d; 221 d->timer.data = (ulong) d;
222 d->timer.function = dummy_timer; 222 d->timer.function = dummy_timer;
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 7b15a5e9cec0..7f83ad90e76f 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -7,6 +7,7 @@
7#include <linux/hdreg.h> 7#include <linux/hdreg.h>
8#include <linux/blkdev.h> 8#include <linux/blkdev.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/skbuff.h>
10#include "aoe.h" 11#include "aoe.h"
11 12
12MODULE_LICENSE("GPL"); 13MODULE_LICENSE("GPL");
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 0c81ca731287..9157d64270cb 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -95,13 +95,12 @@ mac_addr(char addr[6])
95} 95}
96 96
97void 97void
98aoenet_xmit(struct sk_buff *sl) 98aoenet_xmit(struct sk_buff_head *queue)
99{ 99{
100 struct sk_buff *skb; 100 struct sk_buff *skb, *tmp;
101 101
102 while ((skb = sl)) { 102 skb_queue_walk_safe(queue, skb, tmp) {
103 sl = sl->next; 103 __skb_unlink(skb, queue);
104 skb->next = skb->prev = NULL;
105 dev_queue_xmit(skb); 104 dev_queue_xmit(skb);
106 } 105 }
107} 106}
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 4d37bb312ee3..7938062c1cc7 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -352,14 +352,14 @@ static int bcsp_flush(struct hci_uart *hu)
352/* Remove ack'ed packets */ 352/* Remove ack'ed packets */
353static void bcsp_pkt_cull(struct bcsp_struct *bcsp) 353static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
354{ 354{
355 struct sk_buff *skb, *tmp;
355 unsigned long flags; 356 unsigned long flags;
356 struct sk_buff *skb;
357 int i, pkts_to_be_removed; 357 int i, pkts_to_be_removed;
358 u8 seqno; 358 u8 seqno;
359 359
360 spin_lock_irqsave(&bcsp->unack.lock, flags); 360 spin_lock_irqsave(&bcsp->unack.lock, flags);
361 361
362 pkts_to_be_removed = bcsp->unack.qlen; 362 pkts_to_be_removed = skb_queue_len(&bcsp->unack);
363 seqno = bcsp->msgq_txseq; 363 seqno = bcsp->msgq_txseq;
364 364
365 while (pkts_to_be_removed) { 365 while (pkts_to_be_removed) {
@@ -373,19 +373,19 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
373 BT_ERR("Peer acked invalid packet"); 373 BT_ERR("Peer acked invalid packet");
374 374
375 BT_DBG("Removing %u pkts out of %u, up to seqno %u", 375 BT_DBG("Removing %u pkts out of %u, up to seqno %u",
376 pkts_to_be_removed, bcsp->unack.qlen, (seqno - 1) & 0x07); 376 pkts_to_be_removed, skb_queue_len(&bcsp->unack),
377 (seqno - 1) & 0x07);
377 378
378 for (i = 0, skb = ((struct sk_buff *) &bcsp->unack)->next; i < pkts_to_be_removed 379 i = 0;
379 && skb != (struct sk_buff *) &bcsp->unack; i++) { 380 skb_queue_walk_safe(&bcsp->unack, skb, tmp) {
380 struct sk_buff *nskb; 381 if (i++ >= pkts_to_be_removed)
382 break;
381 383
382 nskb = skb->next;
383 __skb_unlink(skb, &bcsp->unack); 384 __skb_unlink(skb, &bcsp->unack);
384 kfree_skb(skb); 385 kfree_skb(skb);
385 skb = nskb;
386 } 386 }
387 387
388 if (bcsp->unack.qlen == 0) 388 if (skb_queue_empty(&bcsp->unack))
389 del_timer(&bcsp->tbcsp); 389 del_timer(&bcsp->tbcsp);
390 390
391 spin_unlock_irqrestore(&bcsp->unack.lock, flags); 391 spin_unlock_irqrestore(&bcsp->unack.lock, flags);
diff --git a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h
index 1790cc8e431e..8e659914523f 100644
--- a/drivers/bluetooth/hci_usb.h
+++ b/drivers/bluetooth/hci_usb.h
@@ -70,8 +70,8 @@ static inline void _urb_queue_head(struct _urb_queue *q, struct _urb *_urb)
70{ 70{
71 unsigned long flags; 71 unsigned long flags;
72 spin_lock_irqsave(&q->lock, flags); 72 spin_lock_irqsave(&q->lock, flags);
73 /* _urb_unlink needs to know which spinlock to use, thus mb(). */ 73 /* _urb_unlink needs to know which spinlock to use, thus smp_mb(). */
74 _urb->queue = q; mb(); list_add(&_urb->list, &q->head); 74 _urb->queue = q; smp_mb(); list_add(&_urb->list, &q->head);
75 spin_unlock_irqrestore(&q->lock, flags); 75 spin_unlock_irqrestore(&q->lock, flags);
76} 76}
77 77
@@ -79,8 +79,8 @@ static inline void _urb_queue_tail(struct _urb_queue *q, struct _urb *_urb)
79{ 79{
80 unsigned long flags; 80 unsigned long flags;
81 spin_lock_irqsave(&q->lock, flags); 81 spin_lock_irqsave(&q->lock, flags);
82 /* _urb_unlink needs to know which spinlock to use, thus mb(). */ 82 /* _urb_unlink needs to know which spinlock to use, thus smp_mb(). */
83 _urb->queue = q; mb(); list_add_tail(&_urb->list, &q->head); 83 _urb->queue = q; smp_mb(); list_add_tail(&_urb->list, &q->head);
84 spin_unlock_irqrestore(&q->lock, flags); 84 spin_unlock_irqrestore(&q->lock, flags);
85} 85}
86 86
@@ -89,7 +89,7 @@ static inline void _urb_unlink(struct _urb *_urb)
89 struct _urb_queue *q; 89 struct _urb_queue *q;
90 unsigned long flags; 90 unsigned long flags;
91 91
92 mb(); 92 smp_mb();
93 q = _urb->queue; 93 q = _urb->queue;
94 /* If q is NULL, it will die at easy-to-debug NULL pointer dereference. 94 /* If q is NULL, it will die at easy-to-debug NULL pointer dereference.
95 No need to BUG(). */ 95 No need to BUG(). */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1838aa3d24fe..7ce1ac4baa6d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -407,7 +407,7 @@ struct entropy_store {
407 /* read-write data: */ 407 /* read-write data: */
408 spinlock_t lock; 408 spinlock_t lock;
409 unsigned add_ptr; 409 unsigned add_ptr;
410 int entropy_count; 410 int entropy_count; /* Must at no time exceed ->POOLBITS! */
411 int input_rotate; 411 int input_rotate;
412}; 412};
413 413
@@ -520,6 +520,7 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
520static void credit_entropy_bits(struct entropy_store *r, int nbits) 520static void credit_entropy_bits(struct entropy_store *r, int nbits)
521{ 521{
522 unsigned long flags; 522 unsigned long flags;
523 int entropy_count;
523 524
524 if (!nbits) 525 if (!nbits)
525 return; 526 return;
@@ -527,20 +528,20 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
527 spin_lock_irqsave(&r->lock, flags); 528 spin_lock_irqsave(&r->lock, flags);
528 529
529 DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); 530 DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
530 r->entropy_count += nbits; 531 entropy_count = r->entropy_count;
531 if (r->entropy_count < 0) { 532 entropy_count += nbits;
533 if (entropy_count < 0) {
532 DEBUG_ENT("negative entropy/overflow\n"); 534 DEBUG_ENT("negative entropy/overflow\n");
533 r->entropy_count = 0; 535 entropy_count = 0;
534 } else if (r->entropy_count > r->poolinfo->POOLBITS) 536 } else if (entropy_count > r->poolinfo->POOLBITS)
535 r->entropy_count = r->poolinfo->POOLBITS; 537 entropy_count = r->poolinfo->POOLBITS;
538 r->entropy_count = entropy_count;
536 539
537 /* should we wake readers? */ 540 /* should we wake readers? */
538 if (r == &input_pool && 541 if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
539 r->entropy_count >= random_read_wakeup_thresh) {
540 wake_up_interruptible(&random_read_wait); 542 wake_up_interruptible(&random_read_wait);
541 kill_fasync(&fasync, SIGIO, POLL_IN); 543 kill_fasync(&fasync, SIGIO, POLL_IN);
542 } 544 }
543
544 spin_unlock_irqrestore(&r->lock, flags); 545 spin_unlock_irqrestore(&r->lock, flags);
545} 546}
546 547
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 5ca1d80de182..4eee533f3f4a 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -21,6 +21,7 @@
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/delay.h>
24#include <asm/io.h> 25#include <asm/io.h>
25 26
26/* 27/*
@@ -151,13 +152,13 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
151 */ 152 */
152static int verify_pmtmr_rate(void) 153static int verify_pmtmr_rate(void)
153{ 154{
154 u32 value1, value2; 155 cycle_t value1, value2;
155 unsigned long count, delta; 156 unsigned long count, delta;
156 157
157 mach_prepare_counter(); 158 mach_prepare_counter();
158 value1 = read_pmtmr(); 159 value1 = clocksource_acpi_pm.read();
159 mach_countup(&count); 160 mach_countup(&count);
160 value2 = read_pmtmr(); 161 value2 = clocksource_acpi_pm.read();
161 delta = (value2 - value1) & ACPI_PM_MASK; 162 delta = (value2 - value1) & ACPI_PM_MASK;
162 163
163 /* Check that the PMTMR delta is within 5% of what we expect */ 164 /* Check that the PMTMR delta is within 5% of what we expect */
@@ -175,10 +176,13 @@ static int verify_pmtmr_rate(void)
175#define verify_pmtmr_rate() (0) 176#define verify_pmtmr_rate() (0)
176#endif 177#endif
177 178
179/* Number of monotonicity checks to perform during initialization */
180#define ACPI_PM_MONOTONICITY_CHECKS 10
181
178static int __init init_acpi_pm_clocksource(void) 182static int __init init_acpi_pm_clocksource(void)
179{ 183{
180 u32 value1, value2; 184 cycle_t value1, value2;
181 unsigned int i; 185 unsigned int i, j, good = 0;
182 186
183 if (!pmtmr_ioport) 187 if (!pmtmr_ioport)
184 return -ENODEV; 188 return -ENODEV;
@@ -187,24 +191,32 @@ static int __init init_acpi_pm_clocksource(void)
187 clocksource_acpi_pm.shift); 191 clocksource_acpi_pm.shift);
188 192
189 /* "verify" this timing source: */ 193 /* "verify" this timing source: */
190 value1 = read_pmtmr(); 194 for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
191 for (i = 0; i < 10000; i++) { 195 value1 = clocksource_acpi_pm.read();
192 value2 = read_pmtmr(); 196 for (i = 0; i < 10000; i++) {
193 if (value2 == value1) 197 value2 = clocksource_acpi_pm.read();
194 continue; 198 if (value2 == value1)
195 if (value2 > value1) 199 continue;
196 goto pm_good; 200 if (value2 > value1)
197 if ((value2 < value1) && ((value2) < 0xFFF)) 201 good++;
198 goto pm_good; 202 break;
199 printk(KERN_INFO "PM-Timer had inconsistent results:" 203 if ((value2 < value1) && ((value2) < 0xFFF))
200 " 0x%#x, 0x%#x - aborting.\n", value1, value2); 204 good++;
201 return -EINVAL; 205 break;
206 printk(KERN_INFO "PM-Timer had inconsistent results:"
207 " 0x%#llx, 0x%#llx - aborting.\n",
208 value1, value2);
209 return -EINVAL;
210 }
211 udelay(300 * i);
212 }
213
214 if (good != ACPI_PM_MONOTONICITY_CHECKS) {
215 printk(KERN_INFO "PM-Timer failed consistency check "
216 " (0x%#llx) - aborting.\n", value1);
217 return -ENODEV;
202 } 218 }
203 printk(KERN_INFO "PM-Timer had no reasonable result:"
204 " 0x%#x - aborting.\n", value1);
205 return -ENODEV;
206 219
207pm_good:
208 if (verify_pmtmr_rate() != 0) 220 if (verify_pmtmr_rate() != 0)
209 return -ENODEV; 221 return -ENODEV;
210 222
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 8024e3bfd877..b91ef63126ed 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -669,8 +669,7 @@ static int __init ibft_register_kobjects(struct ibft_table_header *header,
669 669
670 control = (void *)header + sizeof(*header); 670 control = (void *)header + sizeof(*header);
671 end = (void *)control + control->hdr.length; 671 end = (void *)control + control->hdr.length;
672 eot_offset = (void *)header + header->length - 672 eot_offset = (void *)header + header->length - (void *)control;
673 (void *)control - sizeof(*header);
674 rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control, 673 rc = ibft_verify_hdr("control", (struct ibft_hdr *)control, id_control,
675 sizeof(*control)); 674 sizeof(*control));
676 675
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 3331f88dcfb6..248ab4a7d39f 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -223,7 +223,7 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
223 return 0; 223 return 0;
224 DRM_UDELAY(1); 224 DRM_UDELAY(1);
225 } 225 }
226 DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n", 226 DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n",
227 RADEON_READ(RADEON_RBBM_STATUS), 227 RADEON_READ(RADEON_RBBM_STATUS),
228 RADEON_READ(R300_VAP_CNTL_STATUS)); 228 RADEON_READ(R300_VAP_CNTL_STATUS));
229 229
@@ -252,7 +252,7 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
252 } 252 }
253 DRM_UDELAY(1); 253 DRM_UDELAY(1);
254 } 254 }
255 DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n", 255 DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n",
256 RADEON_READ(RADEON_RBBM_STATUS), 256 RADEON_READ(RADEON_RBBM_STATUS),
257 RADEON_READ(R300_VAP_CNTL_STATUS)); 257 RADEON_READ(R300_VAP_CNTL_STATUS));
258 258
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 840e634fa31f..640cbb237328 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -31,13 +31,84 @@
31#include <linux/clk.h> 31#include <linux/clk.h>
32#include <linux/io.h> 32#include <linux/io.h>
33 33
34/* Transmit operation: */
35/* */
36/* 0 byte transmit */
37/* BUS: S A8 ACK P */
38/* IRQ: DTE WAIT */
39/* ICIC: */
40/* ICCR: 0x94 0x90 */
41/* ICDR: A8 */
42/* */
43/* 1 byte transmit */
44/* BUS: S A8 ACK D8(1) ACK P */
45/* IRQ: DTE WAIT WAIT */
46/* ICIC: -DTE */
47/* ICCR: 0x94 0x90 */
48/* ICDR: A8 D8(1) */
49/* */
50/* 2 byte transmit */
51/* BUS: S A8 ACK D8(1) ACK D8(2) ACK P */
52/* IRQ: DTE WAIT WAIT WAIT */
53/* ICIC: -DTE */
54/* ICCR: 0x94 0x90 */
55/* ICDR: A8 D8(1) D8(2) */
56/* */
57/* 3 bytes or more, +---------+ gets repeated */
58/* */
59/* */
60/* Receive operation: */
61/* */
62/* 0 byte receive - not supported since slave may hold SDA low */
63/* */
64/* 1 byte receive [TX] | [RX] */
65/* BUS: S A8 ACK | D8(1) ACK P */
66/* IRQ: DTE WAIT | WAIT DTE */
67/* ICIC: -DTE | +DTE */
68/* ICCR: 0x94 0x81 | 0xc0 */
69/* ICDR: A8 | D8(1) */
70/* */
71/* 2 byte receive [TX]| [RX] */
72/* BUS: S A8 ACK | D8(1) ACK D8(2) ACK P */
73/* IRQ: DTE WAIT | WAIT WAIT DTE */
74/* ICIC: -DTE | +DTE */
75/* ICCR: 0x94 0x81 | 0xc0 */
76/* ICDR: A8 | D8(1) D8(2) */
77/* */
78/* 3 byte receive [TX] | [RX] */
79/* BUS: S A8 ACK | D8(1) ACK D8(2) ACK D8(3) ACK P */
80/* IRQ: DTE WAIT | WAIT WAIT WAIT DTE */
81/* ICIC: -DTE | +DTE */
82/* ICCR: 0x94 0x81 | 0xc0 */
83/* ICDR: A8 | D8(1) D8(2) D8(3) */
84/* */
85/* 4 bytes or more, this part is repeated +---------+ */
86/* */
87/* */
88/* Interrupt order and BUSY flag */
89/* ___ _ */
90/* SDA ___\___XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXAAAAAAAAA___/ */
91/* SCL \_/1\_/2\_/3\_/4\_/5\_/6\_/7\_/8\___/9\_____/ */
92/* */
93/* S D7 D6 D5 D4 D3 D2 D1 D0 P */
94/* ___ */
95/* WAIT IRQ ________________________________/ \___________ */
96/* TACK IRQ ____________________________________/ \_______ */
97/* DTE IRQ __________________________________________/ \_ */
98/* AL IRQ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
99/* _______________________________________________ */
100/* BUSY __/ \_ */
101/* */
102
34enum sh_mobile_i2c_op { 103enum sh_mobile_i2c_op {
35 OP_START = 0, 104 OP_START = 0,
36 OP_TX_ONLY, 105 OP_TX_FIRST,
106 OP_TX,
37 OP_TX_STOP, 107 OP_TX_STOP,
38 OP_TX_TO_RX, 108 OP_TX_TO_RX,
39 OP_RX_ONLY, 109 OP_RX,
40 OP_RX_STOP, 110 OP_RX_STOP,
111 OP_RX_STOP_DATA,
41}; 112};
42 113
43struct sh_mobile_i2c_data { 114struct sh_mobile_i2c_data {
@@ -127,25 +198,34 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
127 spin_lock_irqsave(&pd->lock, flags); 198 spin_lock_irqsave(&pd->lock, flags);
128 199
129 switch (op) { 200 switch (op) {
130 case OP_START: 201 case OP_START: /* issue start and trigger DTE interrupt */
131 iowrite8(0x94, ICCR(pd)); 202 iowrite8(0x94, ICCR(pd));
132 break; 203 break;
133 case OP_TX_ONLY: 204 case OP_TX_FIRST: /* disable DTE interrupt and write data */
205 iowrite8(ICIC_WAITE | ICIC_ALE | ICIC_TACKE, ICIC(pd));
134 iowrite8(data, ICDR(pd)); 206 iowrite8(data, ICDR(pd));
135 break; 207 break;
136 case OP_TX_STOP: 208 case OP_TX: /* write data */
137 iowrite8(data, ICDR(pd)); 209 iowrite8(data, ICDR(pd));
138 iowrite8(0x90, ICCR(pd));
139 iowrite8(ICIC_ALE | ICIC_TACKE, ICIC(pd));
140 break; 210 break;
141 case OP_TX_TO_RX: 211 case OP_TX_STOP: /* write data and issue a stop afterwards */
142 iowrite8(data, ICDR(pd)); 212 iowrite8(data, ICDR(pd));
213 iowrite8(0x90, ICCR(pd));
214 break;
215 case OP_TX_TO_RX: /* select read mode */
143 iowrite8(0x81, ICCR(pd)); 216 iowrite8(0x81, ICCR(pd));
144 break; 217 break;
145 case OP_RX_ONLY: 218 case OP_RX: /* just read data */
146 ret = ioread8(ICDR(pd)); 219 ret = ioread8(ICDR(pd));
147 break; 220 break;
148 case OP_RX_STOP: 221 case OP_RX_STOP: /* enable DTE interrupt, issue stop */
222 iowrite8(ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE,
223 ICIC(pd));
224 iowrite8(0xc0, ICCR(pd));
225 break;
226 case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */
227 iowrite8(ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE,
228 ICIC(pd));
149 ret = ioread8(ICDR(pd)); 229 ret = ioread8(ICDR(pd));
150 iowrite8(0xc0, ICCR(pd)); 230 iowrite8(0xc0, ICCR(pd));
151 break; 231 break;
@@ -157,58 +237,120 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd,
157 return ret; 237 return ret;
158} 238}
159 239
240static int sh_mobile_i2c_is_first_byte(struct sh_mobile_i2c_data *pd)
241{
242 if (pd->pos == -1)
243 return 1;
244
245 return 0;
246}
247
248static int sh_mobile_i2c_is_last_byte(struct sh_mobile_i2c_data *pd)
249{
250 if (pd->pos == (pd->msg->len - 1))
251 return 1;
252
253 return 0;
254}
255
256static void sh_mobile_i2c_get_data(struct sh_mobile_i2c_data *pd,
257 unsigned char *buf)
258{
259 switch (pd->pos) {
260 case -1:
261 *buf = (pd->msg->addr & 0x7f) << 1;
262 *buf |= (pd->msg->flags & I2C_M_RD) ? 1 : 0;
263 break;
264 default:
265 *buf = pd->msg->buf[pd->pos];
266 }
267}
268
269static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
270{
271 unsigned char data;
272
273 if (pd->pos == pd->msg->len)
274 return 1;
275
276 sh_mobile_i2c_get_data(pd, &data);
277
278 if (sh_mobile_i2c_is_last_byte(pd))
279 i2c_op(pd, OP_TX_STOP, data);
280 else if (sh_mobile_i2c_is_first_byte(pd))
281 i2c_op(pd, OP_TX_FIRST, data);
282 else
283 i2c_op(pd, OP_TX, data);
284
285 pd->pos++;
286 return 0;
287}
288
289static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
290{
291 unsigned char data;
292 int real_pos;
293
294 do {
295 if (pd->pos <= -1) {
296 sh_mobile_i2c_get_data(pd, &data);
297
298 if (sh_mobile_i2c_is_first_byte(pd))
299 i2c_op(pd, OP_TX_FIRST, data);
300 else
301 i2c_op(pd, OP_TX, data);
302 break;
303 }
304
305 if (pd->pos == 0) {
306 i2c_op(pd, OP_TX_TO_RX, 0);
307 break;
308 }
309
310 real_pos = pd->pos - 2;
311
312 if (pd->pos == pd->msg->len) {
313 if (real_pos < 0) {
314 i2c_op(pd, OP_RX_STOP, 0);
315 break;
316 }
317 data = i2c_op(pd, OP_RX_STOP_DATA, 0);
318 } else
319 data = i2c_op(pd, OP_RX, 0);
320
321 pd->msg->buf[real_pos] = data;
322 } while (0);
323
324 pd->pos++;
325 return pd->pos == (pd->msg->len + 2);
326}
327
160static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) 328static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
161{ 329{
162 struct platform_device *dev = dev_id; 330 struct platform_device *dev = dev_id;
163 struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev); 331 struct sh_mobile_i2c_data *pd = platform_get_drvdata(dev);
164 struct i2c_msg *msg = pd->msg; 332 unsigned char sr;
165 unsigned char data, sr; 333 int wakeup;
166 int wakeup = 0;
167 334
168 sr = ioread8(ICSR(pd)); 335 sr = ioread8(ICSR(pd));
169 pd->sr |= sr; 336 pd->sr |= sr; /* remember state */
170 337
171 dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr, 338 dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr,
172 (msg->flags & I2C_M_RD) ? "read" : "write", 339 (pd->msg->flags & I2C_M_RD) ? "read" : "write",
173 pd->pos, msg->len); 340 pd->pos, pd->msg->len);
174 341
175 if (sr & (ICSR_AL | ICSR_TACK)) { 342 if (sr & (ICSR_AL | ICSR_TACK)) {
176 iowrite8(0, ICIC(pd)); /* disable interrupts */ 343 /* don't interrupt transaction - continue to issue stop */
177 wakeup = 1; 344 iowrite8(sr & ~(ICSR_AL | ICSR_TACK), ICSR(pd));
178 goto do_wakeup; 345 wakeup = 0;
179 } 346 } else if (pd->msg->flags & I2C_M_RD)
347 wakeup = sh_mobile_i2c_isr_rx(pd);
348 else
349 wakeup = sh_mobile_i2c_isr_tx(pd);
180 350
181 if (pd->pos == msg->len) { 351 if (sr & ICSR_WAIT) /* TODO: add delay here to support slow acks */
182 i2c_op(pd, OP_RX_ONLY, 0); 352 iowrite8(sr & ~ICSR_WAIT, ICSR(pd));
183 wakeup = 1;
184 goto do_wakeup;
185 }
186 353
187 if (pd->pos == -1) {
188 data = (msg->addr & 0x7f) << 1;
189 data |= (msg->flags & I2C_M_RD) ? 1 : 0;
190 } else
191 data = msg->buf[pd->pos];
192
193 if ((pd->pos == -1) || !(msg->flags & I2C_M_RD)) {
194 if (msg->flags & I2C_M_RD)
195 i2c_op(pd, OP_TX_TO_RX, data);
196 else if (pd->pos == (msg->len - 1)) {
197 i2c_op(pd, OP_TX_STOP, data);
198 wakeup = 1;
199 } else
200 i2c_op(pd, OP_TX_ONLY, data);
201 } else {
202 if (pd->pos == (msg->len - 1))
203 data = i2c_op(pd, OP_RX_STOP, 0);
204 else
205 data = i2c_op(pd, OP_RX_ONLY, 0);
206
207 msg->buf[pd->pos] = data;
208 }
209 pd->pos++;
210
211 do_wakeup:
212 if (wakeup) { 354 if (wakeup) {
213 pd->sr |= SW_DONE; 355 pd->sr |= SW_DONE;
214 wake_up(&pd->wait); 356 wake_up(&pd->wait);
@@ -219,6 +361,11 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
219 361
220static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg) 362static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
221{ 363{
364 if (usr_msg->len == 0 && (usr_msg->flags & I2C_M_RD)) {
365 dev_err(pd->dev, "Unsupported zero length i2c read\n");
366 return -EIO;
367 }
368
222 /* Initialize channel registers */ 369 /* Initialize channel registers */
223 iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd)); 370 iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd));
224 371
@@ -233,9 +380,8 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg)
233 pd->pos = -1; 380 pd->pos = -1;
234 pd->sr = 0; 381 pd->sr = 0;
235 382
236 /* Enable all interrupts except wait */ 383 /* Enable all interrupts to begin with */
237 iowrite8(ioread8(ICIC(pd)) | ICIC_ALE | ICIC_TACKE | ICIC_DTEE, 384 iowrite8(ICIC_WAITE | ICIC_ALE | ICIC_TACKE | ICIC_DTEE, ICIC(pd));
238 ICIC(pd));
239 return 0; 385 return 0;
240} 386}
241 387
@@ -268,25 +414,18 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
268 if (!k) 414 if (!k)
269 dev_err(pd->dev, "Transfer request timed out\n"); 415 dev_err(pd->dev, "Transfer request timed out\n");
270 416
271 retry_count = 10; 417 retry_count = 1000;
272again: 418again:
273 val = ioread8(ICSR(pd)); 419 val = ioread8(ICSR(pd));
274 420
275 dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr); 421 dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr);
276 422
277 if ((val | pd->sr) & (ICSR_TACK | ICSR_AL)) {
278 err = -EIO;
279 break;
280 }
281
282 /* the interrupt handler may wake us up before the 423 /* the interrupt handler may wake us up before the
283 * transfer is finished, so poll the hardware 424 * transfer is finished, so poll the hardware
284 * until we're done. 425 * until we're done.
285 */ 426 */
286 427 if (val & ICSR_BUSY) {
287 if (!(!(val & ICSR_BUSY) && (val & ICSR_SCLM) && 428 udelay(10);
288 (val & ICSR_SDAM))) {
289 msleep(1);
290 if (retry_count--) 429 if (retry_count--)
291 goto again; 430 goto again;
292 431
@@ -294,6 +433,12 @@ again:
294 dev_err(pd->dev, "Polling timed out\n"); 433 dev_err(pd->dev, "Polling timed out\n");
295 break; 434 break;
296 } 435 }
436
437 /* handle missing acknowledge and arbitration lost */
438 if ((val | pd->sr) & (ICSR_TACK | ICSR_AL)) {
439 err = -EIO;
440 break;
441 }
297 } 442 }
298 443
299 deactivate_ch(pd); 444 deactivate_ch(pd);
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index a34758d29516..fc735ab08ff4 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -242,7 +242,7 @@ config BLK_DEV_IDEFLOPPY
242 module will be called ide-floppy. 242 module will be called ide-floppy.
243 243
244config BLK_DEV_IDESCSI 244config BLK_DEV_IDESCSI
245 tristate "SCSI emulation support" 245 tristate "SCSI emulation support (DEPRECATED)"
246 depends on SCSI 246 depends on SCSI
247 select IDE_ATAPI 247 select IDE_ATAPI
248 ---help--- 248 ---help---
@@ -255,20 +255,6 @@ config BLK_DEV_IDESCSI
255 and will allow you to use a SCSI device driver instead of a native 255 and will allow you to use a SCSI device driver instead of a native
256 ATAPI driver. 256 ATAPI driver.
257 257
258 This is useful if you have an ATAPI device for which no native
259 driver has been written (for example, an ATAPI PD-CD drive);
260 you can then use this emulation together with an appropriate SCSI
261 device driver. In order to do this, say Y here and to "SCSI support"
262 and "SCSI generic support", below. You must then provide the kernel
263 command line "hdx=ide-scsi" (try "man bootparam" or see the
264 documentation of your boot loader (lilo or loadlin) about how to
265 pass options to the kernel at boot time) for devices if you want the
266 native EIDE sub-drivers to skip over the native support, so that
267 this SCSI emulation can be used instead.
268
269 Note that this option does NOT allow you to attach SCSI devices to a
270 box that doesn't have a SCSI host adapter installed.
271
272 If both this SCSI emulation and native ATAPI support are compiled 258 If both this SCSI emulation and native ATAPI support are compiled
273 into the kernel, the native support will be used. 259 into the kernel, the native support will be used.
274 260
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index f788fa5a977b..4fd91dcf1dc2 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -343,11 +343,10 @@ static struct ide_port_info __devinitdata palm_bk3710_port_info = {
343 .mwdma_mask = ATA_MWDMA2, 343 .mwdma_mask = ATA_MWDMA2,
344}; 344};
345 345
346static int __devinit palm_bk3710_probe(struct platform_device *pdev) 346static int __init palm_bk3710_probe(struct platform_device *pdev)
347{ 347{
348 struct clk *clk; 348 struct clk *clk;
349 struct resource *mem, *irq; 349 struct resource *mem, *irq;
350 struct ide_host *host;
351 unsigned long base, rate; 350 unsigned long base, rate;
352 int i, rc; 351 int i, rc;
353 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 352 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
@@ -390,6 +389,7 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
390 hw.io_ports_array[i] = base + IDE_PALM_ATA_PRI_REG_OFFSET + i; 389 hw.io_ports_array[i] = base + IDE_PALM_ATA_PRI_REG_OFFSET + i;
391 hw.io_ports.ctl_addr = base + IDE_PALM_ATA_PRI_CTL_OFFSET; 390 hw.io_ports.ctl_addr = base + IDE_PALM_ATA_PRI_CTL_OFFSET;
392 hw.irq = irq->start; 391 hw.irq = irq->start;
392 hw.dev = &pdev->dev;
393 hw.chipset = ide_palm3710; 393 hw.chipset = ide_palm3710;
394 394
395 palm_bk3710_port_info.udma_mask = rate < 100000000 ? ATA_UDMA4 : 395 palm_bk3710_port_info.udma_mask = rate < 100000000 ? ATA_UDMA4 :
@@ -413,13 +413,11 @@ static struct platform_driver platform_bk_driver = {
413 .name = "palm_bk3710", 413 .name = "palm_bk3710",
414 .owner = THIS_MODULE, 414 .owner = THIS_MODULE,
415 }, 415 },
416 .probe = palm_bk3710_probe,
417 .remove = NULL,
418}; 416};
419 417
420static int __init palm_bk3710_init(void) 418static int __init palm_bk3710_init(void)
421{ 419{
422 return platform_driver_register(&platform_bk_driver); 420 return platform_driver_probe(&platform_bk_driver, palm_bk3710_probe);
423} 421}
424 422
425module_init(palm_bk3710_init); 423module_init(palm_bk3710_init);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 49a8c589e346..f1489999cf91 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1933,6 +1933,7 @@ static void ide_cd_remove(ide_drive_t *drive)
1933 1933
1934 ide_proc_unregister_driver(drive, info->driver); 1934 ide_proc_unregister_driver(drive, info->driver);
1935 1935
1936 blk_unregister_filter(info->disk);
1936 del_gendisk(info->disk); 1937 del_gendisk(info->disk);
1937 1938
1938 ide_cd_put(info); 1939 ide_cd_put(info);
@@ -2158,6 +2159,7 @@ static int ide_cd_probe(ide_drive_t *drive)
2158 g->fops = &idecd_ops; 2159 g->fops = &idecd_ops;
2159 g->flags |= GENHD_FL_REMOVABLE; 2160 g->flags |= GENHD_FL_REMOVABLE;
2160 add_disk(g); 2161 add_disk(g);
2162 blk_register_filter(g);
2161 return 0; 2163 return 0;
2162 2164
2163out_free_cd: 2165out_free_cd:
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 68b9cf0138b0..07ef88bd109b 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -445,20 +445,6 @@ static void idedisk_check_hpa(ide_drive_t *drive)
445 } 445 }
446} 446}
447 447
448/*
449 * Compute drive->capacity, the full capacity of the drive
450 * Called with drive->id != NULL.
451 *
452 * To compute capacity, this uses either of
453 *
454 * 1. CHS value set by user (whatever user sets will be trusted)
455 * 2. LBA value from target drive (require new ATA feature)
456 * 3. LBA value from system BIOS (new one is OK, old one may break)
457 * 4. CHS value from system BIOS (traditional style)
458 *
459 * in above order (i.e., if value of higher priority is available,
460 * reset will be ignored).
461 */
462static void init_idedisk_capacity(ide_drive_t *drive) 448static void init_idedisk_capacity(ide_drive_t *drive)
463{ 449{
464 struct hd_driveid *id = drive->id; 450 struct hd_driveid *id = drive->id;
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 2ec921bf3c60..18f4d7f6ce6d 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -63,7 +63,7 @@
63} 63}
64 64
65/* table of devices that work with this driver */ 65/* table of devices that work with this driver */
66static const struct usb_device_id bcm5974_table [] = { 66static const struct usb_device_id bcm5974_table[] = {
67 /* MacbookAir1.1 */ 67 /* MacbookAir1.1 */
68 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), 68 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
69 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO), 69 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
@@ -105,7 +105,7 @@ struct tp_header {
105 105
106/* trackpad finger structure */ 106/* trackpad finger structure */
107struct tp_finger { 107struct tp_finger {
108 __le16 origin; /* left/right origin? */ 108 __le16 origin; /* zero when switching track finger */
109 __le16 abs_x; /* absolute x coodinate */ 109 __le16 abs_x; /* absolute x coodinate */
110 __le16 abs_y; /* absolute y coodinate */ 110 __le16 abs_y; /* absolute y coodinate */
111 __le16 rel_x; /* relative x coodinate */ 111 __le16 rel_x; /* relative x coodinate */
@@ -159,6 +159,7 @@ struct bcm5974 {
159 struct bt_data *bt_data; /* button transferred data */ 159 struct bt_data *bt_data; /* button transferred data */
160 struct urb *tp_urb; /* trackpad usb request block */ 160 struct urb *tp_urb; /* trackpad usb request block */
161 struct tp_data *tp_data; /* trackpad transferred data */ 161 struct tp_data *tp_data; /* trackpad transferred data */
162 int fingers; /* number of fingers on trackpad */
162}; 163};
163 164
164/* logical dimensions */ 165/* logical dimensions */
@@ -172,6 +173,10 @@ struct bcm5974 {
172#define SN_WIDTH 100 /* width signal-to-noise ratio */ 173#define SN_WIDTH 100 /* width signal-to-noise ratio */
173#define SN_COORD 250 /* coordinate signal-to-noise ratio */ 174#define SN_COORD 250 /* coordinate signal-to-noise ratio */
174 175
176/* pressure thresholds */
177#define PRESSURE_LOW (2 * DIM_PRESSURE / SN_PRESSURE)
178#define PRESSURE_HIGH (3 * PRESSURE_LOW)
179
175/* device constants */ 180/* device constants */
176static const struct bcm5974_config bcm5974_config_table[] = { 181static const struct bcm5974_config bcm5974_config_table[] = {
177 { 182 {
@@ -248,6 +253,7 @@ static void setup_events_to_report(struct input_dev *input_dev,
248 0, cfg->y.dim, cfg->y.fuzz, 0); 253 0, cfg->y.dim, cfg->y.fuzz, 0);
249 254
250 __set_bit(EV_KEY, input_dev->evbit); 255 __set_bit(EV_KEY, input_dev->evbit);
256 __set_bit(BTN_TOUCH, input_dev->keybit);
251 __set_bit(BTN_TOOL_FINGER, input_dev->keybit); 257 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
252 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); 258 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
253 __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); 259 __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
@@ -273,32 +279,66 @@ static int report_tp_state(struct bcm5974 *dev, int size)
273 const struct tp_finger *f = dev->tp_data->finger; 279 const struct tp_finger *f = dev->tp_data->finger;
274 struct input_dev *input = dev->input; 280 struct input_dev *input = dev->input;
275 const int fingers = (size - 26) / 28; 281 const int fingers = (size - 26) / 28;
276 int p = 0, w, x, y, n = 0; 282 int raw_p, raw_w, raw_x, raw_y;
283 int ptest = 0, origin = 0, nmin = 0, nmax = 0;
284 int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0;
277 285
278 if (size < 26 || (size - 26) % 28 != 0) 286 if (size < 26 || (size - 26) % 28 != 0)
279 return -EIO; 287 return -EIO;
280 288
289 /* always track the first finger; when detached, start over */
281 if (fingers) { 290 if (fingers) {
282 p = raw2int(f->force_major); 291 raw_p = raw2int(f->force_major);
283 w = raw2int(f->size_major); 292 raw_w = raw2int(f->size_major);
284 x = raw2int(f->abs_x); 293 raw_x = raw2int(f->abs_x);
285 y = raw2int(f->abs_y); 294 raw_y = raw2int(f->abs_y);
286 n = p > 0 ? fingers : 0;
287 295
288 dprintk(9, 296 dprintk(9,
289 "bcm5974: p: %+05d w: %+05d x: %+05d y: %+05d n: %d\n", 297 "bcm5974: raw: p: %+05d w: %+05d x: %+05d y: %+05d\n",
290 p, w, x, y, n); 298 raw_p, raw_w, raw_x, raw_y);
299
300 ptest = int2bound(&c->p, raw_p);
301 origin = raw2int(f->origin);
302 }
291 303
292 input_report_abs(input, ABS_TOOL_WIDTH, int2bound(&c->w, w)); 304 /* while tracking finger still valid, count all fingers */
293 input_report_abs(input, ABS_X, int2bound(&c->x, x - c->x.devmin)); 305 if (ptest > PRESSURE_LOW && origin) {
294 input_report_abs(input, ABS_Y, int2bound(&c->y, c->y.devmax - y)); 306 abs_p = ptest;
307 abs_w = int2bound(&c->w, raw_w);
308 abs_x = int2bound(&c->x, raw_x - c->x.devmin);
309 abs_y = int2bound(&c->y, c->y.devmax - raw_y);
310 for (; f != dev->tp_data->finger + fingers; f++) {
311 ptest = int2bound(&c->p, raw2int(f->force_major));
312 if (ptest > PRESSURE_LOW)
313 nmax++;
314 if (ptest > PRESSURE_HIGH)
315 nmin++;
316 }
295 } 317 }
296 318
297 input_report_abs(input, ABS_PRESSURE, int2bound(&c->p, p)); 319 if (dev->fingers < nmin)
320 dev->fingers = nmin;
321 if (dev->fingers > nmax)
322 dev->fingers = nmax;
323
324 input_report_key(input, BTN_TOUCH, dev->fingers > 0);
325 input_report_key(input, BTN_TOOL_FINGER, dev->fingers == 1);
326 input_report_key(input, BTN_TOOL_DOUBLETAP, dev->fingers == 2);
327 input_report_key(input, BTN_TOOL_TRIPLETAP, dev->fingers > 2);
298 328
299 input_report_key(input, BTN_TOOL_FINGER, n == 1); 329 input_report_abs(input, ABS_PRESSURE, abs_p);
300 input_report_key(input, BTN_TOOL_DOUBLETAP, n == 2); 330 input_report_abs(input, ABS_TOOL_WIDTH, abs_w);
301 input_report_key(input, BTN_TOOL_TRIPLETAP, n > 2); 331
332 if (abs_p) {
333 input_report_abs(input, ABS_X, abs_x);
334 input_report_abs(input, ABS_Y, abs_y);
335
336 dprintk(8,
337 "bcm5974: abs: p: %+05d w: %+05d x: %+05d y: %+05d "
338 "nmin: %d nmax: %d n: %d\n",
339 abs_p, abs_w, abs_x, abs_y, nmin, nmax, dev->fingers);
340
341 }
302 342
303 input_sync(input); 343 input_sync(input);
304 344
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 3282b741e246..5aafe24984c5 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -305,7 +305,7 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
305 .ident = "Lenovo 3000 n100", 305 .ident = "Lenovo 3000 n100",
306 .matches = { 306 .matches = {
307 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 307 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
308 DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"), 308 DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
309 }, 309 },
310 }, 310 },
311 { 311 {
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 75726ea0fbbd..5360c4fd4739 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -828,15 +828,18 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
828 return -ESRCH; 828 return -ESRCH;
829 if (card->load_firmware == NULL) { 829 if (card->load_firmware == NULL) {
830 printk(KERN_DEBUG "kcapi: load: no load function\n"); 830 printk(KERN_DEBUG "kcapi: load: no load function\n");
831 capi_ctr_put(card);
831 return -ESRCH; 832 return -ESRCH;
832 } 833 }
833 834
834 if (ldef.t4file.len <= 0) { 835 if (ldef.t4file.len <= 0) {
835 printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len); 836 printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len);
837 capi_ctr_put(card);
836 return -EINVAL; 838 return -EINVAL;
837 } 839 }
838 if (ldef.t4file.data == NULL) { 840 if (ldef.t4file.data == NULL) {
839 printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n"); 841 printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n");
842 capi_ctr_put(card);
840 return -EINVAL; 843 return -EINVAL;
841 } 844 }
842 845
@@ -849,6 +852,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
849 852
850 if (card->cardstate != CARD_DETECTED) { 853 if (card->cardstate != CARD_DETECTED) {
851 printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr); 854 printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr);
855 capi_ctr_put(card);
852 return -EBUSY; 856 return -EBUSY;
853 } 857 }
854 card->cardstate = CARD_LOADING; 858 card->cardstate = CARD_LOADING;
diff --git a/drivers/isdn/hardware/mISDN/hfc_pci.h b/drivers/isdn/hardware/mISDN/hfc_pci.h
index fd2c9be6d849..5783d22a18fe 100644
--- a/drivers/isdn/hardware/mISDN/hfc_pci.h
+++ b/drivers/isdn/hardware/mISDN/hfc_pci.h
@@ -183,8 +183,8 @@
183#define D_FREG_MASK 0xF 183#define D_FREG_MASK 0xF
184 184
185struct zt { 185struct zt {
186 unsigned short z1; /* Z1 pointer 16 Bit */ 186 __le16 z1; /* Z1 pointer 16 Bit */
187 unsigned short z2; /* Z2 pointer 16 Bit */ 187 __le16 z2; /* Z2 pointer 16 Bit */
188}; 188};
189 189
190struct dfifo { 190struct dfifo {
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 9cf5edbb1a9b..cd8302af40eb 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
43module_param(debug, uint, 0); 43module_param(debug, uint, 0);
44 44
45static LIST_HEAD(HFClist); 45static LIST_HEAD(HFClist);
46DEFINE_RWLOCK(HFClock); 46static DEFINE_RWLOCK(HFClock);
47 47
48enum { 48enum {
49 HFC_CCD_2BD0, 49 HFC_CCD_2BD0,
@@ -88,7 +88,7 @@ struct hfcPCI_hw {
88 unsigned char bswapped; 88 unsigned char bswapped;
89 unsigned char protocol; 89 unsigned char protocol;
90 int nt_timer; 90 int nt_timer;
91 unsigned char *pci_io; /* start of PCI IO memory */ 91 unsigned char __iomem *pci_io; /* start of PCI IO memory */
92 dma_addr_t dmahandle; 92 dma_addr_t dmahandle;
93 void *fifos; /* FIFO memory */ 93 void *fifos; /* FIFO memory */
94 int last_bfifo_cnt[2]; 94 int last_bfifo_cnt[2];
@@ -153,7 +153,7 @@ release_io_hfcpci(struct hfc_pci *hc)
153 pci_write_config_word(hc->pdev, PCI_COMMAND, 0); 153 pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
154 del_timer(&hc->hw.timer); 154 del_timer(&hc->hw.timer);
155 pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle); 155 pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
156 iounmap((void *)hc->hw.pci_io); 156 iounmap(hc->hw.pci_io);
157} 157}
158 158
159/* 159/*
@@ -366,8 +366,7 @@ static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
366 bzt->f2 = MAX_B_FRAMES; 366 bzt->f2 = MAX_B_FRAMES;
367 bzt->f1 = bzt->f2; /* init F pointers to remain constant */ 367 bzt->f1 = bzt->f2; /* init F pointers to remain constant */
368 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1); 368 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
369 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16( 369 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
370 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1 - 1));
371 if (fifo_state) 370 if (fifo_state)
372 hc->hw.fifo_en |= fifo_state; 371 hc->hw.fifo_en |= fifo_state;
373 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); 372 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
@@ -482,7 +481,7 @@ receive_dmsg(struct hfc_pci *hc)
482 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | 481 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
483 (MAX_D_FRAMES + 1); /* next buffer */ 482 (MAX_D_FRAMES + 1); /* next buffer */
484 df->za[df->f2 & D_FREG_MASK].z2 = 483 df->za[df->f2 & D_FREG_MASK].z2 =
485 cpu_to_le16((zp->z2 + rcnt) & (D_FIFO_SIZE - 1)); 484 cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) & (D_FIFO_SIZE - 1));
486 } else { 485 } else {
487 dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC); 486 dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
488 if (!dch->rx_skb) { 487 if (!dch->rx_skb) {
@@ -523,10 +522,10 @@ receive_dmsg(struct hfc_pci *hc)
523/* 522/*
524 * check for transparent receive data and read max one threshold size if avail 523 * check for transparent receive data and read max one threshold size if avail
525 */ 524 */
526int 525static int
527hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata) 526hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
528{ 527{
529 unsigned short *z1r, *z2r; 528 __le16 *z1r, *z2r;
530 int new_z2, fcnt, maxlen; 529 int new_z2, fcnt, maxlen;
531 u_char *ptr, *ptr1; 530 u_char *ptr, *ptr1;
532 531
@@ -576,7 +575,7 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
576/* 575/*
577 * B-channel main receive routine 576 * B-channel main receive routine
578 */ 577 */
579void 578static void
580main_rec_hfcpci(struct bchannel *bch) 579main_rec_hfcpci(struct bchannel *bch)
581{ 580{
582 struct hfc_pci *hc = bch->hw; 581 struct hfc_pci *hc = bch->hw;
@@ -724,7 +723,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
724 struct bzfifo *bz; 723 struct bzfifo *bz;
725 u_char *bdata; 724 u_char *bdata;
726 u_char new_f1, *src, *dst; 725 u_char new_f1, *src, *dst;
727 unsigned short *z1t, *z2t; 726 __le16 *z1t, *z2t;
728 727
729 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO)) 728 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
730 printk(KERN_DEBUG "%s\n", __func__); 729 printk(KERN_DEBUG "%s\n", __func__);
@@ -1679,7 +1678,7 @@ hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1679 * called for card init message 1678 * called for card init message
1680 */ 1679 */
1681 1680
1682void 1681static void
1683inithfcpci(struct hfc_pci *hc) 1682inithfcpci(struct hfc_pci *hc)
1684{ 1683{
1685 printk(KERN_DEBUG "inithfcpci: entered\n"); 1684 printk(KERN_DEBUG "inithfcpci: entered\n");
@@ -1966,7 +1965,7 @@ setup_hw(struct hfc_pci *hc)
1966 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n"); 1965 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1967 return 1; 1966 return 1;
1968 } 1967 }
1969 hc->hw.pci_io = (char *)(ulong)hc->pdev->resource[1].start; 1968 hc->hw.pci_io = (char __iomem *)(unsigned long)hc->pdev->resource[1].start;
1970 1969
1971 if (!hc->hw.pci_io) { 1970 if (!hc->hw.pci_io) {
1972 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n"); 1971 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 127cfdad68e7..77c280ef2eb6 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1533,8 +1533,10 @@ static int isdn_ppp_mp_bundle_array_init(void)
1533 int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle); 1533 int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle);
1534 if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL ) 1534 if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL )
1535 return -ENOMEM; 1535 return -ENOMEM;
1536 for( i = 0; i < ISDN_MAX_CHANNELS; i++ ) 1536 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
1537 spin_lock_init(&isdn_ppp_bundle_arr[i].lock); 1537 spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
1538 skb_queue_head_init(&isdn_ppp_bundle_arr[i].frags);
1539 }
1538 return 0; 1540 return 0;
1539} 1541}
1540 1542
@@ -1567,7 +1569,7 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
1567 if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL) 1569 if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
1568 return -ENOMEM; 1570 return -ENOMEM;
1569 lp->next = lp->last = lp; /* nobody else in a queue */ 1571 lp->next = lp->last = lp; /* nobody else in a queue */
1570 lp->netdev->pb->frags = NULL; 1572 skb_queue_head_init(&lp->netdev->pb->frags);
1571 lp->netdev->pb->frames = 0; 1573 lp->netdev->pb->frames = 0;
1572 lp->netdev->pb->seq = UINT_MAX; 1574 lp->netdev->pb->seq = UINT_MAX;
1573 } 1575 }
@@ -1579,28 +1581,29 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
1579 1581
1580static u32 isdn_ppp_mp_get_seq( int short_seq, 1582static u32 isdn_ppp_mp_get_seq( int short_seq,
1581 struct sk_buff * skb, u32 last_seq ); 1583 struct sk_buff * skb, u32 last_seq );
1582static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp, 1584static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
1583 struct sk_buff * from, struct sk_buff * to ); 1585 struct sk_buff *to);
1584static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, 1586static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
1585 struct sk_buff * from, struct sk_buff * to ); 1587 struct sk_buff *from, struct sk_buff *to,
1586static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb ); 1588 u32 lastseq);
1589static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
1587static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb ); 1590static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb );
1588 1591
1589static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, 1592static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1590 struct sk_buff *skb) 1593 struct sk_buff *skb)
1591{ 1594{
1592 struct ippp_struct *is; 1595 struct sk_buff *newfrag, *frag, *start, *nextf;
1593 isdn_net_local * lpq;
1594 ippp_bundle * mp;
1595 isdn_mppp_stats * stats;
1596 struct sk_buff * newfrag, * frag, * start, *nextf;
1597 u32 newseq, minseq, thisseq; 1596 u32 newseq, minseq, thisseq;
1597 isdn_mppp_stats *stats;
1598 struct ippp_struct *is;
1598 unsigned long flags; 1599 unsigned long flags;
1600 isdn_net_local *lpq;
1601 ippp_bundle *mp;
1599 int slot; 1602 int slot;
1600 1603
1601 spin_lock_irqsave(&net_dev->pb->lock, flags); 1604 spin_lock_irqsave(&net_dev->pb->lock, flags);
1602 mp = net_dev->pb; 1605 mp = net_dev->pb;
1603 stats = &mp->stats; 1606 stats = &mp->stats;
1604 slot = lp->ppp_slot; 1607 slot = lp->ppp_slot;
1605 if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { 1608 if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
1606 printk(KERN_ERR "%s: lp->ppp_slot(%d)\n", 1609 printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
@@ -1611,20 +1614,19 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1611 return; 1614 return;
1612 } 1615 }
1613 is = ippp_table[slot]; 1616 is = ippp_table[slot];
1614 if( ++mp->frames > stats->max_queue_len ) 1617 if (++mp->frames > stats->max_queue_len)
1615 stats->max_queue_len = mp->frames; 1618 stats->max_queue_len = mp->frames;
1616 1619
1617 if (is->debug & 0x8) 1620 if (is->debug & 0x8)
1618 isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb); 1621 isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);
1619 1622
1620 newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, 1623 newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
1621 skb, is->last_link_seqno); 1624 skb, is->last_link_seqno);
1622
1623 1625
1624 /* if this packet seq # is less than last already processed one, 1626 /* if this packet seq # is less than last already processed one,
1625 * toss it right away, but check for sequence start case first 1627 * toss it right away, but check for sequence start case first
1626 */ 1628 */
1627 if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) { 1629 if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
1628 mp->seq = newseq; /* the first packet: required for 1630 mp->seq = newseq; /* the first packet: required for
1629 * rfc1990 non-compliant clients -- 1631 * rfc1990 non-compliant clients --
1630 * prevents constant packet toss */ 1632 * prevents constant packet toss */
@@ -1634,7 +1636,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1634 spin_unlock_irqrestore(&mp->lock, flags); 1636 spin_unlock_irqrestore(&mp->lock, flags);
1635 return; 1637 return;
1636 } 1638 }
1637 1639
1638 /* find the minimum received sequence number over all links */ 1640 /* find the minimum received sequence number over all links */
1639 is->last_link_seqno = minseq = newseq; 1641 is->last_link_seqno = minseq = newseq;
1640 for (lpq = net_dev->queue;;) { 1642 for (lpq = net_dev->queue;;) {
@@ -1655,22 +1657,31 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1655 * packets */ 1657 * packets */
1656 newfrag = skb; 1658 newfrag = skb;
1657 1659
1658 /* if this new fragment is before the first one, then enqueue it now. */ 1660 /* Insert new fragment into the proper sequence slot. */
1659 if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) { 1661 skb_queue_walk(&mp->frags, frag) {
1660 newfrag->next = frag; 1662 if (MP_SEQ(frag) == newseq) {
1661 mp->frags = frag = newfrag; 1663 isdn_ppp_mp_free_skb(mp, newfrag);
1662 newfrag = NULL; 1664 newfrag = NULL;
1663 } 1665 break;
1666 }
1667 if (MP_LT(newseq, MP_SEQ(frag))) {
1668 __skb_queue_before(&mp->frags, frag, newfrag);
1669 newfrag = NULL;
1670 break;
1671 }
1672 }
1673 if (newfrag)
1674 __skb_queue_tail(&mp->frags, newfrag);
1664 1675
1665 start = MP_FLAGS(frag) & MP_BEGIN_FRAG && 1676 frag = skb_peek(&mp->frags);
1666 MP_SEQ(frag) == mp->seq ? frag : NULL; 1677 start = ((MP_FLAGS(frag) & MP_BEGIN_FRAG) &&
1678 (MP_SEQ(frag) == mp->seq)) ? frag : NULL;
1679 if (!start)
1680 goto check_overflow;
1667 1681
1668 /* 1682 /* main fragment traversing loop
1669 * main fragment traversing loop
1670 * 1683 *
1671 * try to accomplish several tasks: 1684 * try to accomplish several tasks:
1672 * - insert new fragment into the proper sequence slot (once that's done
1673 * newfrag will be set to NULL)
1674 * - reassemble any complete fragment sequence (non-null 'start' 1685 * - reassemble any complete fragment sequence (non-null 'start'
1675 * indicates there is a continguous sequence present) 1686 * indicates there is a continguous sequence present)
1676 * - discard any incomplete sequences that are below minseq -- due 1687 * - discard any incomplete sequences that are below minseq -- due
@@ -1679,71 +1690,46 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1679 * come to complete such sequence and it should be discarded 1690 * come to complete such sequence and it should be discarded
1680 * 1691 *
1681 * loop completes when we accomplished the following tasks: 1692 * loop completes when we accomplished the following tasks:
1682 * - new fragment is inserted in the proper sequence ('newfrag' is
1683 * set to NULL)
1684 * - we hit a gap in the sequence, so no reassembly/processing is 1693 * - we hit a gap in the sequence, so no reassembly/processing is
1685 * possible ('start' would be set to NULL) 1694 * possible ('start' would be set to NULL)
1686 * 1695 *
1687 * algorithm for this code is derived from code in the book 1696 * algorithm for this code is derived from code in the book
1688 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley) 1697 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
1689 */ 1698 */
1690 while (start != NULL || newfrag != NULL) { 1699 skb_queue_walk_safe(&mp->frags, frag, nextf) {
1691 1700 thisseq = MP_SEQ(frag);
1692 thisseq = MP_SEQ(frag); 1701
1693 nextf = frag->next; 1702 /* check for misplaced start */
1694 1703 if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
1695 /* drop any duplicate fragments */ 1704 printk(KERN_WARNING"isdn_mppp(seq %d): new "
1696 if (newfrag != NULL && thisseq == newseq) { 1705 "BEGIN flag with no prior END", thisseq);
1697 isdn_ppp_mp_free_skb(mp, newfrag); 1706 stats->seqerrs++;
1698 newfrag = NULL; 1707 stats->frame_drops++;
1699 } 1708 isdn_ppp_mp_discard(mp, start, frag);
1700 1709 start = frag;
1701 /* insert new fragment before next element if possible. */ 1710 } else if (MP_LE(thisseq, minseq)) {
1702 if (newfrag != NULL && (nextf == NULL || 1711 if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
1703 MP_LT(newseq, MP_SEQ(nextf)))) {
1704 newfrag->next = nextf;
1705 frag->next = nextf = newfrag;
1706 newfrag = NULL;
1707 }
1708
1709 if (start != NULL) {
1710 /* check for misplaced start */
1711 if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
1712 printk(KERN_WARNING"isdn_mppp(seq %d): new "
1713 "BEGIN flag with no prior END", thisseq);
1714 stats->seqerrs++;
1715 stats->frame_drops++;
1716 start = isdn_ppp_mp_discard(mp, start,frag);
1717 nextf = frag->next;
1718 }
1719 } else if (MP_LE(thisseq, minseq)) {
1720 if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
1721 start = frag; 1712 start = frag;
1722 else { 1713 else {
1723 if (MP_FLAGS(frag) & MP_END_FRAG) 1714 if (MP_FLAGS(frag) & MP_END_FRAG)
1724 stats->frame_drops++; 1715 stats->frame_drops++;
1725 if( mp->frags == frag ) 1716 __skb_unlink(skb, &mp->frags);
1726 mp->frags = nextf;
1727 isdn_ppp_mp_free_skb(mp, frag); 1717 isdn_ppp_mp_free_skb(mp, frag);
1728 frag = nextf;
1729 continue; 1718 continue;
1730 } 1719 }
1731 } 1720 }
1732 1721
1733 /* if start is non-null and we have end fragment, then 1722 /* if we have end fragment, then we have full reassembly
1734 * we have full reassembly sequence -- reassemble 1723 * sequence -- reassemble and process packet now
1735 * and process packet now
1736 */ 1724 */
1737 if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) { 1725 if (MP_FLAGS(frag) & MP_END_FRAG) {
1738 minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK; 1726 minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
1739 /* Reassemble the packet then dispatch it */ 1727 /* Reassemble the packet then dispatch it */
1740 isdn_ppp_mp_reassembly(net_dev, lp, start, nextf); 1728 isdn_ppp_mp_reassembly(net_dev, lp, start, frag, thisseq);
1741
1742 start = NULL;
1743 frag = NULL;
1744 1729
1745 mp->frags = nextf; 1730 start = NULL;
1746 } 1731 frag = NULL;
1732 }
1747 1733
1748 /* check if need to update start pointer: if we just 1734 /* check if need to update start pointer: if we just
1749 * reassembled the packet and sequence is contiguous 1735 * reassembled the packet and sequence is contiguous
@@ -1754,26 +1740,25 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1754 * below low watermark and set start to the next frag or 1740 * below low watermark and set start to the next frag or
1755 * clear start ptr. 1741 * clear start ptr.
1756 */ 1742 */
1757 if (nextf != NULL && 1743 if (nextf != (struct sk_buff *)&mp->frags &&
1758 ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) { 1744 ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
1759 /* if we just reassembled and the next one is here, 1745 /* if we just reassembled and the next one is here,
1760 * then start another reassembly. */ 1746 * then start another reassembly.
1761 1747 */
1762 if (frag == NULL) { 1748 if (frag == NULL) {
1763 if (MP_FLAGS(nextf) & MP_BEGIN_FRAG) 1749 if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
1764 start = nextf; 1750 start = nextf;
1765 else 1751 else {
1766 { 1752 printk(KERN_WARNING"isdn_mppp(seq %d):"
1767 printk(KERN_WARNING"isdn_mppp(seq %d):" 1753 " END flag with no following "
1768 " END flag with no following " 1754 "BEGIN", thisseq);
1769 "BEGIN", thisseq);
1770 stats->seqerrs++; 1755 stats->seqerrs++;
1771 } 1756 }
1772 } 1757 }
1773 1758 } else {
1774 } else { 1759 if (nextf != (struct sk_buff *)&mp->frags &&
1775 if ( nextf != NULL && frag != NULL && 1760 frag != NULL &&
1776 MP_LT(thisseq, minseq)) { 1761 MP_LT(thisseq, minseq)) {
1777 /* we've got a break in the sequence 1762 /* we've got a break in the sequence
1778 * and we not at the end yet 1763 * and we not at the end yet
1779 * and we did not just reassembled 1764 * and we did not just reassembled
@@ -1782,41 +1767,39 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1782 * discard all the frames below low watermark 1767 * discard all the frames below low watermark
1783 * and start over */ 1768 * and start over */
1784 stats->frame_drops++; 1769 stats->frame_drops++;
1785 mp->frags = isdn_ppp_mp_discard(mp,start,nextf); 1770 isdn_ppp_mp_discard(mp, start, nextf);
1786 } 1771 }
1787 /* break in the sequence, no reassembly */ 1772 /* break in the sequence, no reassembly */
1788 start = NULL; 1773 start = NULL;
1789 } 1774 }
1790 1775 if (!start)
1791 frag = nextf; 1776 break;
1792 } /* while -- main loop */ 1777 }
1793 1778
1794 if (mp->frags == NULL) 1779check_overflow:
1795 mp->frags = frag;
1796
1797 /* rather straighforward way to deal with (not very) possible 1780 /* rather straighforward way to deal with (not very) possible
1798 * queue overflow */ 1781 * queue overflow
1782 */
1799 if (mp->frames > MP_MAX_QUEUE_LEN) { 1783 if (mp->frames > MP_MAX_QUEUE_LEN) {
1800 stats->overflows++; 1784 stats->overflows++;
1801 while (mp->frames > MP_MAX_QUEUE_LEN) { 1785 skb_queue_walk_safe(&mp->frags, frag, nextf) {
1802 frag = mp->frags->next; 1786 if (mp->frames <= MP_MAX_QUEUE_LEN)
1803 isdn_ppp_mp_free_skb(mp, mp->frags); 1787 break;
1804 mp->frags = frag; 1788 __skb_unlink(frag, &mp->frags);
1789 isdn_ppp_mp_free_skb(mp, frag);
1805 } 1790 }
1806 } 1791 }
1807 spin_unlock_irqrestore(&mp->lock, flags); 1792 spin_unlock_irqrestore(&mp->lock, flags);
1808} 1793}
1809 1794
1810static void isdn_ppp_mp_cleanup( isdn_net_local * lp ) 1795static void isdn_ppp_mp_cleanup(isdn_net_local *lp)
1811{ 1796{
1812 struct sk_buff * frag = lp->netdev->pb->frags; 1797 struct sk_buff *skb, *tmp;
1813 struct sk_buff * nextfrag; 1798
1814 while( frag ) { 1799 skb_queue_walk_safe(&lp->netdev->pb->frags, skb, tmp) {
1815 nextfrag = frag->next; 1800 __skb_unlink(skb, &lp->netdev->pb->frags);
1816 isdn_ppp_mp_free_skb(lp->netdev->pb, frag); 1801 isdn_ppp_mp_free_skb(lp->netdev->pb, skb);
1817 frag = nextfrag; 1802 }
1818 }
1819 lp->netdev->pb->frags = NULL;
1820} 1803}
1821 1804
1822static u32 isdn_ppp_mp_get_seq( int short_seq, 1805static u32 isdn_ppp_mp_get_seq( int short_seq,
@@ -1853,72 +1836,115 @@ static u32 isdn_ppp_mp_get_seq( int short_seq,
1853 return seq; 1836 return seq;
1854} 1837}
1855 1838
1856struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp, 1839static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
1857 struct sk_buff * from, struct sk_buff * to ) 1840 struct sk_buff *to)
1858{ 1841{
1859 if( from ) 1842 if (from) {
1860 while (from != to) { 1843 struct sk_buff *skb, *tmp;
1861 struct sk_buff * next = from->next; 1844 int freeing = 0;
1862 isdn_ppp_mp_free_skb(mp, from); 1845
1863 from = next; 1846 skb_queue_walk_safe(&mp->frags, skb, tmp) {
1847 if (skb == to)
1848 break;
1849 if (skb == from)
1850 freeing = 1;
1851 if (!freeing)
1852 continue;
1853 __skb_unlink(skb, &mp->frags);
1854 isdn_ppp_mp_free_skb(mp, skb);
1864 } 1855 }
1865 return from; 1856 }
1866} 1857}
1867 1858
1868void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, 1859static unsigned int calc_tot_len(struct sk_buff_head *queue,
1869 struct sk_buff * from, struct sk_buff * to ) 1860 struct sk_buff *from, struct sk_buff *to)
1870{ 1861{
1871 ippp_bundle * mp = net_dev->pb; 1862 unsigned int tot_len = 0;
1872 int proto; 1863 struct sk_buff *skb;
1873 struct sk_buff * skb; 1864 int found_start = 0;
1865
1866 skb_queue_walk(queue, skb) {
1867 if (skb == from)
1868 found_start = 1;
1869 if (!found_start)
1870 continue;
1871 tot_len += skb->len - MP_HEADER_LEN;
1872 if (skb == to)
1873 break;
1874 }
1875 return tot_len;
1876}
1877
1878/* Reassemble packet using fragments in the reassembly queue from
1879 * 'from' until 'to', inclusive.
1880 */
1881static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
1882 struct sk_buff *from, struct sk_buff *to,
1883 u32 lastseq)
1884{
1885 ippp_bundle *mp = net_dev->pb;
1874 unsigned int tot_len; 1886 unsigned int tot_len;
1887 struct sk_buff *skb;
1888 int proto;
1875 1889
1876 if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { 1890 if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
1877 printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", 1891 printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
1878 __func__, lp->ppp_slot); 1892 __func__, lp->ppp_slot);
1879 return; 1893 return;
1880 } 1894 }
1881 if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) { 1895
1882 if( ippp_table[lp->ppp_slot]->debug & 0x40 ) 1896 tot_len = calc_tot_len(&mp->frags, from, to);
1897
1898 if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
1899 if (ippp_table[lp->ppp_slot]->debug & 0x40)
1883 printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, " 1900 printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
1884 "len %d\n", MP_SEQ(from), from->len ); 1901 "len %d\n", MP_SEQ(from), from->len);
1885 skb = from; 1902 skb = from;
1886 skb_pull(skb, MP_HEADER_LEN); 1903 skb_pull(skb, MP_HEADER_LEN);
1904 __skb_unlink(skb, &mp->frags);
1887 mp->frames--; 1905 mp->frames--;
1888 } else { 1906 } else {
1889 struct sk_buff * frag; 1907 struct sk_buff *walk, *tmp;
1890 int n; 1908 int found_start = 0;
1891 1909
1892 for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++) 1910 if (ippp_table[lp->ppp_slot]->debug & 0x40)
1893 tot_len += frag->len - MP_HEADER_LEN;
1894
1895 if( ippp_table[lp->ppp_slot]->debug & 0x40 )
1896 printk(KERN_DEBUG"isdn_mppp: reassembling frames %d " 1911 printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
1897 "to %d, len %d\n", MP_SEQ(from), 1912 "to %d, len %d\n", MP_SEQ(from), lastseq,
1898 (MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len ); 1913 tot_len);
1899 if( (skb = dev_alloc_skb(tot_len)) == NULL ) { 1914
1915 skb = dev_alloc_skb(tot_len);
1916 if (!skb)
1900 printk(KERN_ERR "isdn_mppp: cannot allocate sk buff " 1917 printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
1901 "of size %d\n", tot_len); 1918 "of size %d\n", tot_len);
1902 isdn_ppp_mp_discard(mp, from, to); 1919
1903 return; 1920 found_start = 0;
1904 } 1921 skb_queue_walk_safe(&mp->frags, walk, tmp) {
1922 if (walk == from)
1923 found_start = 1;
1924 if (!found_start)
1925 continue;
1905 1926
1906 while( from != to ) { 1927 if (skb) {
1907 unsigned int len = from->len - MP_HEADER_LEN; 1928 unsigned int len = walk->len - MP_HEADER_LEN;
1929 skb_copy_from_linear_data_offset(walk, MP_HEADER_LEN,
1930 skb_put(skb, len),
1931 len);
1932 }
1933 __skb_unlink(walk, &mp->frags);
1934 isdn_ppp_mp_free_skb(mp, walk);
1908 1935
1909 skb_copy_from_linear_data_offset(from, MP_HEADER_LEN, 1936 if (walk == to)
1910 skb_put(skb,len), 1937 break;
1911 len);
1912 frag = from->next;
1913 isdn_ppp_mp_free_skb(mp, from);
1914 from = frag;
1915 } 1938 }
1916 } 1939 }
1940 if (!skb)
1941 return;
1942
1917 proto = isdn_ppp_strip_proto(skb); 1943 proto = isdn_ppp_strip_proto(skb);
1918 isdn_ppp_push_higher(net_dev, lp, skb, proto); 1944 isdn_ppp_push_higher(net_dev, lp, skb, proto);
1919} 1945}
1920 1946
1921static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb) 1947static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb)
1922{ 1948{
1923 dev_kfree_skb(skb); 1949 dev_kfree_skb(skb);
1924 mp->frames--; 1950 mp->frames--;
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index b5fabc7019d8..e7462924b505 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -124,18 +124,6 @@ mISDN_read(struct file *filep, char *buf, size_t count, loff_t *off)
124 return ret; 124 return ret;
125} 125}
126 126
127static loff_t
128mISDN_llseek(struct file *filep, loff_t offset, int orig)
129{
130 return -ESPIPE;
131}
132
133static ssize_t
134mISDN_write(struct file *filep, const char *buf, size_t count, loff_t *off)
135{
136 return -EOPNOTSUPP;
137}
138
139static unsigned int 127static unsigned int
140mISDN_poll(struct file *filep, poll_table *wait) 128mISDN_poll(struct file *filep, poll_table *wait)
141{ 129{
@@ -157,8 +145,9 @@ mISDN_poll(struct file *filep, poll_table *wait)
157} 145}
158 146
159static void 147static void
160dev_expire_timer(struct mISDNtimer *timer) 148dev_expire_timer(unsigned long data)
161{ 149{
150 struct mISDNtimer *timer = (void *)data;
162 u_long flags; 151 u_long flags;
163 152
164 spin_lock_irqsave(&timer->dev->lock, flags); 153 spin_lock_irqsave(&timer->dev->lock, flags);
@@ -191,7 +180,7 @@ misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
191 spin_unlock_irqrestore(&dev->lock, flags); 180 spin_unlock_irqrestore(&dev->lock, flags);
192 timer->dev = dev; 181 timer->dev = dev;
193 timer->tl.data = (long)timer; 182 timer->tl.data = (long)timer;
194 timer->tl.function = (void *) dev_expire_timer; 183 timer->tl.function = dev_expire_timer;
195 init_timer(&timer->tl); 184 init_timer(&timer->tl);
196 timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000); 185 timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000);
197 add_timer(&timer->tl); 186 add_timer(&timer->tl);
@@ -211,6 +200,9 @@ misdn_del_timer(struct mISDNtimerdev *dev, int id)
211 list_for_each_entry(timer, &dev->pending, list) { 200 list_for_each_entry(timer, &dev->pending, list) {
212 if (timer->id == id) { 201 if (timer->id == id) {
213 list_del_init(&timer->list); 202 list_del_init(&timer->list);
203 /* RED-PEN AK: race -- timer can be still running on
204 * other CPU. Needs reference count I think
205 */
214 del_timer(&timer->tl); 206 del_timer(&timer->tl);
215 ret = timer->id; 207 ret = timer->id;
216 kfree(timer); 208 kfree(timer);
@@ -268,9 +260,7 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
268} 260}
269 261
270static struct file_operations mISDN_fops = { 262static struct file_operations mISDN_fops = {
271 .llseek = mISDN_llseek,
272 .read = mISDN_read, 263 .read = mISDN_read,
273 .write = mISDN_write,
274 .poll = mISDN_poll, 264 .poll = mISDN_poll,
275 .ioctl = mISDN_ioctl, 265 .ioctl = mISDN_ioctl,
276 .open = mISDN_open, 266 .open = mISDN_open,
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7e65bad522cb..ac89a5deaca2 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -238,15 +238,47 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
238 238
239} 239}
240 240
241static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
242{
243 /* Iterate the disks of an mddev, using rcu to protect access to the
244 * linked list, and raising the refcount of devices we return to ensure
245 * they don't disappear while in use.
246 * As devices are only added or removed when raid_disk is < 0 and
247 * nr_pending is 0 and In_sync is clear, the entries we return will
248 * still be in the same position on the list when we re-enter
249 * list_for_each_continue_rcu.
250 */
251 struct list_head *pos;
252 rcu_read_lock();
253 if (rdev == NULL)
254 /* start at the beginning */
255 pos = &mddev->disks;
256 else {
257 /* release the previous rdev and start from there. */
258 rdev_dec_pending(rdev, mddev);
259 pos = &rdev->same_set;
260 }
261 list_for_each_continue_rcu(pos, &mddev->disks) {
262 rdev = list_entry(pos, mdk_rdev_t, same_set);
263 if (rdev->raid_disk >= 0 &&
264 test_bit(In_sync, &rdev->flags) &&
265 !test_bit(Faulty, &rdev->flags)) {
266 /* this is a usable devices */
267 atomic_inc(&rdev->nr_pending);
268 rcu_read_unlock();
269 return rdev;
270 }
271 }
272 rcu_read_unlock();
273 return NULL;
274}
275
241static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) 276static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
242{ 277{
243 mdk_rdev_t *rdev; 278 mdk_rdev_t *rdev = NULL;
244 mddev_t *mddev = bitmap->mddev; 279 mddev_t *mddev = bitmap->mddev;
245 280
246 rcu_read_lock(); 281 while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
247 rdev_for_each_rcu(rdev, mddev)
248 if (test_bit(In_sync, &rdev->flags)
249 && !test_bit(Faulty, &rdev->flags)) {
250 int size = PAGE_SIZE; 282 int size = PAGE_SIZE;
251 if (page->index == bitmap->file_pages-1) 283 if (page->index == bitmap->file_pages-1)
252 size = roundup(bitmap->last_page_size, 284 size = roundup(bitmap->last_page_size,
@@ -281,8 +313,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
281 + page->index * (PAGE_SIZE/512), 313 + page->index * (PAGE_SIZE/512),
282 size, 314 size,
283 page); 315 page);
284 } 316 }
285 rcu_read_unlock();
286 317
287 if (wait) 318 if (wait)
288 md_super_wait(mddev); 319 md_super_wait(mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cfadc5bd2ba..4790c83d78d0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3841,8 +3841,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
3841 3841
3842 del_timer_sync(&mddev->safemode_timer); 3842 del_timer_sync(&mddev->safemode_timer);
3843 3843
3844 invalidate_partition(disk, 0);
3845
3846 switch(mode) { 3844 switch(mode) {
3847 case 1: /* readonly */ 3845 case 1: /* readonly */
3848 err = -ENXIO; 3846 err = -ENXIO;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index e8bc7abf2409..99be9e5c85f7 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1068,7 +1068,7 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
1068 { 1068 {
1069 v4l2_std_id *id = arg; 1069 v4l2_std_id *id = arg;
1070 int found = 0; 1070 int found = 0;
1071 int i, err; 1071 int i;
1072 1072
1073 DEB_EE(("VIDIOC_S_STD\n")); 1073 DEB_EE(("VIDIOC_S_STD\n"));
1074 1074
@@ -1116,7 +1116,6 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
1116 case VIDIOC_OVERLAY: 1116 case VIDIOC_OVERLAY:
1117 { 1117 {
1118 int on = *(int *)arg; 1118 int on = *(int *)arg;
1119 int err = 0;
1120 1119
1121 DEB_D(("VIDIOC_OVERLAY on:%d\n",on)); 1120 DEB_D(("VIDIOC_OVERLAY on:%d\n",on));
1122 if (on != 0) { 1121 if (on != 0) {
@@ -1192,7 +1191,6 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
1192 case VIDIOCGMBUF: 1191 case VIDIOCGMBUF:
1193 { 1192 {
1194 struct video_mbuf *mbuf = arg; 1193 struct video_mbuf *mbuf = arg;
1195 struct videobuf_queue *q;
1196 int i; 1194 int i;
1197 1195
1198 /* fixme: number of capture buffers and sizes for v4l apps */ 1196 /* fixme: number of capture buffers and sizes for v4l apps */
diff --git a/drivers/media/common/tuners/mt2131.c b/drivers/media/common/tuners/mt2131.c
index e254bcfc2efb..e8d3c48f8605 100644
--- a/drivers/media/common/tuners/mt2131.c
+++ b/drivers/media/common/tuners/mt2131.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for Microtune MT2131 "QAM/8VSB single chip tuner" 2 * Driver for Microtune MT2131 "QAM/8VSB single chip tuner"
3 * 3 *
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/common/tuners/mt2131.h b/drivers/media/common/tuners/mt2131.h
index cd8376f6f7b4..6632de640df0 100644
--- a/drivers/media/common/tuners/mt2131.h
+++ b/drivers/media/common/tuners/mt2131.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for Microtune MT2131 "QAM/8VSB single chip tuner" 2 * Driver for Microtune MT2131 "QAM/8VSB single chip tuner"
3 * 3 *
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/common/tuners/mt2131_priv.h b/drivers/media/common/tuners/mt2131_priv.h
index e930759c2c00..4e05a67e88c1 100644
--- a/drivers/media/common/tuners/mt2131_priv.h
+++ b/drivers/media/common/tuners/mt2131_priv.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for Microtune MT2131 "QAM/8VSB single chip tuner" 2 * Driver for Microtune MT2131 "QAM/8VSB single chip tuner"
3 * 3 *
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/common/tuners/mxl5005s.c b/drivers/media/common/tuners/mxl5005s.c
index 0dc2bef9f6a3..227642b044ae 100644
--- a/drivers/media/common/tuners/mxl5005s.c
+++ b/drivers/media/common/tuners/mxl5005s.c
@@ -2,7 +2,7 @@
2 MaxLinear MXL5005S VSB/QAM/DVBT tuner driver 2 MaxLinear MXL5005S VSB/QAM/DVBT tuner driver
3 3
4 Copyright (C) 2008 MaxLinear 4 Copyright (C) 2008 MaxLinear
5 Copyright (C) 2006 Steven Toth <stoth@hauppauge.com> 5 Copyright (C) 2006 Steven Toth <stoth@linuxtv.org>
6 Functions: 6 Functions:
7 mxl5005s_reset() 7 mxl5005s_reset()
8 mxl5005s_writereg() 8 mxl5005s_writereg()
@@ -3837,7 +3837,7 @@ static u16 MXL_Hystersis_Test(struct dvb_frontend *fe, int Hystersis)
3837/* ---------------------------------------------------------------- 3837/* ----------------------------------------------------------------
3838 * Begin: Everything after here is new code to adapt the 3838 * Begin: Everything after here is new code to adapt the
3839 * proprietary Realtek driver into a Linux API tuner. 3839 * proprietary Realtek driver into a Linux API tuner.
3840 * Copyright (C) 2008 Steven Toth <stoth@hauppauge.com> 3840 * Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
3841 */ 3841 */
3842static int mxl5005s_reset(struct dvb_frontend *fe) 3842static int mxl5005s_reset(struct dvb_frontend *fe)
3843{ 3843{
diff --git a/drivers/media/common/tuners/mxl5005s.h b/drivers/media/common/tuners/mxl5005s.h
index 396db150bf0c..7ac6815b30aa 100644
--- a/drivers/media/common/tuners/mxl5005s.h
+++ b/drivers/media/common/tuners/mxl5005s.h
@@ -2,7 +2,7 @@
2 MaxLinear MXL5005S VSB/QAM/DVBT tuner driver 2 MaxLinear MXL5005S VSB/QAM/DVBT tuner driver
3 3
4 Copyright (C) 2008 MaxLinear 4 Copyright (C) 2008 MaxLinear
5 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com> 5 Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
6 6
7 This program is free software; you can redistribute it and/or modify 7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by 8 it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/common/tuners/tuner-simple.c b/drivers/media/common/tuners/tuner-simple.c
index 597e47f5d69c..aa773a658a2a 100644
--- a/drivers/media/common/tuners/tuner-simple.c
+++ b/drivers/media/common/tuners/tuner-simple.c
@@ -253,7 +253,7 @@ static struct tuner_params *simple_tuner_params(struct dvb_frontend *fe,
253 253
254static int simple_config_lookup(struct dvb_frontend *fe, 254static int simple_config_lookup(struct dvb_frontend *fe,
255 struct tuner_params *t_params, 255 struct tuner_params *t_params,
256 int *frequency, u8 *config, u8 *cb) 256 unsigned *frequency, u8 *config, u8 *cb)
257{ 257{
258 struct tuner_simple_priv *priv = fe->tuner_priv; 258 struct tuner_simple_priv *priv = fe->tuner_priv;
259 int i; 259 int i;
@@ -587,45 +587,45 @@ static int simple_set_tv_freq(struct dvb_frontend *fe,
587 priv->last_div = div; 587 priv->last_div = div;
588 if (t_params->has_tda9887) { 588 if (t_params->has_tda9887) {
589 struct v4l2_priv_tun_config tda9887_cfg; 589 struct v4l2_priv_tun_config tda9887_cfg;
590 int config = 0; 590 int tda_config = 0;
591 int is_secam_l = (params->std & (V4L2_STD_SECAM_L | 591 int is_secam_l = (params->std & (V4L2_STD_SECAM_L |
592 V4L2_STD_SECAM_LC)) && 592 V4L2_STD_SECAM_LC)) &&
593 !(params->std & ~(V4L2_STD_SECAM_L | 593 !(params->std & ~(V4L2_STD_SECAM_L |
594 V4L2_STD_SECAM_LC)); 594 V4L2_STD_SECAM_LC));
595 595
596 tda9887_cfg.tuner = TUNER_TDA9887; 596 tda9887_cfg.tuner = TUNER_TDA9887;
597 tda9887_cfg.priv = &config; 597 tda9887_cfg.priv = &tda_config;
598 598
599 if (params->std == V4L2_STD_SECAM_LC) { 599 if (params->std == V4L2_STD_SECAM_LC) {
600 if (t_params->port1_active ^ t_params->port1_invert_for_secam_lc) 600 if (t_params->port1_active ^ t_params->port1_invert_for_secam_lc)
601 config |= TDA9887_PORT1_ACTIVE; 601 tda_config |= TDA9887_PORT1_ACTIVE;
602 if (t_params->port2_active ^ t_params->port2_invert_for_secam_lc) 602 if (t_params->port2_active ^ t_params->port2_invert_for_secam_lc)
603 config |= TDA9887_PORT2_ACTIVE; 603 tda_config |= TDA9887_PORT2_ACTIVE;
604 } else { 604 } else {
605 if (t_params->port1_active) 605 if (t_params->port1_active)
606 config |= TDA9887_PORT1_ACTIVE; 606 tda_config |= TDA9887_PORT1_ACTIVE;
607 if (t_params->port2_active) 607 if (t_params->port2_active)
608 config |= TDA9887_PORT2_ACTIVE; 608 tda_config |= TDA9887_PORT2_ACTIVE;
609 } 609 }
610 if (t_params->intercarrier_mode) 610 if (t_params->intercarrier_mode)
611 config |= TDA9887_INTERCARRIER; 611 tda_config |= TDA9887_INTERCARRIER;
612 if (is_secam_l) { 612 if (is_secam_l) {
613 if (i == 0 && t_params->default_top_secam_low) 613 if (i == 0 && t_params->default_top_secam_low)
614 config |= TDA9887_TOP(t_params->default_top_secam_low); 614 tda_config |= TDA9887_TOP(t_params->default_top_secam_low);
615 else if (i == 1 && t_params->default_top_secam_mid) 615 else if (i == 1 && t_params->default_top_secam_mid)
616 config |= TDA9887_TOP(t_params->default_top_secam_mid); 616 tda_config |= TDA9887_TOP(t_params->default_top_secam_mid);
617 else if (t_params->default_top_secam_high) 617 else if (t_params->default_top_secam_high)
618 config |= TDA9887_TOP(t_params->default_top_secam_high); 618 tda_config |= TDA9887_TOP(t_params->default_top_secam_high);
619 } else { 619 } else {
620 if (i == 0 && t_params->default_top_low) 620 if (i == 0 && t_params->default_top_low)
621 config |= TDA9887_TOP(t_params->default_top_low); 621 tda_config |= TDA9887_TOP(t_params->default_top_low);
622 else if (i == 1 && t_params->default_top_mid) 622 else if (i == 1 && t_params->default_top_mid)
623 config |= TDA9887_TOP(t_params->default_top_mid); 623 tda_config |= TDA9887_TOP(t_params->default_top_mid);
624 else if (t_params->default_top_high) 624 else if (t_params->default_top_high)
625 config |= TDA9887_TOP(t_params->default_top_high); 625 tda_config |= TDA9887_TOP(t_params->default_top_high);
626 } 626 }
627 if (t_params->default_pll_gating_18) 627 if (t_params->default_pll_gating_18)
628 config |= TDA9887_GATING_18; 628 tda_config |= TDA9887_GATING_18;
629 i2c_clients_command(priv->i2c_props.adap, TUNER_SET_CONFIG, 629 i2c_clients_command(priv->i2c_props.adap, TUNER_SET_CONFIG,
630 &tda9887_cfg); 630 &tda9887_cfg);
631 } 631 }
@@ -813,7 +813,8 @@ static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
813 static struct tuner_params *t_params; 813 static struct tuner_params *t_params;
814 u8 config, cb; 814 u8 config, cb;
815 u32 div; 815 u32 div;
816 int ret, frequency = params->frequency / 62500; 816 int ret;
817 unsigned frequency = params->frequency / 62500;
817 818
818 t_params = simple_tuner_params(fe, TUNER_PARAM_TYPE_DIGITAL); 819 t_params = simple_tuner_params(fe, TUNER_PARAM_TYPE_DIGITAL);
819 ret = simple_config_lookup(fe, t_params, &frequency, &config, &cb); 820 ret = simple_config_lookup(fe, t_params, &frequency, &config, &cb);
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index 5f99de0ad612..dcddfa803a75 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -2,7 +2,7 @@
2 * Driver for Xceive XC5000 "QAM/8VSB single chip tuner" 2 * Driver for Xceive XC5000 "QAM/8VSB single chip tuner"
3 * 3 *
4 * Copyright (c) 2007 Xceive Corporation 4 * Copyright (c) 2007 Xceive Corporation
5 * Copyright (c) 2007 Steven Toth <stoth@hauppauge.com> 5 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h
index c910715addc9..5389f740945a 100644
--- a/drivers/media/common/tuners/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for Xceive XC5000 "QAM/8VSB single chip tuner" 2 * Driver for Xceive XC5000 "QAM/8VSB single chip tuner"
3 * 3 *
4 * Copyright (c) 2007 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/common/tuners/xc5000_priv.h b/drivers/media/common/tuners/xc5000_priv.h
index a72a9887fe7f..b2a0074c99c9 100644
--- a/drivers/media/common/tuners/xc5000_priv.h
+++ b/drivers/media/common/tuners/xc5000_priv.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for Xceive XC5000 "QAM/8VSB single chip tuner" 2 * Driver for Xceive XC5000 "QAM/8VSB single chip tuner"
3 * 3 *
4 * Copyright (c) 2007 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index f9d087669d5d..4eed783f4bce 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -137,7 +137,8 @@ static int flexcop_send_diseqc_msg(struct dvb_frontend* fe, int len, u8 *msg, un
137 flexcop_diseqc_send_byte(fe, 0xff); 137 flexcop_diseqc_send_byte(fe, 0xff);
138 else { 138 else {
139 flexcop_set_tone(fe, SEC_TONE_ON); 139 flexcop_set_tone(fe, SEC_TONE_ON);
140 udelay(12500); 140 mdelay(12);
141 udelay(500);
141 flexcop_set_tone(fe, SEC_TONE_OFF); 142 flexcop_set_tone(fe, SEC_TONE_OFF);
142 } 143 }
143 msleep(20); 144 msleep(20);
diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c
index 55973eaf3711..43a112ec6d44 100644
--- a/drivers/media/dvb/b2c2/flexcop-i2c.c
+++ b/drivers/media/dvb/b2c2/flexcop-i2c.c
@@ -221,12 +221,12 @@ int flexcop_i2c_init(struct flexcop_device *fc)
221 fc->fc_i2c_adap[1].port = FC_I2C_PORT_EEPROM; 221 fc->fc_i2c_adap[1].port = FC_I2C_PORT_EEPROM;
222 fc->fc_i2c_adap[2].port = FC_I2C_PORT_TUNER; 222 fc->fc_i2c_adap[2].port = FC_I2C_PORT_TUNER;
223 223
224 strncpy(fc->fc_i2c_adap[0].i2c_adap.name, 224 strlcpy(fc->fc_i2c_adap[0].i2c_adap.name, "B2C2 FlexCop I2C to demod",
225 "B2C2 FlexCop I2C to demod", I2C_NAME_SIZE); 225 sizeof(fc->fc_i2c_adap[0].i2c_adap.name));
226 strncpy(fc->fc_i2c_adap[1].i2c_adap.name, 226 strlcpy(fc->fc_i2c_adap[1].i2c_adap.name, "B2C2 FlexCop I2C to eeprom",
227 "B2C2 FlexCop I2C to eeprom", I2C_NAME_SIZE); 227 sizeof(fc->fc_i2c_adap[1].i2c_adap.name));
228 strncpy(fc->fc_i2c_adap[2].i2c_adap.name, 228 strlcpy(fc->fc_i2c_adap[2].i2c_adap.name, "B2C2 FlexCop I2C to tuner",
229 "B2C2 FlexCop I2C to tuner", I2C_NAME_SIZE); 229 sizeof(fc->fc_i2c_adap[2].i2c_adap.name));
230 230
231 i2c_set_adapdata(&fc->fc_i2c_adap[0].i2c_adap, &fc->fc_i2c_adap[0]); 231 i2c_set_adapdata(&fc->fc_i2c_adap[0].i2c_adap, &fc->fc_i2c_adap[0]);
232 i2c_set_adapdata(&fc->fc_i2c_adap[1].i2c_adap, &fc->fc_i2c_adap[1]); 232 i2c_set_adapdata(&fc->fc_i2c_adap[1].i2c_adap, &fc->fc_i2c_adap[1]);
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index a7637562e742..aa3db57d32d9 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -1244,7 +1244,7 @@ static int dst_command(struct dst_state *state, u8 *data, u8 len)
1244 goto error; 1244 goto error;
1245 } 1245 }
1246 if (state->type_flags & DST_TYPE_HAS_FW_1) 1246 if (state->type_flags & DST_TYPE_HAS_FW_1)
1247 udelay(3000); 1247 mdelay(3);
1248 if (read_dst(state, &reply, GET_ACK)) { 1248 if (read_dst(state, &reply, GET_ACK)) {
1249 dprintk(verbose, DST_DEBUG, 1, "Trying to recover.. "); 1249 dprintk(verbose, DST_DEBUG, 1, "Trying to recover.. ");
1250 if ((dst_error_recovery(state)) < 0) { 1250 if ((dst_error_recovery(state)) < 0) {
@@ -1260,7 +1260,7 @@ static int dst_command(struct dst_state *state, u8 *data, u8 len)
1260 if (len >= 2 && data[0] == 0 && (data[1] == 1 || data[1] == 3)) 1260 if (len >= 2 && data[0] == 0 && (data[1] == 1 || data[1] == 3))
1261 goto error; 1261 goto error;
1262 if (state->type_flags & DST_TYPE_HAS_FW_1) 1262 if (state->type_flags & DST_TYPE_HAS_FW_1)
1263 udelay(3000); 1263 mdelay(3);
1264 else 1264 else
1265 udelay(2000); 1265 udelay(2000);
1266 if (!dst_wait_dst_ready(state, NO_DELAY)) 1266 if (!dst_wait_dst_ready(state, NO_DELAY))
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 1cf9fcb6f514..069d847ba887 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -641,7 +641,6 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
641 struct timespec timeout = { 0 }; 641 struct timespec timeout = { 0 };
642 struct dmx_pes_filter_params *para = &filter->params.pes; 642 struct dmx_pes_filter_params *para = &filter->params.pes;
643 dmx_output_t otype; 643 dmx_output_t otype;
644 int ret;
645 int ts_type; 644 int ts_type;
646 enum dmx_ts_pes ts_pes; 645 enum dmx_ts_pes ts_pes;
647 struct dmx_ts_feed **tsfeed = &filter->feed.ts; 646 struct dmx_ts_feed **tsfeed = &filter->feed.ts;
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index 8e5dd7b1f034..98ee16773ff2 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -1032,7 +1032,7 @@ static int dvb_ca_en50221_thread(void *data)
1032 /* we need this extra check for annoying interfaces like the budget-av */ 1032 /* we need this extra check for annoying interfaces like the budget-av */
1033 if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) && 1033 if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) &&
1034 (ca->pub->poll_slot_status)) { 1034 (ca->pub->poll_slot_status)) {
1035 int status = ca->pub->poll_slot_status(ca->pub, slot, 0); 1035 status = ca->pub->poll_slot_status(ca->pub, slot, 0);
1036 if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) { 1036 if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) {
1037 ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE; 1037 ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;
1038 dvb_ca_en50221_thread_update_delay(ca); 1038 dvb_ca_en50221_thread_update_delay(ca);
@@ -1089,7 +1089,7 @@ static int dvb_ca_en50221_thread(void *data)
1089 /* we need this extra check for annoying interfaces like the budget-av */ 1089 /* we need this extra check for annoying interfaces like the budget-av */
1090 if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) && 1090 if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) &&
1091 (ca->pub->poll_slot_status)) { 1091 (ca->pub->poll_slot_status)) {
1092 int status = ca->pub->poll_slot_status(ca->pub, slot, 0); 1092 status = ca->pub->poll_slot_status(ca->pub, slot, 0);
1093 if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) { 1093 if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) {
1094 ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE; 1094 ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;
1095 dvb_ca_en50221_thread_update_delay(ca); 1095 dvb_ca_en50221_thread_update_delay(ca);
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 8cbdb218952f..3526e3ee9487 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -889,13 +889,13 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
889 * initialization, so parg is 8 bits and does not 889 * initialization, so parg is 8 bits and does not
890 * include the initialization or start bit 890 * include the initialization or start bit
891 */ 891 */
892 unsigned long cmd = ((unsigned long) parg) << 1; 892 unsigned long swcmd = ((unsigned long) parg) << 1;
893 struct timeval nexttime; 893 struct timeval nexttime;
894 struct timeval tv[10]; 894 struct timeval tv[10];
895 int i; 895 int i;
896 u8 last = 1; 896 u8 last = 1;
897 if (dvb_frontend_debug) 897 if (dvb_frontend_debug)
898 printk("%s switch command: 0x%04lx\n", __func__, cmd); 898 printk("%s switch command: 0x%04lx\n", __func__, swcmd);
899 do_gettimeofday(&nexttime); 899 do_gettimeofday(&nexttime);
900 if (dvb_frontend_debug) 900 if (dvb_frontend_debug)
901 memcpy(&tv[0], &nexttime, sizeof(struct timeval)); 901 memcpy(&tv[0], &nexttime, sizeof(struct timeval));
@@ -908,12 +908,12 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
908 for (i = 0; i < 9; i++) { 908 for (i = 0; i < 9; i++) {
909 if (dvb_frontend_debug) 909 if (dvb_frontend_debug)
910 do_gettimeofday(&tv[i + 1]); 910 do_gettimeofday(&tv[i + 1]);
911 if ((cmd & 0x01) != last) { 911 if ((swcmd & 0x01) != last) {
912 /* set voltage to (last ? 13V : 18V) */ 912 /* set voltage to (last ? 13V : 18V) */
913 fe->ops.set_voltage(fe, (last) ? SEC_VOLTAGE_13 : SEC_VOLTAGE_18); 913 fe->ops.set_voltage(fe, (last) ? SEC_VOLTAGE_13 : SEC_VOLTAGE_18);
914 last = (last) ? 0 : 1; 914 last = (last) ? 0 : 1;
915 } 915 }
916 cmd = cmd >> 1; 916 swcmd = swcmd >> 1;
917 if (i != 8) 917 if (i != 8)
918 dvb_frontend_sleep_until(&nexttime, 8000); 918 dvb_frontend_sleep_until(&nexttime, 8000);
919 } 919 }
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index aaa0b6f0b521..563400277a42 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -210,7 +210,7 @@ static int cxusb_aver_power_ctrl(struct dvb_usb_device *d, int onoff)
210 if (d->state == DVB_USB_STATE_INIT && 210 if (d->state == DVB_USB_STATE_INIT &&
211 usb_set_interface(d->udev, 0, 0) < 0) 211 usb_set_interface(d->udev, 0, 0) < 0)
212 err("set interface failed"); 212 err("set interface failed");
213 do; while (!(ret = cxusb_ctrl_msg(d, CMD_POWER_ON, NULL, 0, NULL, 0)) && 213 do {} while (!(ret = cxusb_ctrl_msg(d, CMD_POWER_ON, NULL, 0, NULL, 0)) &&
214 !(ret = cxusb_ctrl_msg(d, 0x15, NULL, 0, NULL, 0)) && 214 !(ret = cxusb_ctrl_msg(d, 0x15, NULL, 0, NULL, 0)) &&
215 !(ret = cxusb_ctrl_msg(d, 0x17, NULL, 0, NULL, 0)) && 0); 215 !(ret = cxusb_ctrl_msg(d, 0x17, NULL, 0, NULL, 0)) && 0);
216 if (!ret) { 216 if (!ret) {
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 3dd20bfbed32..6c0e5c5f4362 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -1117,7 +1117,8 @@ struct usb_device_id dib0700_usb_id_table[] = {
1117 { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_HT_EXPRESS) }, 1117 { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_HT_EXPRESS) },
1118 { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS) }, 1118 { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS) },
1119 { USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_STK7700P_2) }, 1119 { USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_STK7700P_2) },
1120 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009) }, 1120/* 35 */{ USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009) },
1121 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_NOVA_T_500_3) },
1121 { 0 } /* Terminating entry */ 1122 { 0 } /* Terminating entry */
1122}; 1123};
1123MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table); 1124MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -1373,7 +1374,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1373 } 1374 }
1374 }, 1375 },
1375 1376
1376 .num_device_descs = 3, 1377 .num_device_descs = 4,
1377 .devices = { 1378 .devices = {
1378 { "DiBcom STK7070PD reference design", 1379 { "DiBcom STK7070PD reference design",
1379 { &dib0700_usb_id_table[17], NULL }, 1380 { &dib0700_usb_id_table[17], NULL },
@@ -1386,6 +1387,10 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1386 { "Hauppauge Nova-TD Stick (52009)", 1387 { "Hauppauge Nova-TD Stick (52009)",
1387 { &dib0700_usb_id_table[35], NULL }, 1388 { &dib0700_usb_id_table[35], NULL },
1388 { NULL }, 1389 { NULL },
1390 },
1391 { "Hauppauge Nova-TD-500 (84xxx)",
1392 { &dib0700_usb_id_table[36], NULL },
1393 { NULL },
1389 } 1394 }
1390 } 1395 }
1391 }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, 1396 }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 029b437caf9a..03dfb9f2fe30 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -129,6 +129,7 @@
129#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301 129#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301
130#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941 130#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941
131#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950 131#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950
132#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400
132#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050 133#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050
133#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060 134#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060
134#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070 135#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070
diff --git a/drivers/media/dvb/frontends/au8522.c b/drivers/media/dvb/frontends/au8522.c
index f7b71657f0f6..0b82cc2a1e16 100644
--- a/drivers/media/dvb/frontends/au8522.c
+++ b/drivers/media/dvb/frontends/au8522.c
@@ -1,7 +1,7 @@
1/* 1/*
2 Auvitek AU8522 QAM/8VSB demodulator driver 2 Auvitek AU8522 QAM/8VSB demodulator driver
3 3
4 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com> 4 Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
@@ -304,6 +304,43 @@ static int au8522_mse2snr_lookup(struct mse2snr_tab *tab, int sz, int mse,
304 return ret; 304 return ret;
305} 305}
306 306
307static int au8522_set_if(struct dvb_frontend *fe, enum au8522_if_freq if_freq)
308{
309 struct au8522_state *state = fe->demodulator_priv;
310 u8 r0b5, r0b6, r0b7;
311 char *ifmhz;
312
313 switch (if_freq) {
314 case AU8522_IF_3_25MHZ:
315 ifmhz = "3.25";
316 r0b5 = 0x00;
317 r0b6 = 0x3d;
318 r0b7 = 0xa0;
319 break;
320 case AU8522_IF_4MHZ:
321 ifmhz = "4.00";
322 r0b5 = 0x00;
323 r0b6 = 0x4b;
324 r0b7 = 0xd9;
325 break;
326 case AU8522_IF_6MHZ:
327 ifmhz = "6.00";
328 r0b5 = 0xfb;
329 r0b6 = 0x8e;
330 r0b7 = 0x39;
331 break;
332 default:
333 dprintk("%s() IF Frequency not supported\n", __func__);
334 return -EINVAL;
335 }
336 dprintk("%s() %s MHz\n", __func__, ifmhz);
337 au8522_writereg(state, 0x80b5, r0b5);
338 au8522_writereg(state, 0x80b6, r0b6);
339 au8522_writereg(state, 0x80b7, r0b7);
340
341 return 0;
342}
343
307/* VSB Modulation table */ 344/* VSB Modulation table */
308static struct { 345static struct {
309 u16 reg; 346 u16 reg;
@@ -334,9 +371,6 @@ static struct {
334 { 0x80af, 0x66 }, 371 { 0x80af, 0x66 },
335 { 0x821b, 0xcc }, 372 { 0x821b, 0xcc },
336 { 0x821d, 0x80 }, 373 { 0x821d, 0x80 },
337 { 0x80b5, 0xfb },
338 { 0x80b6, 0x8e },
339 { 0x80b7, 0x39 },
340 { 0x80a4, 0xe8 }, 374 { 0x80a4, 0xe8 },
341 { 0x8231, 0x13 }, 375 { 0x8231, 0x13 },
342}; 376};
@@ -350,9 +384,6 @@ static struct {
350 { 0x80a4, 0x00 }, 384 { 0x80a4, 0x00 },
351 { 0x8081, 0xc4 }, 385 { 0x8081, 0xc4 },
352 { 0x80a5, 0x40 }, 386 { 0x80a5, 0x40 },
353 { 0x80b5, 0xfb },
354 { 0x80b6, 0x8e },
355 { 0x80b7, 0x39 },
356 { 0x80aa, 0x77 }, 387 { 0x80aa, 0x77 },
357 { 0x80ad, 0x77 }, 388 { 0x80ad, 0x77 },
358 { 0x80a6, 0x67 }, 389 { 0x80a6, 0x67 },
@@ -438,6 +469,7 @@ static int au8522_enable_modulation(struct dvb_frontend *fe,
438 au8522_writereg(state, 469 au8522_writereg(state,
439 VSB_mod_tab[i].reg, 470 VSB_mod_tab[i].reg,
440 VSB_mod_tab[i].data); 471 VSB_mod_tab[i].data);
472 au8522_set_if(fe, state->config->vsb_if);
441 break; 473 break;
442 case QAM_64: 474 case QAM_64:
443 case QAM_256: 475 case QAM_256:
@@ -446,6 +478,7 @@ static int au8522_enable_modulation(struct dvb_frontend *fe,
446 au8522_writereg(state, 478 au8522_writereg(state,
447 QAM_mod_tab[i].reg, 479 QAM_mod_tab[i].reg,
448 QAM_mod_tab[i].data); 480 QAM_mod_tab[i].data);
481 au8522_set_if(fe, state->config->qam_if);
449 break; 482 break;
450 default: 483 default:
451 dprintk("%s() Invalid modulation\n", __func__); 484 dprintk("%s() Invalid modulation\n", __func__);
diff --git a/drivers/media/dvb/frontends/au8522.h b/drivers/media/dvb/frontends/au8522.h
index d7affa3cdb27..595915ade8c3 100644
--- a/drivers/media/dvb/frontends/au8522.h
+++ b/drivers/media/dvb/frontends/au8522.h
@@ -1,7 +1,7 @@
1/* 1/*
2 Auvitek AU8522 QAM/8VSB demodulator driver 2 Auvitek AU8522 QAM/8VSB demodulator driver
3 3
4 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com> 4 Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
@@ -24,6 +24,12 @@
24 24
25#include <linux/dvb/frontend.h> 25#include <linux/dvb/frontend.h>
26 26
27enum au8522_if_freq {
28 AU8522_IF_6MHZ = 0,
29 AU8522_IF_4MHZ,
30 AU8522_IF_3_25MHZ,
31};
32
27struct au8522_config { 33struct au8522_config {
28 /* the demodulator's i2c address */ 34 /* the demodulator's i2c address */
29 u8 demod_address; 35 u8 demod_address;
@@ -32,6 +38,9 @@ struct au8522_config {
32#define AU8522_TUNERLOCKING 0 38#define AU8522_TUNERLOCKING 0
33#define AU8522_DEMODLOCKING 1 39#define AU8522_DEMODLOCKING 1
34 u8 status_mode; 40 u8 status_mode;
41
42 enum au8522_if_freq vsb_if;
43 enum au8522_if_freq qam_if;
35}; 44};
36 45
37#if defined(CONFIG_DVB_AU8522) || \ 46#if defined(CONFIG_DVB_AU8522) || \
diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c
index cc1db4e371c3..9430e03dba6c 100644
--- a/drivers/media/dvb/frontends/cx22702.c
+++ b/drivers/media/dvb/frontends/cx22702.c
@@ -7,7 +7,7 @@
7 Copyright (C) 2001-2002 Convergence Integrated Media GmbH 7 Copyright (C) 2001-2002 Convergence Integrated Media GmbH
8 Holger Waechtler <holger@convergence.de> 8 Holger Waechtler <holger@convergence.de>
9 9
10 Copyright (C) 2004 Steven Toth <stoth@hauppauge.com> 10 Copyright (C) 2004 Steven Toth <stoth@linuxtv.org>
11 11
12 This program is free software; you can redistribute it and/or modify 12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by 13 it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/dvb/frontends/cx22702.h b/drivers/media/dvb/frontends/cx22702.h
index 8af766a31552..b1e465c6c2ce 100644
--- a/drivers/media/dvb/frontends/cx22702.h
+++ b/drivers/media/dvb/frontends/cx22702.h
@@ -7,7 +7,7 @@
7 Copyright (C) 2001-2002 Convergence Integrated Media GmbH 7 Copyright (C) 2001-2002 Convergence Integrated Media GmbH
8 Holger Waechtler <holger@convergence.de> 8 Holger Waechtler <holger@convergence.de>
9 9
10 Copyright (C) 2004 Steven Toth <stoth@hauppauge.com> 10 Copyright (C) 2004 Steven Toth <stoth@linuxtv.org>
11 11
12 This program is free software; you can redistribute it and/or modify 12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by 13 it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/dvb/frontends/cx24123.c b/drivers/media/dvb/frontends/cx24123.c
index 7f68d78c6558..7156157cb34b 100644
--- a/drivers/media/dvb/frontends/cx24123.c
+++ b/drivers/media/dvb/frontends/cx24123.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Conexant cx24123/cx24109 - DVB QPSK Satellite demod/tuner driver 2 * Conexant cx24123/cx24109 - DVB QPSK Satellite demod/tuner driver
3 * 3 *
4 * Copyright (C) 2005 Steven Toth <stoth@hauppauge.com> 4 * Copyright (C) 2005 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * Support for KWorld DVB-S 100 by Vadim Catana <skystar@moldova.cc> 6 * Support for KWorld DVB-S 100 by Vadim Catana <skystar@moldova.cc>
7 * 7 *
@@ -1072,8 +1072,8 @@ struct dvb_frontend* cx24123_attach(const struct cx24123_config* config,
1072 if (config->dont_use_pll) 1072 if (config->dont_use_pll)
1073 cx24123_repeater_mode(state, 1, 0); 1073 cx24123_repeater_mode(state, 1, 0);
1074 1074
1075 strncpy(state->tuner_i2c_adapter.name, 1075 strlcpy(state->tuner_i2c_adapter.name, "CX24123 tuner I2C bus",
1076 "CX24123 tuner I2C bus", I2C_NAME_SIZE); 1076 sizeof(state->tuner_i2c_adapter.name));
1077 state->tuner_i2c_adapter.class = I2C_CLASS_TV_DIGITAL, 1077 state->tuner_i2c_adapter.class = I2C_CLASS_TV_DIGITAL,
1078 state->tuner_i2c_adapter.algo = &cx24123_tuner_i2c_algo; 1078 state->tuner_i2c_adapter.algo = &cx24123_tuner_i2c_algo;
1079 state->tuner_i2c_adapter.algo_data = NULL; 1079 state->tuner_i2c_adapter.algo_data = NULL;
diff --git a/drivers/media/dvb/frontends/cx24123.h b/drivers/media/dvb/frontends/cx24123.h
index 81ebc3d2f19f..cc6b411d6d20 100644
--- a/drivers/media/dvb/frontends/cx24123.h
+++ b/drivers/media/dvb/frontends/cx24123.h
@@ -1,7 +1,7 @@
1/* 1/*
2 Conexant cx24123/cx24109 - DVB QPSK Satellite demod/tuner driver 2 Conexant cx24123/cx24109 - DVB QPSK Satellite demod/tuner driver
3 3
4 Copyright (C) 2005 Steven Toth <stoth@hauppauge.com> 4 Copyright (C) 2005 Steven Toth <stoth@linuxtv.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/dvb/frontends/s5h1409.c b/drivers/media/dvb/frontends/s5h1409.c
index 5ddb2dca305c..7500a1c53e68 100644
--- a/drivers/media/dvb/frontends/s5h1409.c
+++ b/drivers/media/dvb/frontends/s5h1409.c
@@ -1,7 +1,7 @@
1/* 1/*
2 Samsung S5H1409 VSB/QAM demodulator driver 2 Samsung S5H1409 VSB/QAM demodulator driver
3 3
4 Copyright (C) 2006 Steven Toth <stoth@hauppauge.com> 4 Copyright (C) 2006 Steven Toth <stoth@linuxtv.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
@@ -404,6 +404,7 @@ static int s5h1409_enable_modulation(struct dvb_frontend* fe,
404 break; 404 break;
405 case QAM_64: 405 case QAM_64:
406 case QAM_256: 406 case QAM_256:
407 case QAM_AUTO:
407 dprintk("%s() QAM_AUTO (64/256)\n", __func__); 408 dprintk("%s() QAM_AUTO (64/256)\n", __func__);
408 if (state->if_freq != S5H1409_QAM_IF_FREQ) 409 if (state->if_freq != S5H1409_QAM_IF_FREQ)
409 s5h1409_set_if_freq(fe, S5H1409_QAM_IF_FREQ); 410 s5h1409_set_if_freq(fe, S5H1409_QAM_IF_FREQ);
diff --git a/drivers/media/dvb/frontends/s5h1409.h b/drivers/media/dvb/frontends/s5h1409.h
index 59f4335964c6..d1a1d2eb8e11 100644
--- a/drivers/media/dvb/frontends/s5h1409.h
+++ b/drivers/media/dvb/frontends/s5h1409.h
@@ -1,7 +1,7 @@
1/* 1/*
2 Samsung S5H1409 VSB/QAM demodulator driver 2 Samsung S5H1409 VSB/QAM demodulator driver
3 3
4 Copyright (C) 2006 Steven Toth <stoth@hauppauge.com> 4 Copyright (C) 2006 Steven Toth <stoth@linuxtv.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
index cff360ce1ba3..2da1a3763de9 100644
--- a/drivers/media/dvb/frontends/s5h1411.c
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -1,7 +1,7 @@
1/* 1/*
2 Samsung S5H1411 VSB/QAM demodulator driver 2 Samsung S5H1411 VSB/QAM demodulator driver
3 3
4 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com> 4 Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
@@ -488,6 +488,7 @@ static int s5h1411_enable_modulation(struct dvb_frontend *fe,
488 break; 488 break;
489 case QAM_64: 489 case QAM_64:
490 case QAM_256: 490 case QAM_256:
491 case QAM_AUTO:
491 dprintk("%s() QAM_AUTO (64/256)\n", __func__); 492 dprintk("%s() QAM_AUTO (64/256)\n", __func__);
492 s5h1411_set_if_freq(fe, state->config->qam_if); 493 s5h1411_set_if_freq(fe, state->config->qam_if);
493 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x00, 0x0171); 494 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x00, 0x0171);
diff --git a/drivers/media/dvb/frontends/s5h1411.h b/drivers/media/dvb/frontends/s5h1411.h
index 1855f64ed4d8..7d542bc00c48 100644
--- a/drivers/media/dvb/frontends/s5h1411.h
+++ b/drivers/media/dvb/frontends/s5h1411.h
@@ -1,7 +1,7 @@
1/* 1/*
2 Samsung S5H1411 VSB/QAM demodulator driver 2 Samsung S5H1411 VSB/QAM demodulator driver
3 3
4 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com> 4 Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 720ed9ff7c5f..747d3fa2e5e5 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -915,7 +915,8 @@ struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
915 state->frontend.demodulator_priv = state; 915 state->frontend.demodulator_priv = state;
916 916
917 /* create tuner i2c adapter */ 917 /* create tuner i2c adapter */
918 strncpy(state->tuner_i2c_adapter.name, "S5H1420-PN1010 tuner I2C bus", I2C_NAME_SIZE); 918 strlcpy(state->tuner_i2c_adapter.name, "S5H1420-PN1010 tuner I2C bus",
919 sizeof(state->tuner_i2c_adapter.name));
919 state->tuner_i2c_adapter.class = I2C_CLASS_TV_DIGITAL, 920 state->tuner_i2c_adapter.class = I2C_CLASS_TV_DIGITAL,
920 state->tuner_i2c_adapter.algo = &s5h1420_tuner_i2c_algo; 921 state->tuner_i2c_adapter.algo = &s5h1420_tuner_i2c_algo;
921 state->tuner_i2c_adapter.algo_data = NULL; 922 state->tuner_i2c_adapter.algo_data = NULL;
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index 0ab8d86b3ae3..04e7f1cc1403 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -1,7 +1,7 @@
1/* 1/*
2 NXP TDA10048HN DVB OFDM demodulator driver 2 NXP TDA10048HN DVB OFDM demodulator driver
3 3
4 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com> 4 Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
@@ -303,7 +303,7 @@ static int tda10048_firmware_upload(struct dvb_frontend *fe)
303 303
304 if (fw->size != TDA10048_DEFAULT_FIRMWARE_SIZE) { 304 if (fw->size != TDA10048_DEFAULT_FIRMWARE_SIZE) {
305 printk(KERN_ERR "%s: firmware incorrect size\n", __func__); 305 printk(KERN_ERR "%s: firmware incorrect size\n", __func__);
306 return -EIO; 306 ret = -EIO;
307 } else { 307 } else {
308 printk(KERN_INFO "%s: firmware uploading\n", __func__); 308 printk(KERN_INFO "%s: firmware uploading\n", __func__);
309 309
diff --git a/drivers/media/dvb/frontends/tda10048.h b/drivers/media/dvb/frontends/tda10048.h
index 2b5c78e62c86..0457b24601fa 100644
--- a/drivers/media/dvb/frontends/tda10048.h
+++ b/drivers/media/dvb/frontends/tda10048.h
@@ -1,7 +1,7 @@
1/* 1/*
2 NXP TDA10048HN DVB OFDM demodulator driver 2 NXP TDA10048HN DVB OFDM demodulator driver
3 3
4 Copyright (C) 2008 Steven Toth <stoth@hauppauge.com> 4 Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index e7a8ac0c4049..cc5efb643f33 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -4,7 +4,7 @@
4 * Copyright (c) 2008 Michael Krufky <mkrufky@linuxtv.org> 4 * Copyright (c) 2008 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 3 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation; 8 * published by the Free Software Foundation;
9 * 9 *
10 * Software distributed under the License is distributed on an "AS IS" 10 * Software distributed under the License is distributed on an "AS IS"
diff --git a/drivers/media/dvb/siano/sms-cards.h b/drivers/media/dvb/siano/sms-cards.h
index 83b39bc203fe..c8f3da6f9bc1 100644
--- a/drivers/media/dvb/siano/sms-cards.h
+++ b/drivers/media/dvb/siano/sms-cards.h
@@ -4,7 +4,7 @@
4 * Copyright (c) 2008 Michael Krufky <mkrufky@linuxtv.org> 4 * Copyright (c) 2008 Michael Krufky <mkrufky@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 3 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation; 8 * published by the Free Software Foundation;
9 * 9 *
10 * Software distributed under the License is distributed on an "AS IS" 10 * Software distributed under the License is distributed on an "AS IS"
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index c5f45fed69dc..6576fbb40fc6 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -8,7 +8,7 @@
8 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc. 8 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 3 as 11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation; 12 * published by the Free Software Foundation;
13 * 13 *
14 * Software distributed under the License is distributed on an "AS IS" 14 * Software distributed under the License is distributed on an "AS IS"
diff --git a/drivers/media/dvb/siano/smscoreapi.h b/drivers/media/dvb/siano/smscoreapi.h
index c1f8f1dccb11..8d973f726fb8 100644
--- a/drivers/media/dvb/siano/smscoreapi.h
+++ b/drivers/media/dvb/siano/smscoreapi.h
@@ -6,7 +6,7 @@
6 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc. 6 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 3 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation; 10 * published by the Free Software Foundation;
11 * 11 *
12 * Software distributed under the License is distributed on an "AS IS" 12 * Software distributed under the License is distributed on an "AS IS"
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c
index 229274a14110..8d490e133f35 100644
--- a/drivers/media/dvb/siano/smsdvb.c
+++ b/drivers/media/dvb/siano/smsdvb.c
@@ -6,7 +6,7 @@
6 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc. 6 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 3 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation; 10 * published by the Free Software Foundation;
11 * 11 *
12 * Software distributed under the License is distributed on an "AS IS" 12 * Software distributed under the License is distributed on an "AS IS"
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index c10b1849c6a3..87a3c24454b9 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -6,7 +6,7 @@
6 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc. 6 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 3 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation; 10 * published by the Free Software Foundation;
11 * 11 *
12 * Software distributed under the License is distributed on an "AS IS" 12 * Software distributed under the License is distributed on an "AS IS"
diff --git a/drivers/media/dvb/ttpci/budget-patch.c b/drivers/media/dvb/ttpci/budget-patch.c
index 39bd0a20f53a..aa5ed4ef19f2 100644
--- a/drivers/media/dvb/ttpci/budget-patch.c
+++ b/drivers/media/dvb/ttpci/budget-patch.c
@@ -116,7 +116,8 @@ static int SendDiSEqCMsg (struct budget *budget, int len, u8 *msg, unsigned long
116 DiseqcSendByte(budget, 0xff); 116 DiseqcSendByte(budget, 0xff);
117 else { 117 else {
118 saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); 118 saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
119 udelay(12500); 119 mdelay(12);
120 udelay(500);
120 saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); 121 saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
121 } 122 }
122 msleep(20); 123 msleep(20);
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 2293d80c6e51..f0068996ac07 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -108,7 +108,8 @@ static int SendDiSEqCMsg (struct budget *budget, int len, u8 *msg, unsigned long
108 DiseqcSendByte(budget, 0xff); 108 DiseqcSendByte(budget, 0xff);
109 else { 109 else {
110 saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); 110 saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
111 udelay(12500); 111 mdelay(12);
112 udelay(500);
112 saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); 113 saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
113 } 114 }
114 msleep(20); 115 msleep(20);
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index a30159f6fa42..7ca71ab96b43 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -2,8 +2,6 @@
2# Makefile for the kernel character device drivers. 2# Makefile for the kernel character device drivers.
3# 3#
4 4
5miropcm20-objs := miropcm20-rds-core.o miropcm20-radio.o
6
7obj-$(CONFIG_RADIO_AZTECH) += radio-aztech.o 5obj-$(CONFIG_RADIO_AZTECH) += radio-aztech.o
8obj-$(CONFIG_RADIO_RTRACK2) += radio-rtrack2.o 6obj-$(CONFIG_RADIO_RTRACK2) += radio-rtrack2.o
9obj-$(CONFIG_RADIO_SF16FMI) += radio-sf16fmi.o 7obj-$(CONFIG_RADIO_SF16FMI) += radio-sf16fmi.o
@@ -14,8 +12,6 @@ obj-$(CONFIG_RADIO_TERRATEC) += radio-terratec.o
14obj-$(CONFIG_RADIO_MAXIRADIO) += radio-maxiradio.o 12obj-$(CONFIG_RADIO_MAXIRADIO) += radio-maxiradio.o
15obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o 13obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
16obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o 14obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
17obj-$(CONFIG_RADIO_MIROPCM20) += miropcm20.o
18obj-$(CONFIG_RADIO_MIROPCM20_RDS) += miropcm20-rds.o
19obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o 15obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
20obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o 16obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o
21obj-$(CONFIG_RADIO_TRUST) += radio-trust.o 17obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 1ed88f3abe61..70c65a745923 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -493,7 +493,7 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
493 radio->usbdev = interface_to_usbdev(intf); 493 radio->usbdev = interface_to_usbdev(intf);
494 radio->curfreq = FREQ_MIN*FREQ_MUL; 494 radio->curfreq = FREQ_MIN*FREQ_MUL;
495 video_set_drvdata(radio->videodev, radio); 495 video_set_drvdata(radio->videodev, radio);
496 if (video_register_device(radio->videodev, VFL_TYPE_RADIO,radio_nr)) { 496 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr) < 0) {
497 warn("Could not register video device"); 497 warn("Could not register video device");
498 video_device_release(radio->videodev); 498 video_device_release(radio->videodev);
499 kfree(radio->transfer_buffer); 499 kfree(radio->transfer_buffer);
diff --git a/drivers/media/radio/miropcm20-radio.c b/drivers/media/radio/miropcm20-radio.c
deleted file mode 100644
index 7fd7ee2d32c1..000000000000
--- a/drivers/media/radio/miropcm20-radio.c
+++ /dev/null
@@ -1,266 +0,0 @@
1/* Miro PCM20 radio driver for Linux radio support
2 * (c) 1998 Ruurd Reitsma <R.A.Reitsma@wbmt.tudelft.nl>
3 * Thanks to Norberto Pellici for the ACI device interface specification
4 * The API part is based on the radiotrack driver by M. Kirkwood
5 * This driver relies on the aci mixer (drivers/sound/aci.c)
6 * Look there for further info...
7 */
8
9/* Revision history:
10 *
11 * 1998 Ruurd Reitsma <R.A.Reitsma@wbmt.tudelft.nl>
12 * 2000-09-05 Robert Siemer <Robert.Siemer@gmx.de>
13 * removed unfinished volume control (maybe adding it later again)
14 * use OSS-mixer; added stereo control
15 */
16
17/* What ever you think about the ACI, version 0x07 is not very well!
18 * I can't get frequency, 'tuner status', 'tuner flags' or mute/mono
19 * conditions... Robert
20 */
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/videodev.h>
25#include <media/v4l2-common.h>
26#include <media/v4l2-ioctl.h>
27#include "oss/aci.h"
28#include "miropcm20-rds-core.h"
29
30static int radio_nr = -1;
31module_param(radio_nr, int, 0);
32
33struct pcm20_device {
34 unsigned long freq;
35 int muted;
36 int stereo;
37};
38
39
40static int pcm20_mute(struct pcm20_device *dev, unsigned char mute)
41{
42 dev->muted = mute;
43 return aci_write_cmd(ACI_SET_TUNERMUTE, mute);
44}
45
46static int pcm20_stereo(struct pcm20_device *dev, unsigned char stereo)
47{
48 dev->stereo = stereo;
49 return aci_write_cmd(ACI_SET_TUNERMONO, !stereo);
50}
51
52static int pcm20_setfreq(struct pcm20_device *dev, unsigned long freq)
53{
54 unsigned char freql;
55 unsigned char freqh;
56
57 dev->freq=freq;
58
59 freq /= 160;
60 if (!(aci_version==0x07 || aci_version>=0xb0))
61 freq /= 10; /* I don't know exactly which version
62 * needs this hack */
63 freql = freq & 0xff;
64 freqh = freq >> 8;
65
66 aci_rds_cmd(RDS_RESET, NULL, 0);
67 pcm20_stereo(dev, 1);
68
69 return aci_rw_cmd(ACI_WRITE_TUNE, freql, freqh);
70}
71
72static int pcm20_getflags(struct pcm20_device *dev, __u32 *flags, __u16 *signal)
73{
74 /* okay, check for signal, stereo and rds here... */
75 int i;
76 unsigned char buf;
77
78 if ((i=aci_rw_cmd(ACI_READ_TUNERSTATION, -1, -1))<0)
79 return i;
80 pr_debug("check_sig: 0x%x\n", i);
81 if (i & 0x80) {
82 /* no signal from tuner */
83 *flags=0;
84 *signal=0;
85 return 0;
86 } else
87 *signal=0xffff;
88
89 if ((i=aci_rw_cmd(ACI_READ_TUNERSTEREO, -1, -1))<0)
90 return i;
91 if (i & 0x40) {
92 *flags=0;
93 } else {
94 /* stereo */
95 *flags=VIDEO_TUNER_STEREO_ON;
96 /* I can't see stereo, when forced to mono */
97 dev->stereo=1;
98 }
99
100 if ((i=aci_rds_cmd(RDS_STATUS, &buf, 1))<0)
101 return i;
102 if (buf & 1)
103 /* RDS available */
104 *flags|=VIDEO_TUNER_RDS_ON;
105 else
106 return 0;
107
108 if ((i=aci_rds_cmd(RDS_RXVALUE, &buf, 1))<0)
109 return i;
110 pr_debug("rds-signal: %d\n", buf);
111 if (buf > 15) {
112 printk("miropcm20-radio: RX strengths unexpected high...\n");
113 buf=15;
114 }
115 /* refine signal */
116 if ((*signal=SCALE(15, 0xffff, buf))==0)
117 *signal = 1;
118
119 return 0;
120}
121
122static int pcm20_do_ioctl(struct inode *inode, struct file *file,
123 unsigned int cmd, void *arg)
124{
125 struct video_device *dev = video_devdata(file);
126 struct pcm20_device *pcm20 = dev->priv;
127 int i;
128
129 switch(cmd)
130 {
131 case VIDIOCGCAP:
132 {
133 struct video_capability *v = arg;
134 memset(v,0,sizeof(*v));
135 v->type=VID_TYPE_TUNER;
136 strcpy(v->name, "Miro PCM20");
137 v->channels=1;
138 v->audios=1;
139 return 0;
140 }
141 case VIDIOCGTUNER:
142 {
143 struct video_tuner *v = arg;
144 if(v->tuner) /* Only 1 tuner */
145 return -EINVAL;
146 v->rangelow=87*16000;
147 v->rangehigh=108*16000;
148 pcm20_getflags(pcm20, &v->flags, &v->signal);
149 v->flags|=VIDEO_TUNER_LOW;
150 v->mode=VIDEO_MODE_AUTO;
151 strcpy(v->name, "FM");
152 return 0;
153 }
154 case VIDIOCSTUNER:
155 {
156 struct video_tuner *v = arg;
157 if(v->tuner!=0)
158 return -EINVAL;
159 /* Only 1 tuner so no setting needed ! */
160 return 0;
161 }
162 case VIDIOCGFREQ:
163 {
164 unsigned long *freq = arg;
165 *freq = pcm20->freq;
166 return 0;
167 }
168 case VIDIOCSFREQ:
169 {
170 unsigned long *freq = arg;
171 pcm20->freq = *freq;
172 i=pcm20_setfreq(pcm20, pcm20->freq);
173 pr_debug("First view (setfreq): 0x%x\n", i);
174 return i;
175 }
176 case VIDIOCGAUDIO:
177 {
178 struct video_audio *v = arg;
179 memset(v,0, sizeof(*v));
180 v->flags=VIDEO_AUDIO_MUTABLE;
181 if (pcm20->muted)
182 v->flags|=VIDEO_AUDIO_MUTE;
183 v->mode=VIDEO_SOUND_STEREO;
184 if (pcm20->stereo)
185 v->mode|=VIDEO_SOUND_MONO;
186 /* v->step=2048; */
187 strcpy(v->name, "Radio");
188 return 0;
189 }
190 case VIDIOCSAUDIO:
191 {
192 struct video_audio *v = arg;
193 if(v->audio)
194 return -EINVAL;
195
196 pcm20_mute(pcm20, !!(v->flags&VIDEO_AUDIO_MUTE));
197 if(v->flags&VIDEO_SOUND_MONO)
198 pcm20_stereo(pcm20, 0);
199 if(v->flags&VIDEO_SOUND_STEREO)
200 pcm20_stereo(pcm20, 1);
201
202 return 0;
203 }
204 default:
205 return -ENOIOCTLCMD;
206 }
207}
208
209static int pcm20_ioctl(struct inode *inode, struct file *file,
210 unsigned int cmd, unsigned long arg)
211{
212 return video_usercopy(inode, file, cmd, arg, pcm20_do_ioctl);
213}
214
215static struct pcm20_device pcm20_unit = {
216 .freq = 87*16000,
217 .muted = 1,
218};
219
220static const struct file_operations pcm20_fops = {
221 .owner = THIS_MODULE,
222 .open = video_exclusive_open,
223 .release = video_exclusive_release,
224 .ioctl = pcm20_ioctl,
225#ifdef CONFIG_COMPAT
226 .compat_ioctl = v4l_compat_ioctl32,
227#endif
228 .llseek = no_llseek,
229};
230
231static struct video_device pcm20_radio = {
232 .name = "Miro PCM 20 radio",
233 .fops = &pcm20_fops,
234 .priv = &pcm20_unit
235};
236
237static int __init pcm20_init(void)
238{
239 if(video_register_device(&pcm20_radio, VFL_TYPE_RADIO, radio_nr)==-1)
240 goto video_register_device;
241
242 if(attach_aci_rds()<0)
243 goto attach_aci_rds;
244
245 printk(KERN_INFO "Miro PCM20 radio card driver.\n");
246
247 return 0;
248
249 attach_aci_rds:
250 video_unregister_device(&pcm20_radio);
251 video_register_device:
252 return -EINVAL;
253}
254
255MODULE_AUTHOR("Ruurd Reitsma");
256MODULE_DESCRIPTION("A driver for the Miro PCM20 radio card.");
257MODULE_LICENSE("GPL");
258
259static void __exit pcm20_cleanup(void)
260{
261 unload_aci_rds();
262 video_unregister_device(&pcm20_radio);
263}
264
265module_init(pcm20_init);
266module_exit(pcm20_cleanup);
diff --git a/drivers/media/radio/miropcm20-rds-core.c b/drivers/media/radio/miropcm20-rds-core.c
deleted file mode 100644
index 9428d8b2642c..000000000000
--- a/drivers/media/radio/miropcm20-rds-core.c
+++ /dev/null
@@ -1,211 +0,0 @@
1/*
2 * Many thanks to Fred Seidel <seidel@metabox.de>, the
3 * designer of the RDS decoder hardware. With his help
4 * I was able to code this driver.
5 * Thanks also to Norberto Pellicci, Dominic Mounteney
6 * <DMounteney@pinnaclesys.com> and www.teleauskunft.de
7 * for good hints on finding Fred. It was somewhat hard
8 * to locate him here in Germany... [:
9 *
10 * Revision history:
11 *
12 * 2000-08-09 Robert Siemer <Robert.Siemer@gmx.de>
13 * RDS support for MiroSound PCM20 radio
14 */
15
16#include <linux/module.h>
17#include <linux/errno.h>
18#include <linux/string.h>
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/mutex.h>
22
23#include <asm/io.h>
24#include "oss/aci.h"
25#include "miropcm20-rds-core.h"
26
27#define DEBUG 0
28
29static struct mutex aci_rds_mutex;
30
31#define RDS_DATASHIFT 2 /* Bit 2 */
32#define RDS_DATAMASK (1 << RDS_DATASHIFT)
33#define RDS_BUSYMASK 0x10 /* Bit 4 */
34#define RDS_CLOCKMASK 0x08 /* Bit 3 */
35
36#define RDS_DATA(x) (((x) >> RDS_DATASHIFT) & 1)
37
38
39#if DEBUG
40static void print_matrix(char array[], unsigned int length)
41{
42 int i, j;
43
44 for (i=0; i<length; i++) {
45 printk(KERN_DEBUG "aci-rds: ");
46 for (j=7; j>=0; j--) {
47 printk("%d", (array[i] >> j) & 0x1);
48 }
49 if (i%8 == 0)
50 printk(" byte-border\n");
51 else
52 printk("\n");
53 }
54}
55#endif /* DEBUG */
56
57static int byte2trans(unsigned char byte, unsigned char sendbuffer[], int size)
58{
59 int i;
60
61 if (size != 8)
62 return -1;
63 for (i = 7; i >= 0; i--)
64 sendbuffer[7-i] = (byte & (1 << i)) ? RDS_DATAMASK : 0;
65 sendbuffer[0] |= RDS_CLOCKMASK;
66
67 return 0;
68}
69
70static int rds_waitread(void)
71{
72 unsigned char byte;
73 int i=2000;
74
75 do {
76 byte=inb(RDS_REGISTER);
77 i--;
78 }
79 while ((byte & RDS_BUSYMASK) && i);
80
81 if (i) {
82 #if DEBUG
83 printk(KERN_DEBUG "rds_waitread()");
84 print_matrix(&byte, 1);
85 #endif
86 return (byte);
87 } else {
88 printk(KERN_WARNING "aci-rds: rds_waitread() timeout...\n");
89 return -1;
90 }
91}
92
93/* don't use any ..._nowait() function if you are not sure what you do... */
94
95static inline void rds_rawwrite_nowait(unsigned char byte)
96{
97 #if DEBUG
98 printk(KERN_DEBUG "rds_rawwrite()");
99 print_matrix(&byte, 1);
100 #endif
101 outb(byte, RDS_REGISTER);
102}
103
104static int rds_rawwrite(unsigned char byte)
105{
106 if (rds_waitread() >= 0) {
107 rds_rawwrite_nowait(byte);
108 return 0;
109 } else
110 return -1;
111}
112
113static int rds_write(unsigned char cmd)
114{
115 unsigned char sendbuffer[8];
116 int i;
117
118 if (byte2trans(cmd, sendbuffer, 8) != 0){
119 return -1;
120 } else {
121 for (i=0; i<8; i++) {
122 rds_rawwrite(sendbuffer[i]);
123 }
124 }
125 return 0;
126}
127
128static int rds_readcycle_nowait(void)
129{
130 rds_rawwrite_nowait(0);
131 return rds_waitread();
132}
133
134static int rds_readcycle(void)
135{
136 if (rds_rawwrite(0) < 0)
137 return -1;
138 return rds_waitread();
139}
140
141static int rds_read(unsigned char databuffer[], int datasize)
142{
143 #define READSIZE (8*datasize)
144
145 int i,j;
146
147 if (datasize < 1) /* nothing to read */
148 return 0;
149
150 /* to be able to use rds_readcycle_nowait()
151 I have to waitread() here */
152 if (rds_waitread() < 0)
153 return -1;
154
155 memset(databuffer, 0, datasize);
156
157 for (i=0; i< READSIZE; i++)
158 if((j=rds_readcycle_nowait()) < 0) {
159 return -1;
160 } else {
161 databuffer[i/8]|=(RDS_DATA(j) << (7-(i%8)));
162 }
163
164 return 0;
165}
166
167static int rds_ack(void)
168{
169 int i=rds_readcycle();
170
171 if (i < 0)
172 return -1;
173 if (i & RDS_DATAMASK) {
174 return 0; /* ACK */
175 } else {
176 printk(KERN_DEBUG "aci-rds: NACK\n");
177 return 1; /* NACK */
178 }
179}
180
181int aci_rds_cmd(unsigned char cmd, unsigned char databuffer[], int datasize)
182{
183 int ret;
184
185 if (mutex_lock_interruptible(&aci_rds_mutex))
186 return -EINTR;
187
188 rds_write(cmd);
189
190 /* RDS_RESET doesn't need further processing */
191 if (cmd!=RDS_RESET && (rds_ack() || rds_read(databuffer, datasize)))
192 ret = -1;
193 else
194 ret = 0;
195
196 mutex_unlock(&aci_rds_mutex);
197
198 return ret;
199}
200EXPORT_SYMBOL(aci_rds_cmd);
201
202int __init attach_aci_rds(void)
203{
204 mutex_init(&aci_rds_mutex);
205 return 0;
206}
207
208void __exit unload_aci_rds(void)
209{
210}
211MODULE_LICENSE("GPL");
diff --git a/drivers/media/radio/miropcm20-rds-core.h b/drivers/media/radio/miropcm20-rds-core.h
deleted file mode 100644
index aeb5761f0469..000000000000
--- a/drivers/media/radio/miropcm20-rds-core.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#ifndef _MIROPCM20_RDS_CORE_H_
2#define _MIROPCM20_RDS_CORE_H_
3
4extern int aci_rds_cmd(unsigned char cmd, unsigned char databuffer[], int datasize);
5
6#define RDS_STATUS 0x01
7#define RDS_STATIONNAME 0x02
8#define RDS_TEXT 0x03
9#define RDS_ALTFREQ 0x04
10#define RDS_TIMEDATE 0x05
11#define RDS_PI_CODE 0x06
12#define RDS_PTYTATP 0x07
13#define RDS_RESET 0x08
14#define RDS_RXVALUE 0x09
15
16extern void __exit unload_aci_rds(void);
17extern int __init attach_aci_rds(void);
18
19#endif /* _MIROPCM20_RDS_CORE_H_ */
diff --git a/drivers/media/radio/miropcm20-rds.c b/drivers/media/radio/miropcm20-rds.c
deleted file mode 100644
index 3e840f74d45c..000000000000
--- a/drivers/media/radio/miropcm20-rds.c
+++ /dev/null
@@ -1,136 +0,0 @@
1/* MiroSOUND PCM20 radio rds interface driver
2 * (c) 2001 Robert Siemer <Robert.Siemer@gmx.de>
3 * Thanks to Fred Seidel. See miropcm20-rds-core.c for further information.
4 */
5
6/* Revision history:
7 *
8 * 2001-04-18 Robert Siemer <Robert.Siemer@gmx.de>
9 * separate file for user interface driver
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <linux/smp_lock.h>
16#include <linux/fs.h>
17#include <linux/miscdevice.h>
18#include <linux/delay.h>
19#include <asm/uaccess.h>
20#include "miropcm20-rds-core.h"
21
22static char * text_buffer;
23static int rds_users;
24
25
26static int rds_f_open(struct inode *in, struct file *fi)
27{
28 if (rds_users)
29 return -EBUSY;
30
31 lock_kernel();
32 rds_users++;
33 if ((text_buffer=kmalloc(66, GFP_KERNEL)) == 0) {
34 rds_users--;
35 printk(KERN_NOTICE "aci-rds: Out of memory by open()...\n");
36 unlock_kernel();
37 return -ENOMEM;
38 }
39
40 unlock_kernel();
41 return 0;
42}
43
44static int rds_f_release(struct inode *in, struct file *fi)
45{
46 kfree(text_buffer);
47
48 rds_users--;
49 return 0;
50}
51
52static void print_matrix(char *ch, char out[])
53{
54 int j;
55
56 for (j=7; j>=0; j--) {
57 out[7-j] = ((*ch >> j) & 0x1) + '0';
58 }
59}
60
61static ssize_t rds_f_read(struct file *file, char __user *buffer, size_t length, loff_t *offset)
62{
63// i = sprintf(text_buffer, "length: %d, offset: %d\n", length, *offset);
64
65 char c;
66 char bits[8];
67
68 msleep(2000);
69 aci_rds_cmd(RDS_STATUS, &c, 1);
70 print_matrix(&c, bits);
71 if (copy_to_user(buffer, bits, 8))
72 return -EFAULT;
73
74/* if ((c >> 3) & 1) {
75 aci_rds_cmd(RDS_STATIONNAME, text_buffer+1, 8);
76 text_buffer[0] = ' ' ;
77 text_buffer[9] = '\n';
78 return copy_to_user(buffer+8, text_buffer, 10) ? -EFAULT: 18;
79 }
80*/
81/* if ((c >> 6) & 1) {
82 aci_rds_cmd(RDS_PTYTATP, &c, 1);
83 if ( c & 1)
84 sprintf(text_buffer, " M");
85 else
86 sprintf(text_buffer, " S");
87 if ((c >> 1) & 1)
88 sprintf(text_buffer+2, " TA");
89 else
90 sprintf(text_buffer+2, " --");
91 if ((c >> 7) & 1)
92 sprintf(text_buffer+5, " TP");
93 else
94 sprintf(text_buffer+5, " --");
95 sprintf(text_buffer+8, " %2d\n", (c >> 2) & 0x1f);
96 return copy_to_user(buffer+8, text_buffer, 12) ? -EFAULT: 20;
97 }
98*/
99
100 if ((c >> 4) & 1) {
101 aci_rds_cmd(RDS_TEXT, text_buffer, 65);
102 text_buffer[0] = ' ' ;
103 text_buffer[65] = '\n';
104 return copy_to_user(buffer+8, text_buffer,66) ? -EFAULT : 66+8;
105 } else {
106 put_user('\n', buffer+8);
107 return 9;
108 }
109}
110
111static const struct file_operations rds_fops = {
112 .owner = THIS_MODULE,
113 .read = rds_f_read,
114 .open = rds_f_open,
115 .release = rds_f_release
116};
117
118static struct miscdevice rds_miscdev = {
119 .minor = MISC_DYNAMIC_MINOR,
120 .name = "radiotext",
121 .fops = &rds_fops,
122};
123
124static int __init miropcm20_rds_init(void)
125{
126 return misc_register(&rds_miscdev);
127}
128
129static void __exit miropcm20_rds_cleanup(void)
130{
131 misc_deregister(&rds_miscdev);
132}
133
134module_init(miropcm20_rds_init);
135module_exit(miropcm20_rds_cleanup);
136MODULE_LICENSE("GPL");
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index eba9209b3024..1f064f4b32df 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -426,8 +426,7 @@ static int __init rtrack_init(void)
426 426
427 rtrack_radio.priv=&rtrack_unit; 427 rtrack_radio.priv=&rtrack_unit;
428 428
429 if(video_register_device(&rtrack_radio, VFL_TYPE_RADIO, radio_nr)==-1) 429 if (video_register_device(&rtrack_radio, VFL_TYPE_RADIO, radio_nr) < 0) {
430 {
431 release_region(io, 2); 430 release_region(io, 2);
432 return -EINVAL; 431 return -EINVAL;
433 } 432 }
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 3fe5504428c5..628c689e3ffe 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -394,8 +394,7 @@ static int __init aztech_init(void)
394 mutex_init(&lock); 394 mutex_init(&lock);
395 aztech_radio.priv=&aztech_unit; 395 aztech_radio.priv=&aztech_unit;
396 396
397 if(video_register_device(&aztech_radio, VFL_TYPE_RADIO, radio_nr)==-1) 397 if (video_register_device(&aztech_radio, VFL_TYPE_RADIO, radio_nr) < 0) {
398 {
399 release_region(io,2); 398 release_region(io,2);
400 return -EINVAL; 399 return -EINVAL;
401 } 400 }
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 6166e726ed72..04c3698d32e4 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -682,7 +682,7 @@ static int __init cadet_init(void)
682 } 682 }
683 if (!request_region(io,2,"cadet")) 683 if (!request_region(io,2,"cadet"))
684 goto fail; 684 goto fail;
685 if(video_register_device(&cadet_radio,VFL_TYPE_RADIO,radio_nr)==-1) { 685 if (video_register_device(&cadet_radio, VFL_TYPE_RADIO, radio_nr) < 0) {
686 release_region(io,2); 686 release_region(io,2);
687 goto fail; 687 goto fail;
688 } 688 }
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
index 36e754e3ffb2..5cd7f032298d 100644
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ b/drivers/media/radio/radio-gemtek-pci.c
@@ -425,7 +425,7 @@ static int __devinit gemtek_pci_probe( struct pci_dev *pci_dev, const struct pci
425 } 425 }
426 *devradio = vdev_template; 426 *devradio = vdev_template;
427 427
428 if ( video_register_device( devradio, VFL_TYPE_RADIO , nr_radio) == -1 ) { 428 if (video_register_device(devradio, VFL_TYPE_RADIO, nr_radio) < 0) {
429 kfree( devradio ); 429 kfree( devradio );
430 goto err_video; 430 goto err_video;
431 } 431 }
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 2b1a6221de6d..0a0f956bb308 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -612,8 +612,7 @@ static int __init gemtek_init(void)
612 612
613 gemtek_radio.priv = &gemtek_unit; 613 gemtek_radio.priv = &gemtek_unit;
614 614
615 if (video_register_device(&gemtek_radio, VFL_TYPE_RADIO, 615 if (video_register_device(&gemtek_radio, VFL_TYPE_RADIO, radio_nr) < 0) {
616 radio_nr) == -1) {
617 release_region(io, 1); 616 release_region(io, 1);
618 return -EBUSY; 617 return -EBUSY;
619 } 618 }
diff --git a/drivers/media/radio/radio-maestro.c b/drivers/media/radio/radio-maestro.c
index 0ada1c697e8a..9ef0a763eeb7 100644
--- a/drivers/media/radio/radio-maestro.c
+++ b/drivers/media/radio/radio-maestro.c
@@ -409,8 +409,7 @@ static int __devinit maestro_probe(struct pci_dev *pdev,
409 video_set_drvdata(maestro_radio_inst, radio_unit); 409 video_set_drvdata(maestro_radio_inst, radio_unit);
410 pci_set_drvdata(pdev, maestro_radio_inst); 410 pci_set_drvdata(pdev, maestro_radio_inst);
411 411
412 retval = video_register_device(maestro_radio_inst, VFL_TYPE_RADIO, 412 retval = video_register_device(maestro_radio_inst, VFL_TYPE_RADIO, radio_nr);
413 radio_nr);
414 if (retval) { 413 if (retval) {
415 printk(KERN_ERR "can't register video device!\n"); 414 printk(KERN_ERR "can't register video device!\n");
416 goto errfr1; 415 goto errfr1;
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 43c75497dc49..0cc6fcb041fd 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -156,28 +156,28 @@ static void set_freq(__u16 io, __u32 freq)
156{ 156{
157 unsigned long int si; 157 unsigned long int si;
158 int bl; 158 int bl;
159 int data = FREQ2BITS(freq); 159 int val = FREQ2BITS(freq);
160 160
161 /* TEA5757 shift register bits (see pdf) */ 161 /* TEA5757 shift register bits (see pdf) */
162 162
163 outbit(0,io); // 24 search 163 outbit(0, io); /* 24 search */
164 outbit(1,io); // 23 search up/down 164 outbit(1, io); /* 23 search up/down */
165 165
166 outbit(0,io); // 22 stereo/mono 166 outbit(0, io); /* 22 stereo/mono */
167 167
168 outbit(0,io); // 21 band 168 outbit(0, io); /* 21 band */
169 outbit(0,io); // 20 band (only 00=FM works I think) 169 outbit(0, io); /* 20 band (only 00=FM works I think) */
170 170
171 outbit(0,io); // 19 port ? 171 outbit(0, io); /* 19 port ? */
172 outbit(0,io); // 18 port ? 172 outbit(0, io); /* 18 port ? */
173 173
174 outbit(0,io); // 17 search level 174 outbit(0, io); /* 17 search level */
175 outbit(0,io); // 16 search level 175 outbit(0, io); /* 16 search level */
176 176
177 si = 0x8000; 177 si = 0x8000;
178 for (bl = 1; bl <= 16 ; bl++) { 178 for (bl = 1; bl <= 16; bl++) {
179 outbit(data & si,io); 179 outbit(val & si, io);
180 si >>=1; 180 si >>= 1;
181 } 181 }
182 182
183 dprintk(1, "Radio freq set to %d.%02d MHz\n", 183 dprintk(1, "Radio freq set to %d.%02d MHz\n",
@@ -410,7 +410,7 @@ static int __devinit maxiradio_init_one(struct pci_dev *pdev, const struct pci_d
410 mutex_init(&radio_unit.lock); 410 mutex_init(&radio_unit.lock);
411 maxiradio_radio.priv = &radio_unit; 411 maxiradio_radio.priv = &radio_unit;
412 412
413 if (video_register_device(&maxiradio_radio, VFL_TYPE_RADIO, radio_nr)==-1) { 413 if (video_register_device(&maxiradio_radio, VFL_TYPE_RADIO, radio_nr) < 0) {
414 printk("radio-maxiradio: can't register device!"); 414 printk("radio-maxiradio: can't register device!");
415 goto err_out_free_region; 415 goto err_out_free_region;
416 } 416 }
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index e2dde0807268..6d820e2481e7 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -332,8 +332,7 @@ static int __init rtrack2_init(void)
332 rtrack2_radio.priv=&rtrack2_unit; 332 rtrack2_radio.priv=&rtrack2_unit;
333 333
334 spin_lock_init(&lock); 334 spin_lock_init(&lock);
335 if(video_register_device(&rtrack2_radio, VFL_TYPE_RADIO, radio_nr)==-1) 335 if (video_register_device(&rtrack2_radio, VFL_TYPE_RADIO, radio_nr) < 0) {
336 {
337 release_region(io, 4); 336 release_region(io, 4);
338 return -EINVAL; 337 return -EINVAL;
339 } 338 }
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index bb5d92f104af..0d478f54a907 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -377,7 +377,7 @@ static int __init fmi_init(void)
377 377
378 mutex_init(&lock); 378 mutex_init(&lock);
379 379
380 if (video_register_device(&fmi_radio, VFL_TYPE_RADIO, radio_nr) == -1) { 380 if (video_register_device(&fmi_radio, VFL_TYPE_RADIO, radio_nr) < 0) {
381 release_region(io, 2); 381 release_region(io, 2);
382 return -EINVAL; 382 return -EINVAL;
383 } 383 }
diff --git a/drivers/media/radio/radio-si470x.c b/drivers/media/radio/radio-si470x.c
index a4984ff87c9c..16c7ef20265c 100644
--- a/drivers/media/radio/radio-si470x.c
+++ b/drivers/media/radio/radio-si470x.c
@@ -1694,8 +1694,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
1694 INIT_DELAYED_WORK(&radio->work, si470x_work); 1694 INIT_DELAYED_WORK(&radio->work, si470x_work);
1695 1695
1696 /* register video device */ 1696 /* register video device */
1697 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr)) { 1697 retval = video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr);
1698 retval = -EIO; 1698 if (retval) {
1699 printk(KERN_WARNING DRIVER_NAME 1699 printk(KERN_WARNING DRIVER_NAME
1700 ": Could not register video device\n"); 1700 ": Could not register video device\n");
1701 goto err_all; 1701 goto err_all;
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index cefa44fc5aed..0876fecc5f27 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -405,8 +405,7 @@ static int __init terratec_init(void)
405 405
406 spin_lock_init(&lock); 406 spin_lock_init(&lock);
407 407
408 if(video_register_device(&terratec_radio, VFL_TYPE_RADIO, radio_nr)==-1) 408 if (video_register_device(&terratec_radio, VFL_TYPE_RADIO, radio_nr) < 0) {
409 {
410 release_region(io,2); 409 release_region(io,2);
411 return -EINVAL; 410 return -EINVAL;
412 } 411 }
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index d70172d23edb..193161956253 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -378,8 +378,7 @@ static int __init trust_init(void)
378 printk(KERN_ERR "trust: port 0x%x already in use\n", io); 378 printk(KERN_ERR "trust: port 0x%x already in use\n", io);
379 return -EBUSY; 379 return -EBUSY;
380 } 380 }
381 if(video_register_device(&trust_radio, VFL_TYPE_RADIO, radio_nr)==-1) 381 if (video_register_device(&trust_radio, VFL_TYPE_RADIO, radio_nr) < 0) {
382 {
383 release_region(io, 2); 382 release_region(io, 2);
384 return -EINVAL; 383 return -EINVAL;
385 } 384 }
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index 9f17a332fa11..51d57ed3b3e1 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -446,8 +446,7 @@ static int __init zoltrix_init(void)
446 return -EBUSY; 446 return -EBUSY;
447 } 447 }
448 448
449 if (video_register_device(&zoltrix_radio, VFL_TYPE_RADIO, radio_nr) == -1) 449 if (video_register_device(&zoltrix_radio, VFL_TYPE_RADIO, radio_nr) < 0) {
450 {
451 release_region(io, 2); 450 release_region(io, 2);
452 return -EINVAL; 451 return -EINVAL;
453 } 452 }
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index bbc6f8b82297..ef7c8d3ffb18 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -20,6 +20,8 @@ ifeq ($(CONFIG_VIDEO_V4L1_COMPAT),y)
20 obj-$(CONFIG_VIDEO_DEV) += v4l1-compat.o 20 obj-$(CONFIG_VIDEO_DEV) += v4l1-compat.o
21endif 21endif
22 22
23obj-$(CONFIG_VIDEO_TUNER) += tuner.o
24
23obj-$(CONFIG_VIDEO_BT848) += bt8xx/ 25obj-$(CONFIG_VIDEO_BT848) += bt8xx/
24obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o 26obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
25obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o 27obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
@@ -85,8 +87,6 @@ obj-$(CONFIG_VIDEO_HEXIUM_GEMINI) += hexium_gemini.o
85obj-$(CONFIG_VIDEO_DPC) += dpc7146.o 87obj-$(CONFIG_VIDEO_DPC) += dpc7146.o
86obj-$(CONFIG_TUNER_3036) += tuner-3036.o 88obj-$(CONFIG_TUNER_3036) += tuner-3036.o
87 89
88obj-$(CONFIG_VIDEO_TUNER) += tuner.o
89
90obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o 90obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
91obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o 91obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
92obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o 92obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
diff --git a/drivers/media/video/au0828/Kconfig b/drivers/media/video/au0828/Kconfig
index ed9a50f189fc..018f72b8e3e2 100644
--- a/drivers/media/video/au0828/Kconfig
+++ b/drivers/media/video/au0828/Kconfig
@@ -7,6 +7,7 @@ config VIDEO_AU0828
7 select DVB_AU8522 if !DVB_FE_CUSTOMIZE 7 select DVB_AU8522 if !DVB_FE_CUSTOMIZE
8 select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE 8 select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
9 select MEDIA_TUNER_MXL5007T if !DVB_FE_CUSTOMIZE 9 select MEDIA_TUNER_MXL5007T if !DVB_FE_CUSTOMIZE
10 select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE
10 ---help--- 11 ---help---
11 This is a video4linux driver for Auvitek's USB device. 12 This is a video4linux driver for Auvitek's USB device.
12 13
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 443e59009762..ed48908a9034 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Auvitek USB bridge 2 * Driver for the Auvitek USB bridge
3 * 3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -38,6 +38,9 @@ struct au0828_board au0828_boards[] = {
38 [AU0828_BOARD_DVICO_FUSIONHDTV7] = { 38 [AU0828_BOARD_DVICO_FUSIONHDTV7] = {
39 .name = "DViCO FusionHDTV USB", 39 .name = "DViCO FusionHDTV USB",
40 }, 40 },
41 [AU0828_BOARD_HAUPPAUGE_WOODBURY] = {
42 .name = "Hauppauge Woodbury",
43 },
41}; 44};
42 45
43/* Tuner callback function for au0828 boards. Currently only needed 46/* Tuner callback function for au0828 boards. Currently only needed
@@ -115,6 +118,7 @@ void au0828_card_setup(struct au0828_dev *dev)
115 case AU0828_BOARD_HAUPPAUGE_HVR850: 118 case AU0828_BOARD_HAUPPAUGE_HVR850:
116 case AU0828_BOARD_HAUPPAUGE_HVR950Q: 119 case AU0828_BOARD_HAUPPAUGE_HVR950Q:
117 case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL: 120 case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL:
121 case AU0828_BOARD_HAUPPAUGE_WOODBURY:
118 if (dev->i2c_rc == 0) 122 if (dev->i2c_rc == 0)
119 hauppauge_eeprom(dev, eeprom+0xa0); 123 hauppauge_eeprom(dev, eeprom+0xa0);
120 break; 124 break;
@@ -134,6 +138,7 @@ void au0828_gpio_setup(struct au0828_dev *dev)
134 case AU0828_BOARD_HAUPPAUGE_HVR850: 138 case AU0828_BOARD_HAUPPAUGE_HVR850:
135 case AU0828_BOARD_HAUPPAUGE_HVR950Q: 139 case AU0828_BOARD_HAUPPAUGE_HVR950Q:
136 case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL: 140 case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL:
141 case AU0828_BOARD_HAUPPAUGE_WOODBURY:
137 /* GPIO's 142 /* GPIO's
138 * 4 - CS5340 143 * 4 - CS5340
139 * 5 - AU8522 Demodulator 144 * 5 - AU8522 Demodulator
@@ -205,6 +210,8 @@ struct usb_device_id au0828_usb_id_table [] = {
205 .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL }, 210 .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
206 { USB_DEVICE(0x2040, 0x7281), 211 { USB_DEVICE(0x2040, 0x7281),
207 .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL }, 212 .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
213 { USB_DEVICE(0x2040, 0x8200),
214 .driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
208 { }, 215 { },
209}; 216};
210 217
diff --git a/drivers/media/video/au0828/au0828-cards.h b/drivers/media/video/au0828/au0828-cards.h
index c37f5fd0fa80..48a1882c2b6b 100644
--- a/drivers/media/video/au0828/au0828-cards.h
+++ b/drivers/media/video/au0828/au0828-cards.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Auvitek USB bridge 2 * Driver for the Auvitek USB bridge
3 * 3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -24,3 +24,4 @@
24#define AU0828_BOARD_HAUPPAUGE_HVR850 2 24#define AU0828_BOARD_HAUPPAUGE_HVR850 2
25#define AU0828_BOARD_DVICO_FUSIONHDTV7 3 25#define AU0828_BOARD_DVICO_FUSIONHDTV7 3
26#define AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL 4 26#define AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL 4
27#define AU0828_BOARD_HAUPPAUGE_WOODBURY 5
diff --git a/drivers/media/video/au0828/au0828-core.c b/drivers/media/video/au0828/au0828-core.c
index 54bfc0f05295..d856de9f742f 100644
--- a/drivers/media/video/au0828/au0828-core.c
+++ b/drivers/media/video/au0828/au0828-core.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Auvitek USB bridge 2 * Driver for the Auvitek USB bridge
3 * 3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -252,5 +252,5 @@ module_init(au0828_init);
252module_exit(au0828_exit); 252module_exit(au0828_exit);
253 253
254MODULE_DESCRIPTION("Driver for Auvitek AU0828 based products"); 254MODULE_DESCRIPTION("Driver for Auvitek AU0828 based products");
255MODULE_AUTHOR("Steven Toth <stoth@hauppauge.com>"); 255MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
256MODULE_LICENSE("GPL"); 256MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/au0828/au0828-dvb.c b/drivers/media/video/au0828/au0828-dvb.c
index 584a83a94a2a..ba94be7e0ac1 100644
--- a/drivers/media/video/au0828/au0828-dvb.c
+++ b/drivers/media/video/au0828/au0828-dvb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Auvitek USB bridge 2 * Driver for the Auvitek USB bridge
3 * 3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@
29#include "au8522.h" 29#include "au8522.h"
30#include "xc5000.h" 30#include "xc5000.h"
31#include "mxl5007t.h" 31#include "mxl5007t.h"
32#include "tda18271.h"
32 33
33DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 34DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
34 35
@@ -38,6 +39,15 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
38static struct au8522_config hauppauge_hvr950q_config = { 39static struct au8522_config hauppauge_hvr950q_config = {
39 .demod_address = 0x8e >> 1, 40 .demod_address = 0x8e >> 1,
40 .status_mode = AU8522_DEMODLOCKING, 41 .status_mode = AU8522_DEMODLOCKING,
42 .qam_if = AU8522_IF_6MHZ,
43 .vsb_if = AU8522_IF_6MHZ,
44};
45
46static struct au8522_config hauppauge_woodbury_config = {
47 .demod_address = 0x8e >> 1,
48 .status_mode = AU8522_DEMODLOCKING,
49 .qam_if = AU8522_IF_4MHZ,
50 .vsb_if = AU8522_IF_3_25MHZ,
41}; 51};
42 52
43static struct xc5000_config hauppauge_hvr950q_tunerconfig = { 53static struct xc5000_config hauppauge_hvr950q_tunerconfig = {
@@ -51,6 +61,10 @@ static struct mxl5007t_config mxl5007t_hvr950q_config = {
51 .if_freq_hz = MxL_IF_6_MHZ, 61 .if_freq_hz = MxL_IF_6_MHZ,
52}; 62};
53 63
64static struct tda18271_config hauppauge_woodbury_tunerconfig = {
65 .gate = TDA18271_GATE_DIGITAL,
66};
67
54/*-------------------------------------------------------------------*/ 68/*-------------------------------------------------------------------*/
55static void urb_completion(struct urb *purb) 69static void urb_completion(struct urb *purb)
56{ 70{
@@ -357,6 +371,15 @@ int au0828_dvb_register(struct au0828_dev *dev)
357 &dev->i2c_adap, 0x60, 371 &dev->i2c_adap, 0x60,
358 &mxl5007t_hvr950q_config); 372 &mxl5007t_hvr950q_config);
359 break; 373 break;
374 case AU0828_BOARD_HAUPPAUGE_WOODBURY:
375 dvb->frontend = dvb_attach(au8522_attach,
376 &hauppauge_woodbury_config,
377 &dev->i2c_adap);
378 if (dvb->frontend != NULL)
379 dvb_attach(tda18271_attach, dvb->frontend,
380 0x60, &dev->i2c_adap,
381 &hauppauge_woodbury_tunerconfig);
382 break;
360 default: 383 default:
361 printk(KERN_WARNING "The frontend of your DVB/ATSC card " 384 printk(KERN_WARNING "The frontend of your DVB/ATSC card "
362 "isn't supported yet\n"); 385 "isn't supported yet\n");
diff --git a/drivers/media/video/au0828/au0828-i2c.c b/drivers/media/video/au0828/au0828-i2c.c
index 741a4937b050..d618fbaade1b 100644
--- a/drivers/media/video/au0828/au0828-i2c.c
+++ b/drivers/media/video/au0828/au0828-i2c.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Auvitek AU0828 USB bridge 2 * Driver for the Auvitek AU0828 USB bridge
3 * 3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/au0828/au0828-reg.h b/drivers/media/video/au0828/au0828-reg.h
index 39827550891c..1e87fa0c6842 100644
--- a/drivers/media/video/au0828/au0828-reg.h
+++ b/drivers/media/video/au0828/au0828-reg.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Auvitek USB bridge 2 * Driver for the Auvitek USB bridge
3 * 3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
index 7beb571798e5..4f10ff300135 100644
--- a/drivers/media/video/au0828/au0828.h
+++ b/drivers/media/video/au0828/au0828.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Auvitek AU0828 USB bridge 2 * Driver for the Auvitek AU0828 USB bridge
3 * 3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 1c56ae92ce74..6081edc362df 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -3144,8 +3144,9 @@ static void identify_by_eeprom(struct bttv *btv, unsigned char eeprom_data[256])
3144 3144
3145static void flyvideo_gpio(struct bttv *btv) 3145static void flyvideo_gpio(struct bttv *btv)
3146{ 3146{
3147 int gpio,has_remote,has_radio,is_capture_only,is_lr90,has_tda9820_tda9821; 3147 int gpio, has_remote, has_radio, is_capture_only;
3148 int tuner=UNSET,ttype; 3148 int is_lr90, has_tda9820_tda9821;
3149 int tuner_type = UNSET, ttype;
3149 3150
3150 gpio_inout(0xffffff, 0); 3151 gpio_inout(0xffffff, 0);
3151 udelay(8); /* without this we would see the 0x1800 mask */ 3152 udelay(8); /* without this we would see the 0x1800 mask */
@@ -3163,20 +3164,26 @@ static void flyvideo_gpio(struct bttv *btv)
3163 * xxxF00(LR26/LR50), xxxFE0(LR90): Remote control chip (LVA001 or CF45) soldered 3164 * xxxF00(LR26/LR50), xxxFE0(LR90): Remote control chip (LVA001 or CF45) soldered
3164 * Note: Some bits are Audio_Mask ! 3165 * Note: Some bits are Audio_Mask !
3165 */ 3166 */
3166 ttype=(gpio&0x0f0000)>>16; 3167 ttype = (gpio & 0x0f0000) >> 16;
3167 switch(ttype) { 3168 switch (ttype) {
3168 case 0x0: tuner=2; /* NTSC, e.g. TPI8NSR11P */ 3169 case 0x0:
3170 tuner_type = 2; /* NTSC, e.g. TPI8NSR11P */
3169 break; 3171 break;
3170 case 0x2: tuner=39;/* LG NTSC (newer TAPC series) TAPC-H701P */ 3172 case 0x2:
3173 tuner_type = 39; /* LG NTSC (newer TAPC series) TAPC-H701P */
3171 break; 3174 break;
3172 case 0x4: tuner=5; /* Philips PAL TPI8PSB02P, TPI8PSB12P, TPI8PSB12D or FI1216, FM1216 */ 3175 case 0x4:
3176 tuner_type = 5; /* Philips PAL TPI8PSB02P, TPI8PSB12P, TPI8PSB12D or FI1216, FM1216 */
3173 break; 3177 break;
3174 case 0x6: tuner=37;/* LG PAL (newer TAPC series) TAPC-G702P */ 3178 case 0x6:
3179 tuner_type = 37; /* LG PAL (newer TAPC series) TAPC-G702P */
3175 break; 3180 break;
3176 case 0xC: tuner=3; /* Philips SECAM(+PAL) FQ1216ME or FI1216MF */ 3181 case 0xC:
3182 tuner_type = 3; /* Philips SECAM(+PAL) FQ1216ME or FI1216MF */
3177 break; 3183 break;
3178 default: 3184 default:
3179 printk(KERN_INFO "bttv%d: FlyVideo_gpio: unknown tuner type.\n", btv->c.nr); 3185 printk(KERN_INFO "bttv%d: FlyVideo_gpio: unknown tuner type.\n", btv->c.nr);
3186 break;
3180 } 3187 }
3181 3188
3182 has_remote = gpio & 0x800000; 3189 has_remote = gpio & 0x800000;
@@ -3189,23 +3196,26 @@ static void flyvideo_gpio(struct bttv *btv)
3189 /* 3196 /*
3190 * gpio & 0x001000 output bit for audio routing */ 3197 * gpio & 0x001000 output bit for audio routing */
3191 3198
3192 if(is_capture_only) 3199 if (is_capture_only)
3193 tuner = TUNER_ABSENT; /* No tuner present */ 3200 tuner_type = TUNER_ABSENT; /* No tuner present */
3194 3201
3195 printk(KERN_INFO "bttv%d: FlyVideo Radio=%s RemoteControl=%s Tuner=%d gpio=0x%06x\n", 3202 printk(KERN_INFO "bttv%d: FlyVideo Radio=%s RemoteControl=%s Tuner=%d gpio=0x%06x\n",
3196 btv->c.nr, has_radio? "yes":"no ", has_remote? "yes":"no ", tuner, gpio); 3203 btv->c.nr, has_radio ? "yes" : "no ",
3204 has_remote ? "yes" : "no ", tuner_type, gpio);
3197 printk(KERN_INFO "bttv%d: FlyVideo LR90=%s tda9821/tda9820=%s capture_only=%s\n", 3205 printk(KERN_INFO "bttv%d: FlyVideo LR90=%s tda9821/tda9820=%s capture_only=%s\n",
3198 btv->c.nr, is_lr90?"yes":"no ", has_tda9820_tda9821?"yes":"no ", 3206 btv->c.nr, is_lr90 ? "yes" : "no ",
3199 is_capture_only?"yes":"no "); 3207 has_tda9820_tda9821 ? "yes" : "no ",
3208 is_capture_only ? "yes" : "no ");
3200 3209
3201 if (tuner != UNSET) /* only set if known tuner autodetected, else let insmod option through */ 3210 if (tuner_type != UNSET) /* only set if known tuner autodetected, else let insmod option through */
3202 btv->tuner_type = tuner; 3211 btv->tuner_type = tuner_type;
3203 btv->has_radio = has_radio; 3212 btv->has_radio = has_radio;
3204 3213
3205 /* LR90 Audio Routing is done by 2 hef4052, so Audio_Mask has 4 bits: 0x001c80 3214 /* LR90 Audio Routing is done by 2 hef4052, so Audio_Mask has 4 bits: 0x001c80
3206 * LR26/LR50 only has 1 hef4052, Audio_Mask 0x000c00 3215 * LR26/LR50 only has 1 hef4052, Audio_Mask 0x000c00
3207 * Audio options: from tuner, from tda9821/tda9821(mono,stereo,sap), from tda9874, ext., mute */ 3216 * Audio options: from tuner, from tda9821/tda9821(mono,stereo,sap), from tda9874, ext., mute */
3208 if(has_tda9820_tda9821) btv->audio_mode_gpio = lt9415_audio; 3217 if (has_tda9820_tda9821)
3218 btv->audio_mode_gpio = lt9415_audio;
3209 /* todo: if(has_tda9874) btv->audio_mode_gpio = fv2000s_audio; */ 3219 /* todo: if(has_tda9874) btv->audio_mode_gpio = fv2000s_audio; */
3210} 3220}
3211 3221
@@ -3962,7 +3972,7 @@ static int tuner_1_table[] = {
3962 3972
3963static void __devinit avermedia_eeprom(struct bttv *btv) 3973static void __devinit avermedia_eeprom(struct bttv *btv)
3964{ 3974{
3965 int tuner_make,tuner_tv_fm,tuner_format,tuner=0; 3975 int tuner_make, tuner_tv_fm, tuner_format, tuner_type = 0;
3966 3976
3967 tuner_make = (eeprom_data[0x41] & 0x7); 3977 tuner_make = (eeprom_data[0x41] & 0x7);
3968 tuner_tv_fm = (eeprom_data[0x41] & 0x18) >> 3; 3978 tuner_tv_fm = (eeprom_data[0x41] & 0x18) >> 3;
@@ -3970,24 +3980,24 @@ static void __devinit avermedia_eeprom(struct bttv *btv)
3970 btv->has_remote = (eeprom_data[0x42] & 0x01); 3980 btv->has_remote = (eeprom_data[0x42] & 0x01);
3971 3981
3972 if (tuner_make == 0 || tuner_make == 2) 3982 if (tuner_make == 0 || tuner_make == 2)
3973 if(tuner_format <=0x0a) 3983 if (tuner_format <= 0x0a)
3974 tuner = tuner_0_table[tuner_format]; 3984 tuner_type = tuner_0_table[tuner_format];
3975 if (tuner_make == 1) 3985 if (tuner_make == 1)
3976 if(tuner_format <=9) 3986 if (tuner_format <= 9)
3977 tuner = tuner_1_table[tuner_format]; 3987 tuner_type = tuner_1_table[tuner_format];
3978 3988
3979 if (tuner_make == 4) 3989 if (tuner_make == 4)
3980 if(tuner_format == 0x09) 3990 if (tuner_format == 0x09)
3981 tuner = TUNER_LG_NTSC_NEW_TAPC; /* TAPC-G702P */ 3991 tuner_type = TUNER_LG_NTSC_NEW_TAPC; /* TAPC-G702P */
3982 3992
3983 printk(KERN_INFO "bttv%d: Avermedia eeprom[0x%02x%02x]: tuner=", 3993 printk(KERN_INFO "bttv%d: Avermedia eeprom[0x%02x%02x]: tuner=",
3984 btv->c.nr,eeprom_data[0x41],eeprom_data[0x42]); 3994 btv->c.nr, eeprom_data[0x41], eeprom_data[0x42]);
3985 if(tuner) { 3995 if (tuner_type) {
3986 btv->tuner_type=tuner; 3996 btv->tuner_type = tuner_type;
3987 printk("%d",tuner); 3997 printk(KERN_CONT "%d", tuner_type);
3988 } else 3998 } else
3989 printk("Unknown type"); 3999 printk(KERN_CONT "Unknown type");
3990 printk(" radio:%s remote control:%s\n", 4000 printk(KERN_CONT " radio:%s remote control:%s\n",
3991 tuner_tv_fm ? "yes" : "no", 4001 tuner_tv_fm ? "yes" : "no",
3992 btv->has_remote ? "yes" : "no"); 4002 btv->has_remote ? "yes" : "no");
3993} 4003}
@@ -4029,7 +4039,8 @@ static void __devinit boot_msp34xx(struct bttv *btv, int pin)
4029 4039
4030 gpio_inout(mask,mask); 4040 gpio_inout(mask,mask);
4031 gpio_bits(mask,0); 4041 gpio_bits(mask,0);
4032 udelay(2500); 4042 mdelay(2);
4043 udelay(500);
4033 gpio_bits(mask,mask); 4044 gpio_bits(mask,mask);
4034 4045
4035 if (bttv_gpio) 4046 if (bttv_gpio)
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 85bf31ab8789..6ae4cc860efe 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -96,7 +96,6 @@ static unsigned int irq_iswitch;
96static unsigned int uv_ratio = 50; 96static unsigned int uv_ratio = 50;
97static unsigned int full_luma_range; 97static unsigned int full_luma_range;
98static unsigned int coring; 98static unsigned int coring;
99extern int no_overlay;
100 99
101/* API features (turn on/off stuff for testing) */ 100/* API features (turn on/off stuff for testing) */
102static unsigned int v4l2 = 1; 101static unsigned int v4l2 = 1;
diff --git a/drivers/media/video/bt8xx/bttv-risc.c b/drivers/media/video/bt8xx/bttv-risc.c
index 649682aac1ac..5b1b8e4c78ba 100644
--- a/drivers/media/video/bt8xx/bttv-risc.c
+++ b/drivers/media/video/bt8xx/bttv-risc.c
@@ -244,7 +244,8 @@ bttv_risc_overlay(struct bttv *btv, struct btcx_riscmem *risc,
244 const struct bttv_format *fmt, struct bttv_overlay *ov, 244 const struct bttv_format *fmt, struct bttv_overlay *ov,
245 int skip_even, int skip_odd) 245 int skip_even, int skip_odd)
246{ 246{
247 int dwords,rc,line,maxy,start,end,skip,nskips; 247 int dwords, rc, line, maxy, start, end;
248 unsigned skip, nskips;
248 struct btcx_skiplist *skips; 249 struct btcx_skiplist *skips;
249 __le32 *rp; 250 __le32 *rp;
250 u32 ri,ra; 251 u32 ri,ra;
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index 08ef54a22c9e..b4d940b2e447 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -267,6 +267,11 @@ int bttv_sub_add_device(struct bttv_core *core, char *name);
267int bttv_sub_del_devices(struct bttv_core *core); 267int bttv_sub_del_devices(struct bttv_core *core);
268 268
269/* ---------------------------------------------------------- */ 269/* ---------------------------------------------------------- */
270/* bttv-cards.c */
271
272extern int no_overlay;
273
274/* ---------------------------------------------------------- */
270/* bttv-driver.c */ 275/* bttv-driver.c */
271 276
272/* insmod options */ 277/* insmod options */
diff --git a/drivers/media/video/btcx-risc.c b/drivers/media/video/btcx-risc.c
index f42701f82e7f..3324ab38f58c 100644
--- a/drivers/media/video/btcx-risc.c
+++ b/drivers/media/video/btcx-risc.c
@@ -184,12 +184,12 @@ btcx_sort_clips(struct v4l2_clip *clips, unsigned int nclips)
184} 184}
185 185
186void 186void
187btcx_calc_skips(int line, int width, unsigned int *maxy, 187btcx_calc_skips(int line, int width, int *maxy,
188 struct btcx_skiplist *skips, unsigned int *nskips, 188 struct btcx_skiplist *skips, unsigned int *nskips,
189 const struct v4l2_clip *clips, unsigned int nclips) 189 const struct v4l2_clip *clips, unsigned int nclips)
190{ 190{
191 unsigned int clip,skip; 191 unsigned int clip,skip;
192 int end,maxline; 192 int end, maxline;
193 193
194 skip=0; 194 skip=0;
195 maxline = 9999; 195 maxline = 9999;
diff --git a/drivers/media/video/btcx-risc.h b/drivers/media/video/btcx-risc.h
index 861bc8112824..f8bc6e8e7b51 100644
--- a/drivers/media/video/btcx-risc.h
+++ b/drivers/media/video/btcx-risc.h
@@ -23,7 +23,7 @@ int btcx_screen_clips(int swidth, int sheight, struct v4l2_rect *win,
23int btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, 23int btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips,
24 unsigned int n, int mask); 24 unsigned int n, int mask);
25void btcx_sort_clips(struct v4l2_clip *clips, unsigned int nclips); 25void btcx_sort_clips(struct v4l2_clip *clips, unsigned int nclips);
26void btcx_calc_skips(int line, int width, unsigned int *maxy, 26void btcx_calc_skips(int line, int width, int *maxy,
27 struct btcx_skiplist *skips, unsigned int *nskips, 27 struct btcx_skiplist *skips, unsigned int *nskips,
28 const struct v4l2_clip *clips, unsigned int nclips); 28 const struct v4l2_clip *clips, unsigned int nclips);
29 29
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index d3b3268bace8..6e39e253ce53 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -946,8 +946,7 @@ static int init_bwqcam(struct parport *port)
946 946
947 printk(KERN_INFO "Connectix Quickcam on %s\n", qcam->pport->name); 947 printk(KERN_INFO "Connectix Quickcam on %s\n", qcam->pport->name);
948 948
949 if(video_register_device(&qcam->vdev, VFL_TYPE_GRABBER, video_nr)==-1) 949 if (video_register_device(&qcam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
950 {
951 parport_unregister_device(qcam->pdev); 950 parport_unregister_device(qcam->pdev);
952 kfree(qcam); 951 kfree(qcam);
953 return -ENODEV; 952 return -ENODEV;
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index fe9379b282d3..7f6c6b4bec10 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -787,8 +787,7 @@ static int init_cqcam(struct parport *port)
787 787
788 parport_release(qcam->pdev); 788 parport_release(qcam->pdev);
789 789
790 if (video_register_device(&qcam->vdev, VFL_TYPE_GRABBER, video_nr)==-1) 790 if (video_register_device(&qcam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
791 {
792 printk(KERN_ERR "Unable to register Colour QuickCam on %s\n", 791 printk(KERN_ERR "Unable to register Colour QuickCam on %s\n",
793 qcam->pport->name); 792 qcam->pport->name);
794 parport_unregister_device(qcam->pdev); 793 parport_unregister_device(qcam->pdev);
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index dc8cc6115e2f..a661800b0e69 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -3955,7 +3955,7 @@ struct cam_data *cpia_register_camera(struct cpia_camera_ops *ops, void *lowleve
3955 camera->lowlevel_data = lowlevel; 3955 camera->lowlevel_data = lowlevel;
3956 3956
3957 /* register v4l device */ 3957 /* register v4l device */
3958 if (video_register_device(&camera->vdev, VFL_TYPE_GRABBER, video_nr) == -1) { 3958 if (video_register_device(&camera->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
3959 kfree(camera); 3959 kfree(camera);
3960 printk(KERN_DEBUG "video_register_device failed\n"); 3960 printk(KERN_DEBUG "video_register_device failed\n");
3961 return NULL; 3961 return NULL;
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 515c8b57a60d..eb9f15cd4c45 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -1024,7 +1024,6 @@ static int ioctl_queryctrl(void *arg,struct camera_data *cam)
1024 if(cam->params.pnp_id.device_type == DEVICE_STV_672 && 1024 if(cam->params.pnp_id.device_type == DEVICE_STV_672 &&
1025 cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){ 1025 cam->params.version.sensor_flags==CPIA2_VP_SENSOR_FLAGS_500){
1026 // Maximum 15fps 1026 // Maximum 15fps
1027 int i;
1028 for(i=0; i<c->maximum; ++i) { 1027 for(i=0; i<c->maximum; ++i) {
1029 if(framerate_controls[i].value == 1028 if(framerate_controls[i].value ==
1030 CPIA2_VP_FRAMERATE_15) { 1029 CPIA2_VP_FRAMERATE_15) {
@@ -1959,8 +1958,7 @@ int cpia2_register_camera(struct camera_data *cam)
1959 reset_camera_struct_v4l(cam); 1958 reset_camera_struct_v4l(cam);
1960 1959
1961 /* register v4l device */ 1960 /* register v4l device */
1962 if (video_register_device 1961 if (video_register_device(cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
1963 (cam->vdev, VFL_TYPE_GRABBER, video_nr) == -1) {
1964 ERR("video_register_device failed\n"); 1962 ERR("video_register_device failed\n");
1965 video_device_release(cam->vdev); 1963 video_device_release(cam->vdev);
1966 return -ENODEV; 1964 return -ENODEV;
diff --git a/drivers/media/video/cx18/cx18-av-firmware.c b/drivers/media/video/cx18/cx18-av-firmware.c
index 834b9248242e..e996a4e3123a 100644
--- a/drivers/media/video/cx18/cx18-av-firmware.c
+++ b/drivers/media/video/cx18/cx18-av-firmware.c
@@ -32,7 +32,7 @@ int cx18_av_loadfw(struct cx18 *cx)
32 u32 v; 32 u32 v;
33 const u8 *ptr; 33 const u8 *ptr;
34 int i; 34 int i;
35 int retries = 0; 35 int retries1 = 0;
36 36
37 if (request_firmware(&fw, FWFILE, &cx->dev->dev) != 0) { 37 if (request_firmware(&fw, FWFILE, &cx->dev->dev) != 0) {
38 CX18_ERR("unable to open firmware %s\n", FWFILE); 38 CX18_ERR("unable to open firmware %s\n", FWFILE);
@@ -41,7 +41,7 @@ int cx18_av_loadfw(struct cx18 *cx)
41 41
42 /* The firmware load often has byte errors, so allow for several 42 /* The firmware load often has byte errors, so allow for several
43 retries, both at byte level and at the firmware load level. */ 43 retries, both at byte level and at the firmware load level. */
44 while (retries < 5) { 44 while (retries1 < 5) {
45 cx18_av_write4(cx, CXADEC_CHIP_CTRL, 0x00010000); 45 cx18_av_write4(cx, CXADEC_CHIP_CTRL, 0x00010000);
46 cx18_av_write(cx, CXADEC_STD_DET_CTL, 0xf6); 46 cx18_av_write(cx, CXADEC_STD_DET_CTL, 0xf6);
47 47
@@ -57,9 +57,9 @@ int cx18_av_loadfw(struct cx18 *cx)
57 for (i = 0; i < size; i++) { 57 for (i = 0; i < size; i++) {
58 u32 dl_control = 0x0F000000 | i | ((u32)ptr[i] << 16); 58 u32 dl_control = 0x0F000000 | i | ((u32)ptr[i] << 16);
59 u32 value = 0; 59 u32 value = 0;
60 int retries; 60 int retries2;
61 61
62 for (retries = 0; retries < 5; retries++) { 62 for (retries2 = 0; retries2 < 5; retries2++) {
63 cx18_av_write4(cx, CXADEC_DL_CTL, dl_control); 63 cx18_av_write4(cx, CXADEC_DL_CTL, dl_control);
64 udelay(10); 64 udelay(10);
65 value = cx18_av_read4(cx, CXADEC_DL_CTL); 65 value = cx18_av_read4(cx, CXADEC_DL_CTL);
@@ -69,18 +69,18 @@ int cx18_av_loadfw(struct cx18 *cx)
69 the address. We can only write the lower 69 the address. We can only write the lower
70 address byte of the address. */ 70 address byte of the address. */
71 if ((value & 0x3F00) != (dl_control & 0x3F00)) { 71 if ((value & 0x3F00) != (dl_control & 0x3F00)) {
72 retries = 5; 72 retries2 = 5;
73 break; 73 break;
74 } 74 }
75 } 75 }
76 if (retries >= 5) 76 if (retries2 >= 5)
77 break; 77 break;
78 } 78 }
79 if (i == size) 79 if (i == size)
80 break; 80 break;
81 retries++; 81 retries1++;
82 } 82 }
83 if (retries >= 5) { 83 if (retries1 >= 5) {
84 CX18_ERR("unable to load firmware %s\n", FWFILE); 84 CX18_ERR("unable to load firmware %s\n", FWFILE);
85 release_firmware(fw); 85 release_firmware(fw);
86 return -EIO; 86 return -EIO;
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 22434aadde31..bd18afebbf86 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -74,9 +74,9 @@ static int radio[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
74 -1, -1, -1, -1, -1, -1, -1, -1, 74 -1, -1, -1, -1, -1, -1, -1, -1,
75 -1, -1, -1, -1, -1, -1, -1, -1 }; 75 -1, -1, -1, -1, -1, -1, -1, -1 };
76 76
77static int cardtype_c = 1; 77static unsigned cardtype_c = 1;
78static int tuner_c = 1; 78static unsigned tuner_c = 1;
79static int radio_c = 1; 79static unsigned radio_c = 1;
80static char pal[] = "--"; 80static char pal[] = "--";
81static char secam[] = "--"; 81static char secam[] = "--";
82static char ntsc[] = "-"; 82static char ntsc[] = "-";
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index cae38985b131..1e420a804fc9 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * cx18 functions for DVB support 2 * cx18 functions for DVB support
3 * 3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx18/cx18-dvb.h b/drivers/media/video/cx18/cx18-dvb.h
index d6a6ccda79a9..bf8d8f6f5455 100644
--- a/drivers/media/video/cx18/cx18-dvb.h
+++ b/drivers/media/video/cx18/cx18-dvb.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * cx18 functions for DVB support 2 * cx18 functions for DVB support
3 * 3 *
4 * Copyright (c) 2008 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx18/cx18-irq.c b/drivers/media/video/cx18/cx18-irq.c
index 25114a5cbd57..ab218315c84b 100644
--- a/drivers/media/video/cx18/cx18-irq.c
+++ b/drivers/media/video/cx18/cx18-irq.c
@@ -61,7 +61,7 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_mailbox *mb)
61 CX18_WARN("Ack struct = %d for %s\n", 61 CX18_WARN("Ack struct = %d for %s\n",
62 mb->args[2], s->name); 62 mb->args[2], s->name);
63 id = read_enc(off); 63 id = read_enc(off);
64 buf = cx18_queue_find_buf(s, id, read_enc(off + 4)); 64 buf = cx18_queue_get_buf_irq(s, id, read_enc(off + 4));
65 CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id); 65 CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id);
66 if (buf) { 66 if (buf) {
67 cx18_buf_sync_for_cpu(s, buf); 67 cx18_buf_sync_for_cpu(s, buf);
diff --git a/drivers/media/video/cx18/cx18-queue.c b/drivers/media/video/cx18/cx18-queue.c
index 6990b77c6200..dbe792ac3001 100644
--- a/drivers/media/video/cx18/cx18-queue.c
+++ b/drivers/media/video/cx18/cx18-queue.c
@@ -78,12 +78,13 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
78 return buf; 78 return buf;
79} 79}
80 80
81struct cx18_buffer *cx18_queue_find_buf(struct cx18_stream *s, u32 id, 81struct cx18_buffer *cx18_queue_get_buf_irq(struct cx18_stream *s, u32 id,
82 u32 bytesused) 82 u32 bytesused)
83{ 83{
84 struct cx18 *cx = s->cx; 84 struct cx18 *cx = s->cx;
85 struct list_head *p; 85 struct list_head *p;
86 86
87 spin_lock(&s->qlock);
87 list_for_each(p, &s->q_free.list) { 88 list_for_each(p, &s->q_free.list) {
88 struct cx18_buffer *buf = 89 struct cx18_buffer *buf =
89 list_entry(p, struct cx18_buffer, list); 90 list_entry(p, struct cx18_buffer, list);
@@ -92,114 +93,48 @@ struct cx18_buffer *cx18_queue_find_buf(struct cx18_stream *s, u32 id,
92 continue; 93 continue;
93 buf->bytesused = bytesused; 94 buf->bytesused = bytesused;
94 /* the transport buffers are handled differently, 95 /* the transport buffers are handled differently,
95 so there is no need to move them to the full queue */ 96 they are not moved to the full queue */
96 if (s->type == CX18_ENC_STREAM_TYPE_TS) 97 if (s->type != CX18_ENC_STREAM_TYPE_TS) {
97 return buf; 98 s->q_free.buffers--;
98 s->q_free.buffers--; 99 s->q_free.length -= s->buf_size;
99 s->q_free.length -= s->buf_size; 100 s->q_full.buffers++;
100 s->q_full.buffers++; 101 s->q_full.length += s->buf_size;
101 s->q_full.length += s->buf_size; 102 s->q_full.bytesused += buf->bytesused;
102 s->q_full.bytesused += buf->bytesused; 103 list_move_tail(&buf->list, &s->q_full.list);
103 list_move_tail(&buf->list, &s->q_full.list); 104 }
105 spin_unlock(&s->qlock);
104 return buf; 106 return buf;
105 } 107 }
108 spin_unlock(&s->qlock);
106 CX18_ERR("Cannot find buffer %d for stream %s\n", id, s->name); 109 CX18_ERR("Cannot find buffer %d for stream %s\n", id, s->name);
107 return NULL; 110 return NULL;
108} 111}
109 112
110static void cx18_queue_move_buf(struct cx18_stream *s, struct cx18_queue *from, 113/* Move all buffers of a queue to q_free, while flushing the buffers */
111 struct cx18_queue *to, int clear, int full) 114static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
112{
113 struct cx18_buffer *buf =
114 list_entry(from->list.next, struct cx18_buffer, list);
115
116 list_move_tail(from->list.next, &to->list);
117 from->buffers--;
118 from->length -= s->buf_size;
119 from->bytesused -= buf->bytesused - buf->readpos;
120 /* special handling for q_free */
121 if (clear)
122 buf->bytesused = buf->readpos = buf->b_flags = 0;
123 else if (full) {
124 /* special handling for stolen buffers, assume
125 all bytes are used. */
126 buf->bytesused = s->buf_size;
127 buf->readpos = buf->b_flags = 0;
128 }
129 to->buffers++;
130 to->length += s->buf_size;
131 to->bytesused += buf->bytesused - buf->readpos;
132}
133
134/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
135 If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
136 If 'steal' != NULL, then buffers may also taken from that queue if
137 needed.
138
139 The buffer is automatically cleared if it goes to the free queue. It is
140 also cleared if buffers need to be taken from the 'steal' queue and
141 the 'from' queue is the free queue.
142
143 When 'from' is q_free, then needed_bytes is compared to the total
144 available buffer length, otherwise needed_bytes is compared to the
145 bytesused value. For the 'steal' queue the total available buffer
146 length is always used.
147
148 -ENOMEM is returned if the buffers could not be obtained, 0 if all
149 buffers where obtained from the 'from' list and if non-zero then
150 the number of stolen buffers is returned. */
151static int cx18_queue_move(struct cx18_stream *s, struct cx18_queue *from,
152 struct cx18_queue *steal, struct cx18_queue *to,
153 int needed_bytes)
154{ 115{
155 unsigned long flags; 116 unsigned long flags;
156 int rc = 0; 117 struct cx18_buffer *buf;
157 int from_free = from == &s->q_free;
158 int to_free = to == &s->q_free;
159 int bytes_available;
160
161 spin_lock_irqsave(&s->qlock, flags);
162 if (needed_bytes == 0) {
163 from_free = 1;
164 needed_bytes = from->length;
165 }
166
167 bytes_available = from_free ? from->length : from->bytesused;
168 bytes_available += steal ? steal->length : 0;
169 118
170 if (bytes_available < needed_bytes) { 119 if (q == &s->q_free)
171 spin_unlock_irqrestore(&s->qlock, flags); 120 return;
172 return -ENOMEM;
173 }
174 if (from_free) {
175 u32 old_length = to->length;
176 121
177 while (to->length - old_length < needed_bytes) { 122 spin_lock_irqsave(&s->qlock, flags);
178 if (list_empty(&from->list)) 123 while (!list_empty(&q->list)) {
179 from = steal; 124 buf = list_entry(q->list.next, struct cx18_buffer, list);
180 if (from == steal) 125 list_move_tail(q->list.next, &s->q_free.list);
181 rc++; /* keep track of 'stolen' buffers */ 126 buf->bytesused = buf->readpos = buf->b_flags = 0;
182 cx18_queue_move_buf(s, from, to, 1, 0); 127 s->q_free.buffers++;
183 } 128 s->q_free.length += s->buf_size;
184 } else {
185 u32 old_bytesused = to->bytesused;
186
187 while (to->bytesused - old_bytesused < needed_bytes) {
188 if (list_empty(&from->list))
189 from = steal;
190 if (from == steal)
191 rc++; /* keep track of 'stolen' buffers */
192 cx18_queue_move_buf(s, from, to, to_free, rc);
193 }
194 } 129 }
130 cx18_queue_init(q);
195 spin_unlock_irqrestore(&s->qlock, flags); 131 spin_unlock_irqrestore(&s->qlock, flags);
196 return rc;
197} 132}
198 133
199void cx18_flush_queues(struct cx18_stream *s) 134void cx18_flush_queues(struct cx18_stream *s)
200{ 135{
201 cx18_queue_move(s, &s->q_io, NULL, &s->q_free, 0); 136 cx18_queue_flush(s, &s->q_io);
202 cx18_queue_move(s, &s->q_full, NULL, &s->q_free, 0); 137 cx18_queue_flush(s, &s->q_full);
203} 138}
204 139
205int cx18_stream_alloc(struct cx18_stream *s) 140int cx18_stream_alloc(struct cx18_stream *s)
@@ -214,10 +149,10 @@ int cx18_stream_alloc(struct cx18_stream *s)
214 s->name, s->buffers, s->buf_size, 149 s->name, s->buffers, s->buf_size,
215 s->buffers * s->buf_size / 1024); 150 s->buffers * s->buf_size / 1024);
216 151
217 if (((char *)&cx->scb->cpu_mdl[cx->mdl_offset + s->buffers] - 152 if (((char __iomem *)&cx->scb->cpu_mdl[cx->mdl_offset + s->buffers] -
218 (char *)cx->scb) > SCB_RESERVED_SIZE) { 153 (char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
219 unsigned bufsz = (((char *)cx->scb) + SCB_RESERVED_SIZE - 154 unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
220 ((char *)cx->scb->cpu_mdl)); 155 ((char __iomem *)cx->scb->cpu_mdl));
221 156
222 CX18_ERR("Too many buffers, cannot fit in SCB area\n"); 157 CX18_ERR("Too many buffers, cannot fit in SCB area\n");
223 CX18_ERR("Max buffers = %zd\n", 158 CX18_ERR("Max buffers = %zd\n",
diff --git a/drivers/media/video/cx18/cx18-queue.h b/drivers/media/video/cx18/cx18-queue.h
index 91423b9863a4..7f93bb13c09f 100644
--- a/drivers/media/video/cx18/cx18-queue.h
+++ b/drivers/media/video/cx18/cx18-queue.h
@@ -46,7 +46,7 @@ void cx18_queue_init(struct cx18_queue *q);
46void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf, 46void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
47 struct cx18_queue *q); 47 struct cx18_queue *q);
48struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q); 48struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
49struct cx18_buffer *cx18_queue_find_buf(struct cx18_stream *s, u32 id, 49struct cx18_buffer *cx18_queue_get_buf_irq(struct cx18_stream *s, u32 id,
50 u32 bytesused); 50 u32 bytesused);
51void cx18_flush_queues(struct cx18_stream *s); 51void cx18_flush_queues(struct cx18_stream *s);
52 52
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 8118091568fc..7b0e8c01692e 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -4,7 +4,7 @@
4 * 4 *
5 * (c) 2004 Jelle Foks <jelle@foks.8m.com> 5 * (c) 2004 Jelle Foks <jelle@foks.8m.com>
6 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> 6 * (c) 2004 Gerd Knorr <kraxel@bytesex.org>
7 * (c) 2008 Steven Toth <stoth@hauppauge.com> 7 * (c) 2008 Steven Toth <stoth@linuxtv.org>
8 * - CX23885/7/8 support 8 * - CX23885/7/8 support
9 * 9 *
10 * Includes parts from the ivtv driver( http://ivtv.sourceforge.net/), 10 * Includes parts from the ivtv driver( http://ivtv.sourceforge.net/),
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index a19de850955d..c36d3f632104 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Conexant CX23885 PCIe bridge 2 * Driver for the Conexant CX23885 PCIe bridge
3 * 3 *
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index 6286a9cf957e..25fb09938744 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Conexant CX23885 PCIe bridge 2 * Driver for the Conexant CX23885 PCIe bridge
3 * 3 *
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -33,7 +33,7 @@
33#include "cx23885.h" 33#include "cx23885.h"
34 34
35MODULE_DESCRIPTION("Driver for cx23885 based TV cards"); 35MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
36MODULE_AUTHOR("Steven Toth <stoth@hauppauge.com>"); 36MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
39static unsigned int debug; 39static unsigned int debug;
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 0a2e6558cd66..291b9d008da8 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Conexant CX23885 PCIe bridge 2 * Driver for the Conexant CX23885 PCIe bridge
3 * 3 *
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index c6bb0a05bc1c..f98e476e9617 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Conexant CX23885 PCIe bridge 2 * Driver for the Conexant CX23885 PCIe bridge
3 * 3 *
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx23885/cx23885-reg.h b/drivers/media/video/cx23885/cx23885-reg.h
index bdd11bc513ad..20b68a236260 100644
--- a/drivers/media/video/cx23885/cx23885-reg.h
+++ b/drivers/media/video/cx23885/cx23885-reg.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Conexant CX23885 PCIe bridge 2 * Driver for the Conexant CX23885 PCIe bridge
3 * 3 *
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx23885/cx23885-vbi.c b/drivers/media/video/cx23885/cx23885-vbi.c
index e36e3fcae2fb..35e61cd112fc 100644
--- a/drivers/media/video/cx23885/cx23885-vbi.c
+++ b/drivers/media/video/cx23885/cx23885-vbi.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Conexant CX23885 PCIe bridge 2 * Driver for the Conexant CX23885 PCIe bridge
3 * 3 *
4 * Copyright (c) 2007 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index ad2235dab5b1..6047c78d84bf 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Conexant CX23885 PCIe bridge 2 * Driver for the Conexant CX23885 PCIe bridge
3 * 3 *
4 * Copyright (c) 2007 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -41,7 +41,7 @@
41#endif 41#endif
42 42
43MODULE_DESCRIPTION("v4l2 driver module for cx23885 based TV cards"); 43MODULE_DESCRIPTION("v4l2 driver module for cx23885 based TV cards");
44MODULE_AUTHOR("Steven Toth <stoth@hauppauge.com>"); 44MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
45MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
46 46
47/* ------------------------------------------------------------------ */ 47/* ------------------------------------------------------------------ */
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index 00dfdc89d641..e23d97c071e0 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for the Conexant CX23885 PCIe bridge 2 * Driver for the Conexant CX23885 PCIe bridge
3 * 3 *
4 * Copyright (c) 2006 Steven Toth <stoth@hauppauge.com> 4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 209d3bcb5dbb..4da8cd74f00e 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -13,7 +13,7 @@
13 * NTSC sliced VBI support by Christopher Neufeld <television@cneufeld.ca> 13 * NTSC sliced VBI support by Christopher Neufeld <television@cneufeld.ca>
14 * with additional fixes by Hans Verkuil <hverkuil@xs4all.nl>. 14 * with additional fixes by Hans Verkuil <hverkuil@xs4all.nl>.
15 * 15 *
16 * CX23885 support by Steven Toth <stoth@hauppauge.com>. 16 * CX23885 support by Steven Toth <stoth@linuxtv.org>.
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License 19 * modify it under the terms of the GNU General Public License
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index 48f4b92a8f8b..79faedf58521 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -403,6 +403,7 @@ static int dabusb_fpga_download (pdabusb_t s, const char *fname)
403 ret = request_firmware(&fw, "dabusb/bitstream.bin", &s->usbdev->dev); 403 ret = request_firmware(&fw, "dabusb/bitstream.bin", &s->usbdev->dev);
404 if (ret) { 404 if (ret) {
405 err("Failed to load \"dabusb/bitstream.bin\": %d\n", ret); 405 err("Failed to load \"dabusb/bitstream.bin\": %d\n", ret);
406 kfree(b);
406 return ret; 407 return ret;
407 } 408 }
408 409
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 2d170d101c21..8db2a05bf9c5 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -2588,6 +2588,7 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
2588 cam->v4ldev->fops = &et61x251_fops; 2588 cam->v4ldev->fops = &et61x251_fops;
2589 cam->v4ldev->minor = video_nr[dev_nr]; 2589 cam->v4ldev->minor = video_nr[dev_nr];
2590 cam->v4ldev->release = video_device_release; 2590 cam->v4ldev->release = video_device_release;
2591 cam->v4ldev->parent = &udev->dev;
2591 video_set_drvdata(cam->v4ldev, cam); 2592 video_set_drvdata(cam->v4ldev, cam);
2592 2593
2593 init_completion(&cam->probe); 2594 init_completion(&cam->probe);
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index cd3a3f5829b2..4d9f4cc255a9 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -124,7 +124,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
124 struct usb_device *dev = gspca_dev->dev; 124 struct usb_device *dev = gspca_dev->dev;
125 125
126#ifdef GSPCA_DEBUG 126#ifdef GSPCA_DEBUG
127 if (len > sizeof gspca_dev->usb_buf) { 127 if (len > USB_BUF_SZ) {
128 err("reg_r: buffer overflow"); 128 err("reg_r: buffer overflow");
129 return; 129 return;
130 } 130 }
@@ -164,7 +164,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
164 struct usb_device *dev = gspca_dev->dev; 164 struct usb_device *dev = gspca_dev->dev;
165 165
166#ifdef GSPCA_DEBUG 166#ifdef GSPCA_DEBUG
167 if (len > sizeof gspca_dev->usb_buf) { 167 if (len > USB_BUF_SZ) {
168 err("reg_w: buffer overflow"); 168 err("reg_w: buffer overflow");
169 return; 169 return;
170 } 170 }
@@ -731,13 +731,13 @@ static void cx11646_jpeg(struct gspca_dev*gspca_dev)
731 reg_w_val(gspca_dev, 0x0000, 0x00); 731 reg_w_val(gspca_dev, 0x0000, 0x00);
732 /* wait for completion */ 732 /* wait for completion */
733 retry = 50; 733 retry = 50;
734 while (retry--) { 734 do {
735 reg_r(gspca_dev, 0x0002, 1); 735 reg_r(gspca_dev, 0x0002, 1);
736 /* 0x07 until 0x00 */ 736 /* 0x07 until 0x00 */
737 if (gspca_dev->usb_buf[0] == 0x00) 737 if (gspca_dev->usb_buf[0] == 0x00)
738 break; 738 break;
739 reg_w_val(gspca_dev, 0x0053, 0x00); 739 reg_w_val(gspca_dev, 0x0053, 0x00);
740 } 740 } while (--retry);
741 if (retry == 0) 741 if (retry == 0)
742 PDEBUG(D_ERR, "Damned Errors sending jpeg Table"); 742 PDEBUG(D_ERR, "Damned Errors sending jpeg Table");
743 /* send the qtable now */ 743 /* send the qtable now */
@@ -826,8 +826,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
826 return 0; 826 return 0;
827} 827}
828 828
829/* this function is called at open time */ 829/* this function is called at probe and resume time */
830static int sd_open(struct gspca_dev *gspca_dev) 830static int sd_init(struct gspca_dev *gspca_dev)
831{ 831{
832 cx11646_init1(gspca_dev); 832 cx11646_init1(gspca_dev);
833 cx11646_initsize(gspca_dev); 833 cx11646_initsize(gspca_dev);
@@ -845,10 +845,6 @@ static void sd_start(struct gspca_dev *gspca_dev)
845 cx11646_jpeg(gspca_dev); 845 cx11646_jpeg(gspca_dev);
846} 846}
847 847
848static void sd_stopN(struct gspca_dev *gspca_dev)
849{
850}
851
852static void sd_stop0(struct gspca_dev *gspca_dev) 848static void sd_stop0(struct gspca_dev *gspca_dev)
853{ 849{
854 int retry = 50; 850 int retry = 50;
@@ -871,10 +867,6 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
871 reg_w_val(gspca_dev, 0x00fc, 0xe0); 867 reg_w_val(gspca_dev, 0x00fc, 0xe0);
872} 868}
873 869
874static void sd_close(struct gspca_dev *gspca_dev)
875{
876}
877
878static void sd_pkt_scan(struct gspca_dev *gspca_dev, 870static void sd_pkt_scan(struct gspca_dev *gspca_dev,
879 struct gspca_frame *frame, /* target */ 871 struct gspca_frame *frame, /* target */
880 __u8 *data, /* isoc packet */ 872 __u8 *data, /* isoc packet */
@@ -998,11 +990,9 @@ static struct sd_desc sd_desc = {
998 .ctrls = sd_ctrls, 990 .ctrls = sd_ctrls,
999 .nctrls = ARRAY_SIZE(sd_ctrls), 991 .nctrls = ARRAY_SIZE(sd_ctrls),
1000 .config = sd_config, 992 .config = sd_config,
1001 .open = sd_open, 993 .init = sd_init,
1002 .start = sd_start, 994 .start = sd_start,
1003 .stopN = sd_stopN,
1004 .stop0 = sd_stop0, 995 .stop0 = sd_stop0,
1005 .close = sd_close,
1006 .pkt_scan = sd_pkt_scan, 996 .pkt_scan = sd_pkt_scan,
1007}; 997};
1008 998
@@ -1026,6 +1016,10 @@ static struct usb_driver sd_driver = {
1026 .id_table = device_table, 1016 .id_table = device_table,
1027 .probe = sd_probe, 1017 .probe = sd_probe,
1028 .disconnect = gspca_disconnect, 1018 .disconnect = gspca_disconnect,
1019#ifdef CONFIG_PM
1020 .suspend = gspca_suspend,
1021 .resume = gspca_resume,
1022#endif
1029}; 1023};
1030 1024
1031/* -- module insert / remove -- */ 1025/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index 1dbe92d01e6a..4ff0e386914b 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -81,6 +81,7 @@ static struct ctrl sd_ctrls[] = {
81 .set = sd_setcontrast, 81 .set = sd_setcontrast,
82 .get = sd_getcontrast, 82 .get = sd_getcontrast,
83 }, 83 },
84#define COLOR_IDX 2
84 { 85 {
85 { 86 {
86 .id = V4L2_CID_SATURATION, 87 .id = V4L2_CID_SATURATION,
@@ -234,7 +235,7 @@ static void reg_r(struct gspca_dev *gspca_dev,
234 struct usb_device *dev = gspca_dev->dev; 235 struct usb_device *dev = gspca_dev->dev;
235 236
236#ifdef GSPCA_DEBUG 237#ifdef GSPCA_DEBUG
237 if (len > sizeof gspca_dev->usb_buf) { 238 if (len > USB_BUF_SZ) {
238 err("reg_r: buffer overflow"); 239 err("reg_r: buffer overflow");
239 return; 240 return;
240 } 241 }
@@ -272,7 +273,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
272 struct usb_device *dev = gspca_dev->dev; 273 struct usb_device *dev = gspca_dev->dev;
273 274
274#ifdef GSPCA_DEBUG 275#ifdef GSPCA_DEBUG
275 if (len > sizeof gspca_dev->usb_buf) { 276 if (len > USB_BUF_SZ) {
276 err("reg_w: buffer overflow"); 277 err("reg_w: buffer overflow");
277 return; 278 return;
278 } 279 }
@@ -665,6 +666,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
665 } else { 666 } else {
666 cam->cam_mode = vga_mode; 667 cam->cam_mode = vga_mode;
667 cam->nmodes = sizeof vga_mode / sizeof vga_mode[0]; 668 cam->nmodes = sizeof vga_mode / sizeof vga_mode[0];
669 gspca_dev->ctrl_dis = (1 << COLOR_IDX);
668 } 670 }
669 sd->brightness = BRIGHTNESS_DEF; 671 sd->brightness = BRIGHTNESS_DEF;
670 sd->contrast = CONTRAST_DEF; 672 sd->contrast = CONTRAST_DEF;
@@ -674,8 +676,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
674 return 0; 676 return 0;
675} 677}
676 678
677/* this function is called at open time */ 679/* this function is called at probe and resume time */
678static int sd_open(struct gspca_dev *gspca_dev) 680static int sd_init(struct gspca_dev *gspca_dev)
679{ 681{
680 struct sd *sd = (struct sd *) gspca_dev; 682 struct sd *sd = (struct sd *) gspca_dev;
681 683
@@ -709,14 +711,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
709 et_video(gspca_dev, 0); /* video off */ 711 et_video(gspca_dev, 0); /* video off */
710} 712}
711 713
712static void sd_stop0(struct gspca_dev *gspca_dev)
713{
714}
715
716static void sd_close(struct gspca_dev *gspca_dev)
717{
718}
719
720static __u8 Et_getgainG(struct gspca_dev *gspca_dev) 714static __u8 Et_getgainG(struct gspca_dev *gspca_dev)
721{ 715{
722 struct sd *sd = (struct sd *) gspca_dev; 716 struct sd *sd = (struct sd *) gspca_dev;
@@ -893,21 +887,19 @@ static struct sd_desc sd_desc = {
893 .ctrls = sd_ctrls, 887 .ctrls = sd_ctrls,
894 .nctrls = ARRAY_SIZE(sd_ctrls), 888 .nctrls = ARRAY_SIZE(sd_ctrls),
895 .config = sd_config, 889 .config = sd_config,
896 .open = sd_open, 890 .init = sd_init,
897 .start = sd_start, 891 .start = sd_start,
898 .stopN = sd_stopN, 892 .stopN = sd_stopN,
899 .stop0 = sd_stop0,
900 .close = sd_close,
901 .pkt_scan = sd_pkt_scan, 893 .pkt_scan = sd_pkt_scan,
902 .dq_callback = do_autogain, 894 .dq_callback = do_autogain,
903}; 895};
904 896
905/* -- module initialisation -- */ 897/* -- module initialisation -- */
906static __devinitdata struct usb_device_id device_table[] = { 898static __devinitdata struct usb_device_id device_table[] = {
907#ifndef CONFIG_USB_ET61X251
908 {USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106}, 899 {USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106},
909#endif 900#if !defined CONFIG_USB_ET61X251 && !defined CONFIG_USB_ET61X251_MODULE
910 {USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX}, 901 {USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX},
902#endif
911 {} 903 {}
912}; 904};
913 905
@@ -926,6 +918,10 @@ static struct usb_driver sd_driver = {
926 .id_table = device_table, 918 .id_table = device_table,
927 .probe = sd_probe, 919 .probe = sd_probe,
928 .disconnect = gspca_disconnect, 920 .disconnect = gspca_disconnect,
921#ifdef CONFIG_PM
922 .suspend = gspca_suspend,
923 .resume = gspca_resume,
924#endif
929}; 925};
930 926
931/* -- module insert / remove -- */ 927/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 15d302b28b79..7be69284da03 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -114,7 +114,10 @@ static void fill_frame(struct gspca_dev *gspca_dev,
114 cam_pkt_op pkt_scan; 114 cam_pkt_op pkt_scan;
115 115
116 if (urb->status != 0) { 116 if (urb->status != 0) {
117 PDEBUG(D_ERR|D_PACK, "urb status: %d", urb->status); 117#ifdef CONFIG_PM
118 if (!gspca_dev->frozen)
119#endif
120 PDEBUG(D_ERR|D_PACK, "urb status: %d", urb->status);
118 return; /* disconnection ? */ 121 return; /* disconnection ? */
119 } 122 }
120 pkt_scan = gspca_dev->sd_desc->pkt_scan; 123 pkt_scan = gspca_dev->sd_desc->pkt_scan;
@@ -555,10 +558,12 @@ static void gspca_stream_off(struct gspca_dev *gspca_dev)
555 gspca_dev->streaming = 0; 558 gspca_dev->streaming = 0;
556 atomic_set(&gspca_dev->nevent, 0); 559 atomic_set(&gspca_dev->nevent, 0);
557 if (gspca_dev->present) { 560 if (gspca_dev->present) {
558 gspca_dev->sd_desc->stopN(gspca_dev); 561 if (gspca_dev->sd_desc->stopN)
562 gspca_dev->sd_desc->stopN(gspca_dev);
559 destroy_urbs(gspca_dev); 563 destroy_urbs(gspca_dev);
560 gspca_set_alt0(gspca_dev); 564 gspca_set_alt0(gspca_dev);
561 gspca_dev->sd_desc->stop0(gspca_dev); 565 if (gspca_dev->sd_desc->stop0)
566 gspca_dev->sd_desc->stop0(gspca_dev);
562 PDEBUG(D_STREAM, "stream off OK"); 567 PDEBUG(D_STREAM, "stream off OK");
563 } 568 }
564} 569}
@@ -767,19 +772,7 @@ static int dev_open(struct inode *inode, struct file *file)
767 goto out; 772 goto out;
768 } 773 }
769 774
770 /* if not done yet, initialize the sensor */ 775 if (gspca_dev->users > 4) { /* (arbitrary value) */
771 if (gspca_dev->users == 0) {
772 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) {
773 ret = -ERESTARTSYS;
774 goto out;
775 }
776 ret = gspca_dev->sd_desc->open(gspca_dev);
777 mutex_unlock(&gspca_dev->usb_lock);
778 if (ret != 0) {
779 PDEBUG(D_ERR|D_CONF, "init device failed %d", ret);
780 goto out;
781 }
782 } else if (gspca_dev->users > 4) { /* (arbitrary value) */
783 ret = -EBUSY; 776 ret = -EBUSY;
784 goto out; 777 goto out;
785 } 778 }
@@ -792,6 +785,7 @@ static int dev_open(struct inode *inode, struct file *file)
792 else 785 else
793 gspca_dev->vdev.debug &= ~3; 786 gspca_dev->vdev.debug &= ~3;
794#endif 787#endif
788 ret = 0;
795out: 789out:
796 mutex_unlock(&gspca_dev->queue_lock); 790 mutex_unlock(&gspca_dev->queue_lock);
797 if (ret != 0) 791 if (ret != 0)
@@ -812,11 +806,11 @@ static int dev_close(struct inode *inode, struct file *file)
812 806
813 /* if the file did the capture, free the streaming resources */ 807 /* if the file did the capture, free the streaming resources */
814 if (gspca_dev->capt_file == file) { 808 if (gspca_dev->capt_file == file) {
815 mutex_lock(&gspca_dev->usb_lock); 809 if (gspca_dev->streaming) {
816 if (gspca_dev->streaming) 810 mutex_lock(&gspca_dev->usb_lock);
817 gspca_stream_off(gspca_dev); 811 gspca_stream_off(gspca_dev);
818 gspca_dev->sd_desc->close(gspca_dev); 812 mutex_unlock(&gspca_dev->usb_lock);
819 mutex_unlock(&gspca_dev->usb_lock); 813 }
820 frame_free(gspca_dev); 814 frame_free(gspca_dev);
821 gspca_dev->capt_file = NULL; 815 gspca_dev->capt_file = NULL;
822 gspca_dev->memory = GSPCA_MEMORY_NO; 816 gspca_dev->memory = GSPCA_MEMORY_NO;
@@ -853,42 +847,44 @@ static int vidioc_querycap(struct file *file, void *priv,
853 return 0; 847 return 0;
854} 848}
855 849
856/* the use of V4L2_CTRL_FLAG_NEXT_CTRL asks for the controls to be sorted */
857static int vidioc_queryctrl(struct file *file, void *priv, 850static int vidioc_queryctrl(struct file *file, void *priv,
858 struct v4l2_queryctrl *q_ctrl) 851 struct v4l2_queryctrl *q_ctrl)
859{ 852{
860 struct gspca_dev *gspca_dev = priv; 853 struct gspca_dev *gspca_dev = priv;
861 int i; 854 int i, ix;
862 u32 id; 855 u32 id;
863 856
857 ix = -1;
864 id = q_ctrl->id; 858 id = q_ctrl->id;
865 if (id & V4L2_CTRL_FLAG_NEXT_CTRL) { 859 if (id & V4L2_CTRL_FLAG_NEXT_CTRL) {
866 id &= V4L2_CTRL_ID_MASK; 860 id &= V4L2_CTRL_ID_MASK;
867 id++; 861 id++;
868 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) { 862 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
869 if (id >= gspca_dev->sd_desc->ctrls[i].qctrl.id) { 863 if (gspca_dev->sd_desc->ctrls[i].qctrl.id < id)
870 memcpy(q_ctrl, 864 continue;
871 &gspca_dev->sd_desc->ctrls[i].qctrl, 865 if (ix < 0) {
872 sizeof *q_ctrl); 866 ix = i;
873 return 0; 867 continue;
874 } 868 }
869 if (gspca_dev->sd_desc->ctrls[i].qctrl.id
870 > gspca_dev->sd_desc->ctrls[ix].qctrl.id)
871 continue;
872 ix = i;
875 } 873 }
876 return -EINVAL;
877 } 874 }
878 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) { 875 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
879 if (id == gspca_dev->sd_desc->ctrls[i].qctrl.id) { 876 if (id == gspca_dev->sd_desc->ctrls[i].qctrl.id) {
880 memcpy(q_ctrl, 877 ix = i;
881 &gspca_dev->sd_desc->ctrls[i].qctrl, 878 break;
882 sizeof *q_ctrl);
883 return 0;
884 } 879 }
885 } 880 }
886 if (id >= V4L2_CID_BASE 881 if (ix < 0)
887 && id <= V4L2_CID_LASTP1) { 882 return -EINVAL;
883 memcpy(q_ctrl, &gspca_dev->sd_desc->ctrls[ix].qctrl,
884 sizeof *q_ctrl);
885 if (gspca_dev->ctrl_dis & (1 << ix))
888 q_ctrl->flags |= V4L2_CTRL_FLAG_DISABLED; 886 q_ctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
889 return 0; 887 return 0;
890 }
891 return -EINVAL;
892} 888}
893 889
894static int vidioc_s_ctrl(struct file *file, void *priv, 890static int vidioc_s_ctrl(struct file *file, void *priv,
@@ -903,6 +899,8 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
903 i++, ctrls++) { 899 i++, ctrls++) {
904 if (ctrl->id != ctrls->qctrl.id) 900 if (ctrl->id != ctrls->qctrl.id)
905 continue; 901 continue;
902 if (gspca_dev->ctrl_dis & (1 << i))
903 return -EINVAL;
906 if (ctrl->value < ctrls->qctrl.minimum 904 if (ctrl->value < ctrls->qctrl.minimum
907 || ctrl->value > ctrls->qctrl.maximum) 905 || ctrl->value > ctrls->qctrl.maximum)
908 return -ERANGE; 906 return -ERANGE;
@@ -929,6 +927,8 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
929 i++, ctrls++) { 927 i++, ctrls++) {
930 if (ctrl->id != ctrls->qctrl.id) 928 if (ctrl->id != ctrls->qctrl.id)
931 continue; 929 continue;
930 if (gspca_dev->ctrl_dis & (1 << i))
931 return -EINVAL;
932 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 932 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
933 return -ERESTARTSYS; 933 return -ERESTARTSYS;
934 ret = ctrls->get(gspca_dev, &ctrl->value); 934 ret = ctrls->get(gspca_dev, &ctrl->value);
@@ -1403,7 +1403,7 @@ static int vidioc_dqbuf(struct file *file, void *priv,
1403 i = ret; /* frame index */ 1403 i = ret; /* frame index */
1404 frame = &gspca_dev->frame[i]; 1404 frame = &gspca_dev->frame[i];
1405 if (gspca_dev->memory == V4L2_MEMORY_USERPTR) { 1405 if (gspca_dev->memory == V4L2_MEMORY_USERPTR) {
1406 if (copy_to_user((__u8 *) frame->v4l2_buf.m.userptr, 1406 if (copy_to_user((__u8 __user *) frame->v4l2_buf.m.userptr,
1407 frame->data, 1407 frame->data,
1408 frame->v4l2_buf.bytesused)) { 1408 frame->v4l2_buf.bytesused)) {
1409 PDEBUG(D_ERR|D_STREAM, 1409 PDEBUG(D_ERR|D_STREAM,
@@ -1731,6 +1731,12 @@ int gspca_dev_probe(struct usb_interface *intf,
1731 err("couldn't kzalloc gspca struct"); 1731 err("couldn't kzalloc gspca struct");
1732 return -EIO; 1732 return -EIO;
1733 } 1733 }
1734 gspca_dev->usb_buf = kmalloc(USB_BUF_SZ, GFP_KERNEL);
1735 if (!gspca_dev->usb_buf) {
1736 err("out of memory");
1737 ret = -EIO;
1738 goto out;
1739 }
1734 gspca_dev->dev = dev; 1740 gspca_dev->dev = dev;
1735 gspca_dev->iface = interface->bInterfaceNumber; 1741 gspca_dev->iface = interface->bInterfaceNumber;
1736 gspca_dev->nbalt = intf->num_altsetting; 1742 gspca_dev->nbalt = intf->num_altsetting;
@@ -1738,10 +1744,13 @@ int gspca_dev_probe(struct usb_interface *intf,
1738/* gspca_dev->users = 0; (done by kzalloc) */ 1744/* gspca_dev->users = 0; (done by kzalloc) */
1739 gspca_dev->nbufread = 2; 1745 gspca_dev->nbufread = 2;
1740 1746
1741 /* configure the subdriver */ 1747 /* configure the subdriver and initialize the USB device */
1742 ret = gspca_dev->sd_desc->config(gspca_dev, id); 1748 ret = gspca_dev->sd_desc->config(gspca_dev, id);
1743 if (ret < 0) 1749 if (ret < 0)
1744 goto out; 1750 goto out;
1751 ret = gspca_dev->sd_desc->init(gspca_dev);
1752 if (ret < 0)
1753 goto out;
1745 ret = gspca_set_alt0(gspca_dev); 1754 ret = gspca_set_alt0(gspca_dev);
1746 if (ret < 0) 1755 if (ret < 0)
1747 goto out; 1756 goto out;
@@ -1771,6 +1780,7 @@ int gspca_dev_probe(struct usb_interface *intf,
1771 PDEBUG(D_PROBE, "probe ok"); 1780 PDEBUG(D_PROBE, "probe ok");
1772 return 0; 1781 return 0;
1773out: 1782out:
1783 kfree(gspca_dev->usb_buf);
1774 kfree(gspca_dev); 1784 kfree(gspca_dev);
1775 return ret; 1785 return ret;
1776} 1786}
@@ -1803,11 +1813,42 @@ void gspca_disconnect(struct usb_interface *intf)
1803/* We don't want people trying to open up the device */ 1813/* We don't want people trying to open up the device */
1804 video_unregister_device(&gspca_dev->vdev); 1814 video_unregister_device(&gspca_dev->vdev);
1805/* Free the memory */ 1815/* Free the memory */
1816 kfree(gspca_dev->usb_buf);
1806 kfree(gspca_dev); 1817 kfree(gspca_dev);
1807 PDEBUG(D_PROBE, "disconnect complete"); 1818 PDEBUG(D_PROBE, "disconnect complete");
1808} 1819}
1809EXPORT_SYMBOL(gspca_disconnect); 1820EXPORT_SYMBOL(gspca_disconnect);
1810 1821
1822#ifdef CONFIG_PM
1823int gspca_suspend(struct usb_interface *intf, pm_message_t message)
1824{
1825 struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
1826
1827 if (!gspca_dev->streaming)
1828 return 0;
1829 gspca_dev->frozen = 1; /* avoid urb error messages */
1830 if (gspca_dev->sd_desc->stopN)
1831 gspca_dev->sd_desc->stopN(gspca_dev);
1832 destroy_urbs(gspca_dev);
1833 gspca_set_alt0(gspca_dev);
1834 if (gspca_dev->sd_desc->stop0)
1835 gspca_dev->sd_desc->stop0(gspca_dev);
1836 return 0;
1837}
1838EXPORT_SYMBOL(gspca_suspend);
1839
1840int gspca_resume(struct usb_interface *intf)
1841{
1842 struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
1843
1844 gspca_dev->frozen = 0;
1845 gspca_dev->sd_desc->init(gspca_dev);
1846 if (gspca_dev->streaming)
1847 return gspca_init_transfer(gspca_dev);
1848 return 0;
1849}
1850EXPORT_SYMBOL(gspca_resume);
1851#endif
1811/* -- cam driver utility functions -- */ 1852/* -- cam driver utility functions -- */
1812 1853
1813/* auto gain and exposure algorithm based on the knee algorithm described here: 1854/* auto gain and exposure algorithm based on the knee algorithm described here:
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 67e448940eaa..c17625cff9ba 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -56,7 +56,6 @@ extern int gspca_debug;
56 56
57/* device information - set at probe time */ 57/* device information - set at probe time */
58struct cam { 58struct cam {
59 char *dev_name;
60 struct v4l2_pix_format *cam_mode; /* size nmodes */ 59 struct v4l2_pix_format *cam_mode; /* size nmodes */
61 char nmodes; 60 char nmodes;
62 __u8 epaddr; 61 __u8 epaddr;
@@ -91,15 +90,14 @@ struct sd_desc {
91/* controls */ 90/* controls */
92 const struct ctrl *ctrls; 91 const struct ctrl *ctrls;
93 int nctrls; 92 int nctrls;
94/* operations */ 93/* mandatory operations */
95 cam_cf_op config; /* called on probe */ 94 cam_cf_op config; /* called on probe */
96 cam_op open; /* called on open */ 95 cam_op init; /* called on probe and resume */
97 cam_v_op start; /* called on stream on */ 96 cam_v_op start; /* called on stream on */
98 cam_v_op stopN; /* called on stream off - main alt */
99 cam_v_op stop0; /* called on stream off - alt 0 */
100 cam_v_op close; /* called on close */
101 cam_pkt_op pkt_scan; 97 cam_pkt_op pkt_scan;
102/* optional operations */ 98/* optional operations */
99 cam_v_op stopN; /* called on stream off - main alt */
100 cam_v_op stop0; /* called on stream off - alt 0 */
103 cam_v_op dq_callback; /* called when a frame has been dequeued */ 101 cam_v_op dq_callback; /* called when a frame has been dequeued */
104 cam_jpg_op get_jcomp; 102 cam_jpg_op get_jcomp;
105 cam_jpg_op set_jcomp; 103 cam_jpg_op set_jcomp;
@@ -127,8 +125,10 @@ struct gspca_dev {
127 125
128 struct cam cam; /* device information */ 126 struct cam cam; /* device information */
129 const struct sd_desc *sd_desc; /* subdriver description */ 127 const struct sd_desc *sd_desc; /* subdriver description */
128 unsigned ctrl_dis; /* disabled controls (bit map) */
130 129
131 __u8 usb_buf[8]; /* buffer for USB exchanges */ 130#define USB_BUF_SZ 64
131 __u8 *usb_buf; /* buffer for USB exchanges */
132 struct urb *urb[MAX_NURBS]; 132 struct urb *urb[MAX_NURBS];
133 133
134 __u8 *frbuf; /* buffer for nframes */ 134 __u8 *frbuf; /* buffer for nframes */
@@ -155,6 +155,9 @@ struct gspca_dev {
155 struct mutex queue_lock; /* ISOC queue protection */ 155 struct mutex queue_lock; /* ISOC queue protection */
156 __u32 sequence; /* frame sequence number */ 156 __u32 sequence; /* frame sequence number */
157 char streaming; 157 char streaming;
158#ifdef CONFIG_PM
159 char frozen; /* suspend - resume */
160#endif
158 char users; /* number of opens */ 161 char users; /* number of opens */
159 char present; /* device connected */ 162 char present; /* device connected */
160 char nbufread; /* number of buffers for read() */ 163 char nbufread; /* number of buffers for read() */
@@ -174,6 +177,10 @@ struct gspca_frame *gspca_frame_add(struct gspca_dev *gspca_dev,
174 struct gspca_frame *frame, 177 struct gspca_frame *frame,
175 const __u8 *data, 178 const __u8 *data,
176 int len); 179 int len);
180#ifdef CONFIG_PM
181int gspca_suspend(struct usb_interface *intf, pm_message_t message);
182int gspca_resume(struct usb_interface *intf);
183#endif
177int gspca_auto_gain_n_exposure(struct gspca_dev *gspca_dev, int avg_lum, 184int gspca_auto_gain_n_exposure(struct gspca_dev *gspca_dev, int avg_lum,
178 int desired_avg_lum, int deadzone, int gain_knee, int exposure_knee); 185 int desired_avg_lum, int deadzone, int gain_knee, int exposure_knee);
179#endif /* GSPCAV2_H */ 186#endif /* GSPCAV2_H */
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index 21c4ee56a10a..4d5db47ba8cb 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -100,22 +100,6 @@ static int reg_w(struct gspca_dev *gspca_dev,
100 return rc; 100 return rc;
101} 101}
102 102
103static int reg_w_buf(struct gspca_dev *gspca_dev,
104 __u16 index, __u8 *buf, int len)
105{
106 int rc;
107
108 rc = usb_control_msg(gspca_dev->dev,
109 usb_sndbulkpipe(gspca_dev->dev, 4),
110 0x12,
111 0xc8, /* ?? */
112 0, /* value */
113 index, buf, len, 500);
114 if (rc < 0)
115 PDEBUG(D_ERR, "reg write [%02x] error %d", index, rc);
116 return rc;
117}
118
119static void bulk_w(struct gspca_dev *gspca_dev, 103static void bulk_w(struct gspca_dev *gspca_dev,
120 __u16 *pch, 104 __u16 *pch,
121 __u16 Address) 105 __u16 Address)
@@ -144,8 +128,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
144 return 0; 128 return 0;
145} 129}
146 130
147/* this function is called at open time */ 131/* this function is called at probe and resume time */
148static int sd_open(struct gspca_dev *gspca_dev) 132static int sd_init(struct gspca_dev *gspca_dev)
149{ 133{
150 return 0; 134 return 0;
151} 135}
@@ -175,7 +159,6 @@ static void sd_start(struct gspca_dev *gspca_dev)
175 /* 159 /*
176 Initialize the MR97113 chip register 160 Initialize the MR97113 chip register
177 */ 161 */
178 data = kmalloc(16, GFP_KERNEL);
179 data[0] = 0x00; /* address */ 162 data[0] = 0x00; /* address */
180 data[1] = 0x0c | 0x01; /* reg 0 */ 163 data[1] = 0x0c | 0x01; /* reg 0 */
181 data[2] = 0x01; /* reg 1 */ 164 data[2] = 0x01; /* reg 1 */
@@ -195,12 +178,10 @@ static void sd_start(struct gspca_dev *gspca_dev)
195 data[10] = 0x5d; /* reg 9, I2C device address 178 data[10] = 0x5d; /* reg 9, I2C device address
196 * [for PAS5101 (0x40)] [for MI (0x5d)] */ 179 * [for PAS5101 (0x40)] [for MI (0x5d)] */
197 180
198 err_code = reg_w_buf(gspca_dev, data[0], data, 11); 181 err_code = reg_w(gspca_dev, data[0], 11);
199 kfree(data);
200 if (err_code < 0) 182 if (err_code < 0)
201 return; 183 return;
202 184
203 data = gspca_dev->usb_buf;
204 data[0] = 0x23; /* address */ 185 data[0] = 0x23; /* address */
205 data[1] = 0x09; /* reg 35, append frame header */ 186 data[1] = 0x09; /* reg 35, append frame header */
206 187
@@ -358,14 +339,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
358 PDEBUG(D_ERR, "Camera Stop failed"); 339 PDEBUG(D_ERR, "Camera Stop failed");
359} 340}
360 341
361static void sd_stop0(struct gspca_dev *gspca_dev)
362{
363}
364
365static void sd_close(struct gspca_dev *gspca_dev)
366{
367}
368
369static void sd_pkt_scan(struct gspca_dev *gspca_dev, 342static void sd_pkt_scan(struct gspca_dev *gspca_dev,
370 struct gspca_frame *frame, /* target */ 343 struct gspca_frame *frame, /* target */
371 __u8 *data, /* isoc packet */ 344 __u8 *data, /* isoc packet */
@@ -411,11 +384,9 @@ static const struct sd_desc sd_desc = {
411 .ctrls = sd_ctrls, 384 .ctrls = sd_ctrls,
412 .nctrls = ARRAY_SIZE(sd_ctrls), 385 .nctrls = ARRAY_SIZE(sd_ctrls),
413 .config = sd_config, 386 .config = sd_config,
414 .open = sd_open, 387 .init = sd_init,
415 .start = sd_start, 388 .start = sd_start,
416 .stopN = sd_stopN, 389 .stopN = sd_stopN,
417 .stop0 = sd_stop0,
418 .close = sd_close,
419 .pkt_scan = sd_pkt_scan, 390 .pkt_scan = sd_pkt_scan,
420}; 391};
421 392
@@ -439,6 +410,10 @@ static struct usb_driver sd_driver = {
439 .id_table = device_table, 410 .id_table = device_table,
440 .probe = sd_probe, 411 .probe = sd_probe,
441 .disconnect = gspca_disconnect, 412 .disconnect = gspca_disconnect,
413#ifdef CONFIG_PM
414 .suspend = gspca_suspend,
415 .resume = gspca_resume,
416#endif
442}; 417};
443 418
444/* -- module insert / remove -- */ 419/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index b4f00ec0885c..4df4eec9f7e7 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -63,11 +63,10 @@ struct sd {
63#define SEN_OV6630 2 63#define SEN_OV6630 2
64#define SEN_OV7610 3 64#define SEN_OV7610 3
65#define SEN_OV7620 4 65#define SEN_OV7620 4
66#define SEN_OV7630 5 66#define SEN_OV7640 5
67#define SEN_OV7640 6 67#define SEN_OV7670 6
68#define SEN_OV7670 7 68#define SEN_OV76BE 7
69#define SEN_OV76BE 8 69#define SEN_OV8610 8
70#define SEN_OV8610 9
71 70
72}; 71};
73 72
@@ -127,6 +126,7 @@ static struct ctrl sd_ctrls[] = {
127 .get = sd_getcolors, 126 .get = sd_getcolors,
128 }, 127 },
129/* next controls work with ov7670 only */ 128/* next controls work with ov7670 only */
129#define HFLIP_IDX 3
130 { 130 {
131 { 131 {
132 .id = V4L2_CID_HFLIP, 132 .id = V4L2_CID_HFLIP,
@@ -141,6 +141,7 @@ static struct ctrl sd_ctrls[] = {
141 .set = sd_sethflip, 141 .set = sd_sethflip,
142 .get = sd_gethflip, 142 .get = sd_gethflip,
143 }, 143 },
144#define VFLIP_IDX 4
144 { 145 {
145 { 146 {
146 .id = V4L2_CID_VFLIP, 147 .id = V4L2_CID_VFLIP,
@@ -293,6 +294,541 @@ static struct v4l2_pix_format sif_mode[] = {
293#define OV7670_REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */ 294#define OV7670_REG_HAECC7 0xaa /* Hist AEC/AGC control 7 */
294#define OV7670_REG_BD60MAX 0xab /* 60hz banding step limit */ 295#define OV7670_REG_BD60MAX 0xab /* 60hz banding step limit */
295 296
297struct ov_regvals {
298 __u8 reg;
299 __u8 val;
300};
301struct ov_i2c_regvals {
302 __u8 reg;
303 __u8 val;
304};
305
306static const struct ov_i2c_regvals norm_6x20[] = {
307 { 0x12, 0x80 }, /* reset */
308 { 0x11, 0x01 },
309 { 0x03, 0x60 },
310 { 0x05, 0x7f }, /* For when autoadjust is off */
311 { 0x07, 0xa8 },
312 /* The ratio of 0x0c and 0x0d controls the white point */
313 { 0x0c, 0x24 },
314 { 0x0d, 0x24 },
315 { 0x0f, 0x15 }, /* COMS */
316 { 0x10, 0x75 }, /* AEC Exposure time */
317 { 0x12, 0x24 }, /* Enable AGC */
318 { 0x14, 0x04 },
319 /* 0x16: 0x06 helps frame stability with moving objects */
320 { 0x16, 0x06 },
321/* { 0x20, 0x30 }, * Aperture correction enable */
322 { 0x26, 0xb2 }, /* BLC enable */
323 /* 0x28: 0x05 Selects RGB format if RGB on */
324 { 0x28, 0x05 },
325 { 0x2a, 0x04 }, /* Disable framerate adjust */
326/* { 0x2b, 0xac }, * Framerate; Set 2a[7] first */
327 { 0x2d, 0x99 },
328 { 0x33, 0xa0 }, /* Color Processing Parameter */
329 { 0x34, 0xd2 }, /* Max A/D range */
330 { 0x38, 0x8b },
331 { 0x39, 0x40 },
332
333 { 0x3c, 0x39 }, /* Enable AEC mode changing */
334 { 0x3c, 0x3c }, /* Change AEC mode */
335 { 0x3c, 0x24 }, /* Disable AEC mode changing */
336
337 { 0x3d, 0x80 },
338 /* These next two registers (0x4a, 0x4b) are undocumented.
339 * They control the color balance */
340 { 0x4a, 0x80 },
341 { 0x4b, 0x80 },
342 { 0x4d, 0xd2 }, /* This reduces noise a bit */
343 { 0x4e, 0xc1 },
344 { 0x4f, 0x04 },
345/* Do 50-53 have any effect? */
346/* Toggle 0x12[2] off and on here? */
347};
348
349static const struct ov_i2c_regvals norm_6x30[] = {
350 { 0x12, 0x80 }, /* Reset */
351 { 0x00, 0x1f }, /* Gain */
352 { 0x01, 0x99 }, /* Blue gain */
353 { 0x02, 0x7c }, /* Red gain */
354 { 0x03, 0xc0 }, /* Saturation */
355 { 0x05, 0x0a }, /* Contrast */
356 { 0x06, 0x95 }, /* Brightness */
357 { 0x07, 0x2d }, /* Sharpness */
358 { 0x0c, 0x20 },
359 { 0x0d, 0x20 },
360 { 0x0e, 0x20 },
361 { 0x0f, 0x05 },
362 { 0x10, 0x9a },
363 { 0x11, 0x00 }, /* Pixel clock = fastest */
364 { 0x12, 0x24 }, /* Enable AGC and AWB */
365 { 0x13, 0x21 },
366 { 0x14, 0x80 },
367 { 0x15, 0x01 },
368 { 0x16, 0x03 },
369 { 0x17, 0x38 },
370 { 0x18, 0xea },
371 { 0x19, 0x04 },
372 { 0x1a, 0x93 },
373 { 0x1b, 0x00 },
374 { 0x1e, 0xc4 },
375 { 0x1f, 0x04 },
376 { 0x20, 0x20 },
377 { 0x21, 0x10 },
378 { 0x22, 0x88 },
379 { 0x23, 0xc0 }, /* Crystal circuit power level */
380 { 0x25, 0x9a }, /* Increase AEC black ratio */
381 { 0x26, 0xb2 }, /* BLC enable */
382 { 0x27, 0xa2 },
383 { 0x28, 0x00 },
384 { 0x29, 0x00 },
385 { 0x2a, 0x84 }, /* 60 Hz power */
386 { 0x2b, 0xa8 }, /* 60 Hz power */
387 { 0x2c, 0xa0 },
388 { 0x2d, 0x95 }, /* Enable auto-brightness */
389 { 0x2e, 0x88 },
390 { 0x33, 0x26 },
391 { 0x34, 0x03 },
392 { 0x36, 0x8f },
393 { 0x37, 0x80 },
394 { 0x38, 0x83 },
395 { 0x39, 0x80 },
396 { 0x3a, 0x0f },
397 { 0x3b, 0x3c },
398 { 0x3c, 0x1a },
399 { 0x3d, 0x80 },
400 { 0x3e, 0x80 },
401 { 0x3f, 0x0e },
402 { 0x40, 0x00 }, /* White bal */
403 { 0x41, 0x00 }, /* White bal */
404 { 0x42, 0x80 },
405 { 0x43, 0x3f }, /* White bal */
406 { 0x44, 0x80 },
407 { 0x45, 0x20 },
408 { 0x46, 0x20 },
409 { 0x47, 0x80 },
410 { 0x48, 0x7f },
411 { 0x49, 0x00 },
412 { 0x4a, 0x00 },
413 { 0x4b, 0x80 },
414 { 0x4c, 0xd0 },
415 { 0x4d, 0x10 }, /* U = 0.563u, V = 0.714v */
416 { 0x4e, 0x40 },
417 { 0x4f, 0x07 }, /* UV avg., col. killer: max */
418 { 0x50, 0xff },
419 { 0x54, 0x23 }, /* Max AGC gain: 18dB */
420 { 0x55, 0xff },
421 { 0x56, 0x12 },
422 { 0x57, 0x81 },
423 { 0x58, 0x75 },
424 { 0x59, 0x01 }, /* AGC dark current comp.: +1 */
425 { 0x5a, 0x2c },
426 { 0x5b, 0x0f }, /* AWB chrominance levels */
427 { 0x5c, 0x10 },
428 { 0x3d, 0x80 },
429 { 0x27, 0xa6 },
430 { 0x12, 0x20 }, /* Toggle AWB */
431 { 0x12, 0x24 },
432};
433
434/* Lawrence Glaister <lg@jfm.bc.ca> reports:
435 *
436 * Register 0x0f in the 7610 has the following effects:
437 *
438 * 0x85 (AEC method 1): Best overall, good contrast range
439 * 0x45 (AEC method 2): Very overexposed
440 * 0xa5 (spec sheet default): Ok, but the black level is
441 * shifted resulting in loss of contrast
442 * 0x05 (old driver setting): very overexposed, too much
443 * contrast
444 */
445static const struct ov_i2c_regvals norm_7610[] = {
446 { 0x10, 0xff },
447 { 0x16, 0x06 },
448 { 0x28, 0x24 },
449 { 0x2b, 0xac },
450 { 0x12, 0x00 },
451 { 0x38, 0x81 },
452 { 0x28, 0x24 }, /* 0c */
453 { 0x0f, 0x85 }, /* lg's setting */
454 { 0x15, 0x01 },
455 { 0x20, 0x1c },
456 { 0x23, 0x2a },
457 { 0x24, 0x10 },
458 { 0x25, 0x8a },
459 { 0x26, 0xa2 },
460 { 0x27, 0xc2 },
461 { 0x2a, 0x04 },
462 { 0x2c, 0xfe },
463 { 0x2d, 0x93 },
464 { 0x30, 0x71 },
465 { 0x31, 0x60 },
466 { 0x32, 0x26 },
467 { 0x33, 0x20 },
468 { 0x34, 0x48 },
469 { 0x12, 0x24 },
470 { 0x11, 0x01 },
471 { 0x0c, 0x24 },
472 { 0x0d, 0x24 },
473};
474
475static const struct ov_i2c_regvals norm_7620[] = {
476 { 0x00, 0x00 }, /* gain */
477 { 0x01, 0x80 }, /* blue gain */
478 { 0x02, 0x80 }, /* red gain */
479 { 0x03, 0xc0 }, /* OV7670_REG_VREF */
480 { 0x06, 0x60 },
481 { 0x07, 0x00 },
482 { 0x0c, 0x24 },
483 { 0x0c, 0x24 },
484 { 0x0d, 0x24 },
485 { 0x11, 0x01 },
486 { 0x12, 0x24 },
487 { 0x13, 0x01 },
488 { 0x14, 0x84 },
489 { 0x15, 0x01 },
490 { 0x16, 0x03 },
491 { 0x17, 0x2f },
492 { 0x18, 0xcf },
493 { 0x19, 0x06 },
494 { 0x1a, 0xf5 },
495 { 0x1b, 0x00 },
496 { 0x20, 0x18 },
497 { 0x21, 0x80 },
498 { 0x22, 0x80 },
499 { 0x23, 0x00 },
500 { 0x26, 0xa2 },
501 { 0x27, 0xea },
502 { 0x28, 0x20 },
503 { 0x29, 0x00 },
504 { 0x2a, 0x10 },
505 { 0x2b, 0x00 },
506 { 0x2c, 0x88 },
507 { 0x2d, 0x91 },
508 { 0x2e, 0x80 },
509 { 0x2f, 0x44 },
510 { 0x60, 0x27 },
511 { 0x61, 0x02 },
512 { 0x62, 0x5f },
513 { 0x63, 0xd5 },
514 { 0x64, 0x57 },
515 { 0x65, 0x83 },
516 { 0x66, 0x55 },
517 { 0x67, 0x92 },
518 { 0x68, 0xcf },
519 { 0x69, 0x76 },
520 { 0x6a, 0x22 },
521 { 0x6b, 0x00 },
522 { 0x6c, 0x02 },
523 { 0x6d, 0x44 },
524 { 0x6e, 0x80 },
525 { 0x6f, 0x1d },
526 { 0x70, 0x8b },
527 { 0x71, 0x00 },
528 { 0x72, 0x14 },
529 { 0x73, 0x54 },
530 { 0x74, 0x00 },
531 { 0x75, 0x8e },
532 { 0x76, 0x00 },
533 { 0x77, 0xff },
534 { 0x78, 0x80 },
535 { 0x79, 0x80 },
536 { 0x7a, 0x80 },
537 { 0x7b, 0xe2 },
538 { 0x7c, 0x00 },
539};
540
541/* 7640 and 7648. The defaults should be OK for most registers. */
542static const struct ov_i2c_regvals norm_7640[] = {
543 { 0x12, 0x80 },
544 { 0x12, 0x14 },
545};
546
547/* 7670. Defaults taken from OmniVision provided data,
548* as provided by Jonathan Corbet of OLPC */
549static const struct ov_i2c_regvals norm_7670[] = {
550 { OV7670_REG_COM7, OV7670_COM7_RESET },
551 { OV7670_REG_TSLB, 0x04 }, /* OV */
552 { OV7670_REG_COM7, OV7670_COM7_FMT_VGA }, /* VGA */
553 { OV7670_REG_CLKRC, 0x01 },
554/*
555 * Set the hardware window. These values from OV don't entirely
556 * make sense - hstop is less than hstart. But they work...
557 */
558 { OV7670_REG_HSTART, 0x13 },
559 { OV7670_REG_HSTOP, 0x01 },
560 { OV7670_REG_HREF, 0xb6 },
561 { OV7670_REG_VSTART, 0x02 },
562 { OV7670_REG_VSTOP, 0x7a },
563 { OV7670_REG_VREF, 0x0a },
564
565 { OV7670_REG_COM3, 0 },
566 { OV7670_REG_COM14, 0 },
567/* Mystery scaling numbers */
568 { 0x70, 0x3a },
569 { 0x71, 0x35 },
570 { 0x72, 0x11 },
571 { 0x73, 0xf0 },
572 { 0xa2, 0x02 },
573/* { OV7670_REG_COM10, 0x0 }, */
574
575/* Gamma curve values */
576 { 0x7a, 0x20 },
577 { 0x7b, 0x10 },
578 { 0x7c, 0x1e },
579 { 0x7d, 0x35 },
580 { 0x7e, 0x5a },
581 { 0x7f, 0x69 },
582 { 0x80, 0x76 },
583 { 0x81, 0x80 },
584 { 0x82, 0x88 },
585 { 0x83, 0x8f },
586 { 0x84, 0x96 },
587 { 0x85, 0xa3 },
588 { 0x86, 0xaf },
589 { 0x87, 0xc4 },
590 { 0x88, 0xd7 },
591 { 0x89, 0xe8 },
592
593/* AGC and AEC parameters. Note we start by disabling those features,
594 then turn them only after tweaking the values. */
595 { OV7670_REG_COM8, OV7670_COM8_FASTAEC
596 | OV7670_COM8_AECSTEP
597 | OV7670_COM8_BFILT },
598 { OV7670_REG_GAIN, 0 },
599 { OV7670_REG_AECH, 0 },
600 { OV7670_REG_COM4, 0x40 }, /* magic reserved bit */
601 { OV7670_REG_COM9, 0x18 }, /* 4x gain + magic rsvd bit */
602 { OV7670_REG_BD50MAX, 0x05 },
603 { OV7670_REG_BD60MAX, 0x07 },
604 { OV7670_REG_AEW, 0x95 },
605 { OV7670_REG_AEB, 0x33 },
606 { OV7670_REG_VPT, 0xe3 },
607 { OV7670_REG_HAECC1, 0x78 },
608 { OV7670_REG_HAECC2, 0x68 },
609 { 0xa1, 0x03 }, /* magic */
610 { OV7670_REG_HAECC3, 0xd8 },
611 { OV7670_REG_HAECC4, 0xd8 },
612 { OV7670_REG_HAECC5, 0xf0 },
613 { OV7670_REG_HAECC6, 0x90 },
614 { OV7670_REG_HAECC7, 0x94 },
615 { OV7670_REG_COM8, OV7670_COM8_FASTAEC
616 | OV7670_COM8_AECSTEP
617 | OV7670_COM8_BFILT
618 | OV7670_COM8_AGC
619 | OV7670_COM8_AEC },
620
621/* Almost all of these are magic "reserved" values. */
622 { OV7670_REG_COM5, 0x61 },
623 { OV7670_REG_COM6, 0x4b },
624 { 0x16, 0x02 },
625 { OV7670_REG_MVFP, 0x07 },
626 { 0x21, 0x02 },
627 { 0x22, 0x91 },
628 { 0x29, 0x07 },
629 { 0x33, 0x0b },
630 { 0x35, 0x0b },
631 { 0x37, 0x1d },
632 { 0x38, 0x71 },
633 { 0x39, 0x2a },
634 { OV7670_REG_COM12, 0x78 },
635 { 0x4d, 0x40 },
636 { 0x4e, 0x20 },
637 { OV7670_REG_GFIX, 0 },
638 { 0x6b, 0x4a },
639 { 0x74, 0x10 },
640 { 0x8d, 0x4f },
641 { 0x8e, 0 },
642 { 0x8f, 0 },
643 { 0x90, 0 },
644 { 0x91, 0 },
645 { 0x96, 0 },
646 { 0x9a, 0 },
647 { 0xb0, 0x84 },
648 { 0xb1, 0x0c },
649 { 0xb2, 0x0e },
650 { 0xb3, 0x82 },
651 { 0xb8, 0x0a },
652
653/* More reserved magic, some of which tweaks white balance */
654 { 0x43, 0x0a },
655 { 0x44, 0xf0 },
656 { 0x45, 0x34 },
657 { 0x46, 0x58 },
658 { 0x47, 0x28 },
659 { 0x48, 0x3a },
660 { 0x59, 0x88 },
661 { 0x5a, 0x88 },
662 { 0x5b, 0x44 },
663 { 0x5c, 0x67 },
664 { 0x5d, 0x49 },
665 { 0x5e, 0x0e },
666 { 0x6c, 0x0a },
667 { 0x6d, 0x55 },
668 { 0x6e, 0x11 },
669 { 0x6f, 0x9f },
670 /* "9e for advance AWB" */
671 { 0x6a, 0x40 },
672 { OV7670_REG_BLUE, 0x40 },
673 { OV7670_REG_RED, 0x60 },
674 { OV7670_REG_COM8, OV7670_COM8_FASTAEC
675 | OV7670_COM8_AECSTEP
676 | OV7670_COM8_BFILT
677 | OV7670_COM8_AGC
678 | OV7670_COM8_AEC
679 | OV7670_COM8_AWB },
680
681/* Matrix coefficients */
682 { 0x4f, 0x80 },
683 { 0x50, 0x80 },
684 { 0x51, 0 },
685 { 0x52, 0x22 },
686 { 0x53, 0x5e },
687 { 0x54, 0x80 },
688 { 0x58, 0x9e },
689
690 { OV7670_REG_COM16, OV7670_COM16_AWBGAIN },
691 { OV7670_REG_EDGE, 0 },
692 { 0x75, 0x05 },
693 { 0x76, 0xe1 },
694 { 0x4c, 0 },
695 { 0x77, 0x01 },
696 { OV7670_REG_COM13, OV7670_COM13_GAMMA
697 | OV7670_COM13_UVSAT
698 | 2}, /* was 3 */
699 { 0x4b, 0x09 },
700 { 0xc9, 0x60 },
701 { OV7670_REG_COM16, 0x38 },
702 { 0x56, 0x40 },
703
704 { 0x34, 0x11 },
705 { OV7670_REG_COM11, OV7670_COM11_EXP|OV7670_COM11_HZAUTO },
706 { 0xa4, 0x88 },
707 { 0x96, 0 },
708 { 0x97, 0x30 },
709 { 0x98, 0x20 },
710 { 0x99, 0x30 },
711 { 0x9a, 0x84 },
712 { 0x9b, 0x29 },
713 { 0x9c, 0x03 },
714 { 0x9d, 0x4c },
715 { 0x9e, 0x3f },
716 { 0x78, 0x04 },
717
718/* Extra-weird stuff. Some sort of multiplexor register */
719 { 0x79, 0x01 },
720 { 0xc8, 0xf0 },
721 { 0x79, 0x0f },
722 { 0xc8, 0x00 },
723 { 0x79, 0x10 },
724 { 0xc8, 0x7e },
725 { 0x79, 0x0a },
726 { 0xc8, 0x80 },
727 { 0x79, 0x0b },
728 { 0xc8, 0x01 },
729 { 0x79, 0x0c },
730 { 0xc8, 0x0f },
731 { 0x79, 0x0d },
732 { 0xc8, 0x20 },
733 { 0x79, 0x09 },
734 { 0xc8, 0x80 },
735 { 0x79, 0x02 },
736 { 0xc8, 0xc0 },
737 { 0x79, 0x03 },
738 { 0xc8, 0x40 },
739 { 0x79, 0x05 },
740 { 0xc8, 0x30 },
741 { 0x79, 0x26 },
742};
743
744static const struct ov_i2c_regvals norm_8610[] = {
745 { 0x12, 0x80 },
746 { 0x00, 0x00 },
747 { 0x01, 0x80 },
748 { 0x02, 0x80 },
749 { 0x03, 0xc0 },
750 { 0x04, 0x30 },
751 { 0x05, 0x30 }, /* was 0x10, new from windrv 090403 */
752 { 0x06, 0x70 }, /* was 0x80, new from windrv 090403 */
753 { 0x0a, 0x86 },
754 { 0x0b, 0xb0 },
755 { 0x0c, 0x20 },
756 { 0x0d, 0x20 },
757 { 0x11, 0x01 },
758 { 0x12, 0x25 },
759 { 0x13, 0x01 },
760 { 0x14, 0x04 },
761 { 0x15, 0x01 }, /* Lin and Win think different about UV order */
762 { 0x16, 0x03 },
763 { 0x17, 0x38 }, /* was 0x2f, new from windrv 090403 */
764 { 0x18, 0xea }, /* was 0xcf, new from windrv 090403 */
765 { 0x19, 0x02 }, /* was 0x06, new from windrv 090403 */
766 { 0x1a, 0xf5 },
767 { 0x1b, 0x00 },
768 { 0x20, 0xd0 }, /* was 0x90, new from windrv 090403 */
769 { 0x23, 0xc0 }, /* was 0x00, new from windrv 090403 */
770 { 0x24, 0x30 }, /* was 0x1d, new from windrv 090403 */
771 { 0x25, 0x50 }, /* was 0x57, new from windrv 090403 */
772 { 0x26, 0xa2 },
773 { 0x27, 0xea },
774 { 0x28, 0x00 },
775 { 0x29, 0x00 },
776 { 0x2a, 0x80 },
777 { 0x2b, 0xc8 }, /* was 0xcc, new from windrv 090403 */
778 { 0x2c, 0xac },
779 { 0x2d, 0x45 }, /* was 0xd5, new from windrv 090403 */
780 { 0x2e, 0x80 },
781 { 0x2f, 0x14 }, /* was 0x01, new from windrv 090403 */
782 { 0x4c, 0x00 },
783 { 0x4d, 0x30 }, /* was 0x10, new from windrv 090403 */
784 { 0x60, 0x02 }, /* was 0x01, new from windrv 090403 */
785 { 0x61, 0x00 }, /* was 0x09, new from windrv 090403 */
786 { 0x62, 0x5f }, /* was 0xd7, new from windrv 090403 */
787 { 0x63, 0xff },
788 { 0x64, 0x53 }, /* new windrv 090403 says 0x57,
789 * maybe thats wrong */
790 { 0x65, 0x00 },
791 { 0x66, 0x55 },
792 { 0x67, 0xb0 },
793 { 0x68, 0xc0 }, /* was 0xaf, new from windrv 090403 */
794 { 0x69, 0x02 },
795 { 0x6a, 0x22 },
796 { 0x6b, 0x00 },
797 { 0x6c, 0x99 }, /* was 0x80, old windrv says 0x00, but
798 * deleting bit7 colors the first images red */
799 { 0x6d, 0x11 }, /* was 0x00, new from windrv 090403 */
800 { 0x6e, 0x11 }, /* was 0x00, new from windrv 090403 */
801 { 0x6f, 0x01 },
802 { 0x70, 0x8b },
803 { 0x71, 0x00 },
804 { 0x72, 0x14 },
805 { 0x73, 0x54 },
806 { 0x74, 0x00 },/* 0x60? - was 0x00, new from windrv 090403 */
807 { 0x75, 0x0e },
808 { 0x76, 0x02 }, /* was 0x02, new from windrv 090403 */
809 { 0x77, 0xff },
810 { 0x78, 0x80 },
811 { 0x79, 0x80 },
812 { 0x7a, 0x80 },
813 { 0x7b, 0x10 }, /* was 0x13, new from windrv 090403 */
814 { 0x7c, 0x00 },
815 { 0x7d, 0x08 }, /* was 0x09, new from windrv 090403 */
816 { 0x7e, 0x08 }, /* was 0xc0, new from windrv 090403 */
817 { 0x7f, 0xfb },
818 { 0x80, 0x28 },
819 { 0x81, 0x00 },
820 { 0x82, 0x23 },
821 { 0x83, 0x0b },
822 { 0x84, 0x00 },
823 { 0x85, 0x62 }, /* was 0x61, new from windrv 090403 */
824 { 0x86, 0xc9 },
825 { 0x87, 0x00 },
826 { 0x88, 0x00 },
827 { 0x89, 0x01 },
828 { 0x12, 0x20 },
829 { 0x12, 0x25 }, /* was 0x24, new from windrv 090403 */
830};
831
296static unsigned char ov7670_abs_to_sm(unsigned char v) 832static unsigned char ov7670_abs_to_sm(unsigned char v)
297{ 833{
298 if (v > 127) 834 if (v > 127)
@@ -537,18 +1073,10 @@ static int ov51x_set_slave_ids(struct sd *sd,
537 rc = reg_w(sd, R51x_I2C_W_SID, slave); 1073 rc = reg_w(sd, R51x_I2C_W_SID, slave);
538 if (rc < 0) 1074 if (rc < 0)
539 return rc; 1075 return rc;
1076 sd->primary_i2c_slave = slave;
540 return reg_w(sd, R51x_I2C_R_SID, slave + 1); 1077 return reg_w(sd, R51x_I2C_R_SID, slave + 1);
541} 1078}
542 1079
543struct ov_regvals {
544 __u8 reg;
545 __u8 val;
546};
547struct ov_i2c_regvals {
548 __u8 reg;
549 __u8 val;
550};
551
552static int write_regvals(struct sd *sd, 1080static int write_regvals(struct sd *sd,
553 const struct ov_regvals *regvals, 1081 const struct ov_regvals *regvals,
554 int n) 1082 int n)
@@ -591,101 +1119,9 @@ static int write_i2c_regvals(struct sd *sd,
591static int ov8xx0_configure(struct sd *sd) 1119static int ov8xx0_configure(struct sd *sd)
592{ 1120{
593 int rc; 1121 int rc;
594 static const struct ov_i2c_regvals norm_8610[] = {
595 { 0x12, 0x80 },
596 { 0x00, 0x00 },
597 { 0x01, 0x80 },
598 { 0x02, 0x80 },
599 { 0x03, 0xc0 },
600 { 0x04, 0x30 },
601 { 0x05, 0x30 }, /* was 0x10, new from windrv 090403 */
602 { 0x06, 0x70 }, /* was 0x80, new from windrv 090403 */
603 { 0x0a, 0x86 },
604 { 0x0b, 0xb0 },
605 { 0x0c, 0x20 },
606 { 0x0d, 0x20 },
607 { 0x11, 0x01 },
608 { 0x12, 0x25 },
609 { 0x13, 0x01 },
610 { 0x14, 0x04 },
611 { 0x15, 0x01 }, /* Lin and Win think different about UV order */
612 { 0x16, 0x03 },
613 { 0x17, 0x38 }, /* was 0x2f, new from windrv 090403 */
614 { 0x18, 0xea }, /* was 0xcf, new from windrv 090403 */
615 { 0x19, 0x02 }, /* was 0x06, new from windrv 090403 */
616 { 0x1a, 0xf5 },
617 { 0x1b, 0x00 },
618 { 0x20, 0xd0 }, /* was 0x90, new from windrv 090403 */
619 { 0x23, 0xc0 }, /* was 0x00, new from windrv 090403 */
620 { 0x24, 0x30 }, /* was 0x1d, new from windrv 090403 */
621 { 0x25, 0x50 }, /* was 0x57, new from windrv 090403 */
622 { 0x26, 0xa2 },
623 { 0x27, 0xea },
624 { 0x28, 0x00 },
625 { 0x29, 0x00 },
626 { 0x2a, 0x80 },
627 { 0x2b, 0xc8 }, /* was 0xcc, new from windrv 090403 */
628 { 0x2c, 0xac },
629 { 0x2d, 0x45 }, /* was 0xd5, new from windrv 090403 */
630 { 0x2e, 0x80 },
631 { 0x2f, 0x14 }, /* was 0x01, new from windrv 090403 */
632 { 0x4c, 0x00 },
633 { 0x4d, 0x30 }, /* was 0x10, new from windrv 090403 */
634 { 0x60, 0x02 }, /* was 0x01, new from windrv 090403 */
635 { 0x61, 0x00 }, /* was 0x09, new from windrv 090403 */
636 { 0x62, 0x5f }, /* was 0xd7, new from windrv 090403 */
637 { 0x63, 0xff },
638 { 0x64, 0x53 }, /* new windrv 090403 says 0x57,
639 * maybe thats wrong */
640 { 0x65, 0x00 },
641 { 0x66, 0x55 },
642 { 0x67, 0xb0 },
643 { 0x68, 0xc0 }, /* was 0xaf, new from windrv 090403 */
644 { 0x69, 0x02 },
645 { 0x6a, 0x22 },
646 { 0x6b, 0x00 },
647 { 0x6c, 0x99 }, /* was 0x80, old windrv says 0x00, but
648 deleting bit7 colors the first images red */
649 { 0x6d, 0x11 }, /* was 0x00, new from windrv 090403 */
650 { 0x6e, 0x11 }, /* was 0x00, new from windrv 090403 */
651 { 0x6f, 0x01 },
652 { 0x70, 0x8b },
653 { 0x71, 0x00 },
654 { 0x72, 0x14 },
655 { 0x73, 0x54 },
656 { 0x74, 0x00 },/* 0x60? - was 0x00, new from windrv 090403 */
657 { 0x75, 0x0e },
658 { 0x76, 0x02 }, /* was 0x02, new from windrv 090403 */
659 { 0x77, 0xff },
660 { 0x78, 0x80 },
661 { 0x79, 0x80 },
662 { 0x7a, 0x80 },
663 { 0x7b, 0x10 }, /* was 0x13, new from windrv 090403 */
664 { 0x7c, 0x00 },
665 { 0x7d, 0x08 }, /* was 0x09, new from windrv 090403 */
666 { 0x7e, 0x08 }, /* was 0xc0, new from windrv 090403 */
667 { 0x7f, 0xfb },
668 { 0x80, 0x28 },
669 { 0x81, 0x00 },
670 { 0x82, 0x23 },
671 { 0x83, 0x0b },
672 { 0x84, 0x00 },
673 { 0x85, 0x62 }, /* was 0x61, new from windrv 090403 */
674 { 0x86, 0xc9 },
675 { 0x87, 0x00 },
676 { 0x88, 0x00 },
677 { 0x89, 0x01 },
678 { 0x12, 0x20 },
679 { 0x12, 0x25 }, /* was 0x24, new from windrv 090403 */
680 };
681 1122
682 PDEBUG(D_PROBE, "starting ov8xx0 configuration"); 1123 PDEBUG(D_PROBE, "starting ov8xx0 configuration");
683 1124
684 if (init_ov_sensor(sd) < 0)
685 PDEBUG(D_ERR|D_PROBE, "Failed to read sensor ID");
686 else
687 PDEBUG(D_PROBE, "OV86x0 initialized");
688
689 /* Detect sensor (sub)type */ 1125 /* Detect sensor (sub)type */
690 rc = i2c_r(sd, OV7610_REG_COM_I); 1126 rc = i2c_r(sd, OV7610_REG_COM_I);
691 if (rc < 0) { 1127 if (rc < 0) {
@@ -698,9 +1134,6 @@ static int ov8xx0_configure(struct sd *sd)
698 PDEBUG(D_ERR, "Unknown image sensor version: %d", rc & 3); 1134 PDEBUG(D_ERR, "Unknown image sensor version: %d", rc & 3);
699 return -1; 1135 return -1;
700 } 1136 }
701 PDEBUG(D_PROBE, "Writing 8610 registers");
702 if (write_i2c_regvals(sd, norm_8610, ARRAY_SIZE(norm_8610)))
703 return -1;
704 1137
705 /* Set sensor-specific vars */ 1138 /* Set sensor-specific vars */
706/* sd->sif = 0; already done */ 1139/* sd->sif = 0; already done */
@@ -714,252 +1147,6 @@ static int ov7xx0_configure(struct sd *sd)
714{ 1147{
715 int rc, high, low; 1148 int rc, high, low;
716 1149
717 /* Lawrence Glaister <lg@jfm.bc.ca> reports:
718 *
719 * Register 0x0f in the 7610 has the following effects:
720 *
721 * 0x85 (AEC method 1): Best overall, good contrast range
722 * 0x45 (AEC method 2): Very overexposed
723 * 0xa5 (spec sheet default): Ok, but the black level is
724 * shifted resulting in loss of contrast
725 * 0x05 (old driver setting): very overexposed, too much
726 * contrast
727 */
728 static const struct ov_i2c_regvals norm_7610[] = {
729 { 0x10, 0xff },
730 { 0x16, 0x06 },
731 { 0x28, 0x24 },
732 { 0x2b, 0xac },
733 { 0x12, 0x00 },
734 { 0x38, 0x81 },
735 { 0x28, 0x24 }, /* 0c */
736 { 0x0f, 0x85 }, /* lg's setting */
737 { 0x15, 0x01 },
738 { 0x20, 0x1c },
739 { 0x23, 0x2a },
740 { 0x24, 0x10 },
741 { 0x25, 0x8a },
742 { 0x26, 0xa2 },
743 { 0x27, 0xc2 },
744 { 0x2a, 0x04 },
745 { 0x2c, 0xfe },
746 { 0x2d, 0x93 },
747 { 0x30, 0x71 },
748 { 0x31, 0x60 },
749 { 0x32, 0x26 },
750 { 0x33, 0x20 },
751 { 0x34, 0x48 },
752 { 0x12, 0x24 },
753 { 0x11, 0x01 },
754 { 0x0c, 0x24 },
755 { 0x0d, 0x24 },
756 };
757
758 static const struct ov_i2c_regvals norm_7620[] = {
759 { 0x00, 0x00 }, /* gain */
760 { 0x01, 0x80 }, /* blue gain */
761 { 0x02, 0x80 }, /* red gain */
762 { 0x03, 0xc0 }, /* OV7670_REG_VREF */
763 { 0x06, 0x60 },
764 { 0x07, 0x00 },
765 { 0x0c, 0x24 },
766 { 0x0c, 0x24 },
767 { 0x0d, 0x24 },
768 { 0x11, 0x01 },
769 { 0x12, 0x24 },
770 { 0x13, 0x01 },
771 { 0x14, 0x84 },
772 { 0x15, 0x01 },
773 { 0x16, 0x03 },
774 { 0x17, 0x2f },
775 { 0x18, 0xcf },
776 { 0x19, 0x06 },
777 { 0x1a, 0xf5 },
778 { 0x1b, 0x00 },
779 { 0x20, 0x18 },
780 { 0x21, 0x80 },
781 { 0x22, 0x80 },
782 { 0x23, 0x00 },
783 { 0x26, 0xa2 },
784 { 0x27, 0xea },
785 { 0x28, 0x20 },
786 { 0x29, 0x00 },
787 { 0x2a, 0x10 },
788 { 0x2b, 0x00 },
789 { 0x2c, 0x88 },
790 { 0x2d, 0x91 },
791 { 0x2e, 0x80 },
792 { 0x2f, 0x44 },
793 { 0x60, 0x27 },
794 { 0x61, 0x02 },
795 { 0x62, 0x5f },
796 { 0x63, 0xd5 },
797 { 0x64, 0x57 },
798 { 0x65, 0x83 },
799 { 0x66, 0x55 },
800 { 0x67, 0x92 },
801 { 0x68, 0xcf },
802 { 0x69, 0x76 },
803 { 0x6a, 0x22 },
804 { 0x6b, 0x00 },
805 { 0x6c, 0x02 },
806 { 0x6d, 0x44 },
807 { 0x6e, 0x80 },
808 { 0x6f, 0x1d },
809 { 0x70, 0x8b },
810 { 0x71, 0x00 },
811 { 0x72, 0x14 },
812 { 0x73, 0x54 },
813 { 0x74, 0x00 },
814 { 0x75, 0x8e },
815 { 0x76, 0x00 },
816 { 0x77, 0xff },
817 { 0x78, 0x80 },
818 { 0x79, 0x80 },
819 { 0x7a, 0x80 },
820 { 0x7b, 0xe2 },
821 { 0x7c, 0x00 },
822 };
823
824 /* 7640 and 7648. The defaults should be OK for most registers. */
825 static const struct ov_i2c_regvals norm_7640[] = {
826 { 0x12, 0x80 },
827 { 0x12, 0x14 },
828 };
829
830 /* 7670. Defaults taken from OmniVision provided data,
831 * as provided by Jonathan Corbet of OLPC */
832 static const struct ov_i2c_regvals norm_7670[] = {
833 { OV7670_REG_COM7, OV7670_COM7_RESET },
834 { OV7670_REG_TSLB, 0x04 }, /* OV */
835 { OV7670_REG_COM7, OV7670_COM7_FMT_VGA }, /* VGA */
836 { OV7670_REG_CLKRC, 0x01 },
837 /*
838 * Set the hardware window. These values from OV don't entirely
839 * make sense - hstop is less than hstart. But they work...
840 */
841 { OV7670_REG_HSTART, 0x13 }, { OV7670_REG_HSTOP, 0x01 },
842 { OV7670_REG_HREF, 0xb6 }, { OV7670_REG_VSTART, 0x02 },
843 { OV7670_REG_VSTOP, 0x7a }, { OV7670_REG_VREF, 0x0a },
844
845 { OV7670_REG_COM3, 0 }, { OV7670_REG_COM14, 0 },
846 /* Mystery scaling numbers */
847 { 0x70, 0x3a }, { 0x71, 0x35 },
848 { 0x72, 0x11 }, { 0x73, 0xf0 },
849 { 0xa2, 0x02 },
850/* { OV7670_REG_COM10, 0x0 }, */
851
852 /* Gamma curve values */
853 { 0x7a, 0x20 },
854 { 0x7b, 0x10 },
855 { 0x7c, 0x1e },
856 { 0x7d, 0x35 },
857 { 0x7e, 0x5a }, { 0x7f, 0x69 },
858 { 0x80, 0x76 }, { 0x81, 0x80 },
859 { 0x82, 0x88 }, { 0x83, 0x8f },
860 { 0x84, 0x96 }, { 0x85, 0xa3 },
861 { 0x86, 0xaf }, { 0x87, 0xc4 },
862 { 0x88, 0xd7 }, { 0x89, 0xe8 },
863
864 /* AGC and AEC parameters. Note we start by disabling those features,
865 then turn them only after tweaking the values. */
866 { OV7670_REG_COM8, OV7670_COM8_FASTAEC
867 | OV7670_COM8_AECSTEP
868 | OV7670_COM8_BFILT },
869 { OV7670_REG_GAIN, 0 }, { OV7670_REG_AECH, 0 },
870 { OV7670_REG_COM4, 0x40 }, /* magic reserved bit */
871 { OV7670_REG_COM9, 0x18 }, /* 4x gain + magic rsvd bit */
872 { OV7670_REG_BD50MAX, 0x05 }, { OV7670_REG_BD60MAX, 0x07 },
873 { OV7670_REG_AEW, 0x95 }, { OV7670_REG_AEB, 0x33 },
874 { OV7670_REG_VPT, 0xe3 }, { OV7670_REG_HAECC1, 0x78 },
875 { OV7670_REG_HAECC2, 0x68 },
876 { 0xa1, 0x03 }, /* magic */
877 { OV7670_REG_HAECC3, 0xd8 }, { OV7670_REG_HAECC4, 0xd8 },
878 { OV7670_REG_HAECC5, 0xf0 }, { OV7670_REG_HAECC6, 0x90 },
879 { OV7670_REG_HAECC7, 0x94 },
880 { OV7670_REG_COM8, OV7670_COM8_FASTAEC
881 | OV7670_COM8_AECSTEP
882 | OV7670_COM8_BFILT
883 | OV7670_COM8_AGC
884 | OV7670_COM8_AEC },
885
886 /* Almost all of these are magic "reserved" values. */
887 { OV7670_REG_COM5, 0x61 }, { OV7670_REG_COM6, 0x4b },
888 { 0x16, 0x02 },
889 { OV7670_REG_MVFP, 0x07 },
890 { 0x21, 0x02 }, { 0x22, 0x91 },
891 { 0x29, 0x07 }, { 0x33, 0x0b },
892 { 0x35, 0x0b }, { 0x37, 0x1d },
893 { 0x38, 0x71 }, { 0x39, 0x2a },
894 { OV7670_REG_COM12, 0x78 }, { 0x4d, 0x40 },
895 { 0x4e, 0x20 }, { OV7670_REG_GFIX, 0 },
896 { 0x6b, 0x4a }, { 0x74, 0x10 },
897 { 0x8d, 0x4f }, { 0x8e, 0 },
898 { 0x8f, 0 }, { 0x90, 0 },
899 { 0x91, 0 }, { 0x96, 0 },
900 { 0x9a, 0 }, { 0xb0, 0x84 },
901 { 0xb1, 0x0c }, { 0xb2, 0x0e },
902 { 0xb3, 0x82 }, { 0xb8, 0x0a },
903
904 /* More reserved magic, some of which tweaks white balance */
905 { 0x43, 0x0a }, { 0x44, 0xf0 },
906 { 0x45, 0x34 }, { 0x46, 0x58 },
907 { 0x47, 0x28 }, { 0x48, 0x3a },
908 { 0x59, 0x88 }, { 0x5a, 0x88 },
909 { 0x5b, 0x44 }, { 0x5c, 0x67 },
910 { 0x5d, 0x49 }, { 0x5e, 0x0e },
911 { 0x6c, 0x0a }, { 0x6d, 0x55 },
912 { 0x6e, 0x11 }, { 0x6f, 0x9f },
913 /* "9e for advance AWB" */
914 { 0x6a, 0x40 }, { OV7670_REG_BLUE, 0x40 },
915 { OV7670_REG_RED, 0x60 },
916 { OV7670_REG_COM8, OV7670_COM8_FASTAEC
917 | OV7670_COM8_AECSTEP
918 | OV7670_COM8_BFILT
919 | OV7670_COM8_AGC
920 | OV7670_COM8_AEC
921 | OV7670_COM8_AWB },
922
923 /* Matrix coefficients */
924 { 0x4f, 0x80 }, { 0x50, 0x80 },
925 { 0x51, 0 }, { 0x52, 0x22 },
926 { 0x53, 0x5e }, { 0x54, 0x80 },
927 { 0x58, 0x9e },
928
929 { OV7670_REG_COM16, OV7670_COM16_AWBGAIN },
930 { OV7670_REG_EDGE, 0 },
931 { 0x75, 0x05 }, { 0x76, 0xe1 },
932 { 0x4c, 0 }, { 0x77, 0x01 },
933 { OV7670_REG_COM13, OV7670_COM13_GAMMA
934 | OV7670_COM13_UVSAT
935 | 2}, /* was 3 */
936 { 0x4b, 0x09 },
937 { 0xc9, 0x60 }, { OV7670_REG_COM16, 0x38 },
938 { 0x56, 0x40 },
939
940 { 0x34, 0x11 },
941 { OV7670_REG_COM11, OV7670_COM11_EXP|OV7670_COM11_HZAUTO },
942 { 0xa4, 0x88 }, { 0x96, 0 },
943 { 0x97, 0x30 }, { 0x98, 0x20 },
944 { 0x99, 0x30 }, { 0x9a, 0x84 },
945 { 0x9b, 0x29 }, { 0x9c, 0x03 },
946 { 0x9d, 0x4c }, { 0x9e, 0x3f },
947 { 0x78, 0x04 },
948
949 /* Extra-weird stuff. Some sort of multiplexor register */
950 { 0x79, 0x01 }, { 0xc8, 0xf0 },
951 { 0x79, 0x0f }, { 0xc8, 0x00 },
952 { 0x79, 0x10 }, { 0xc8, 0x7e },
953 { 0x79, 0x0a }, { 0xc8, 0x80 },
954 { 0x79, 0x0b }, { 0xc8, 0x01 },
955 { 0x79, 0x0c }, { 0xc8, 0x0f },
956 { 0x79, 0x0d }, { 0xc8, 0x20 },
957 { 0x79, 0x09 }, { 0xc8, 0x80 },
958 { 0x79, 0x02 }, { 0xc8, 0xc0 },
959 { 0x79, 0x03 }, { 0xc8, 0x40 },
960 { 0x79, 0x05 }, { 0xc8, 0x30 },
961 { 0x79, 0x26 },
962 };
963 1150
964 PDEBUG(D_PROBE, "starting OV7xx0 configuration"); 1151 PDEBUG(D_PROBE, "starting OV7xx0 configuration");
965 1152
@@ -1011,8 +1198,9 @@ static int ov7xx0_configure(struct sd *sd)
1011 switch (low) { 1198 switch (low) {
1012 case 0x30: 1199 case 0x30:
1013 PDEBUG(D_PROBE, "Sensor is an OV7630/OV7635"); 1200 PDEBUG(D_PROBE, "Sensor is an OV7630/OV7635");
1014 sd->sensor = SEN_OV7630; 1201 PDEBUG(D_ERR,
1015 break; 1202 "7630 is not supported by this driver");
1203 return -1;
1016 case 0x40: 1204 case 0x40:
1017 PDEBUG(D_PROBE, "Sensor is an OV7645"); 1205 PDEBUG(D_PROBE, "Sensor is an OV7645");
1018 sd->sensor = SEN_OV7640; /* FIXME */ 1206 sd->sensor = SEN_OV7640; /* FIXME */
@@ -1038,32 +1226,6 @@ static int ov7xx0_configure(struct sd *sd)
1038 return -1; 1226 return -1;
1039 } 1227 }
1040 1228
1041 switch (sd->sensor) {
1042 case SEN_OV7620:
1043 PDEBUG(D_PROBE, "Writing 7620 registers");
1044 if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620)))
1045 return -1;
1046 break;
1047 case SEN_OV7630:
1048 PDEBUG(D_ERR, "7630 is not supported by this driver version");
1049 return -1;
1050 case SEN_OV7640:
1051 PDEBUG(D_PROBE, "Writing 7640 registers");
1052 if (write_i2c_regvals(sd, norm_7640, ARRAY_SIZE(norm_7640)))
1053 return -1;
1054 break;
1055 case SEN_OV7670:
1056 PDEBUG(D_PROBE, "Writing 7670 registers");
1057 if (write_i2c_regvals(sd, norm_7670, ARRAY_SIZE(norm_7670)))
1058 return -1;
1059 break;
1060 default:
1061 PDEBUG(D_PROBE, "Writing 7610 registers");
1062 if (write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610)))
1063 return -1;
1064 break;
1065 }
1066
1067 /* Set sensor-specific vars */ 1229 /* Set sensor-specific vars */
1068/* sd->sif = 0; already done */ 1230/* sd->sif = 0; already done */
1069 return 0; 1231 return 0;
@@ -1073,141 +1235,7 @@ static int ov7xx0_configure(struct sd *sd)
1073static int ov6xx0_configure(struct sd *sd) 1235static int ov6xx0_configure(struct sd *sd)
1074{ 1236{
1075 int rc; 1237 int rc;
1076 static const struct ov_i2c_regvals norm_6x20[] = { 1238 PDEBUG(D_PROBE, "starting OV6xx0 configuration");
1077 { 0x12, 0x80 }, /* reset */
1078 { 0x11, 0x01 },
1079 { 0x03, 0x60 },
1080 { 0x05, 0x7f }, /* For when autoadjust is off */
1081 { 0x07, 0xa8 },
1082 /* The ratio of 0x0c and 0x0d controls the white point */
1083 { 0x0c, 0x24 },
1084 { 0x0d, 0x24 },
1085 { 0x0f, 0x15 }, /* COMS */
1086 { 0x10, 0x75 }, /* AEC Exposure time */
1087 { 0x12, 0x24 }, /* Enable AGC */
1088 { 0x14, 0x04 },
1089 /* 0x16: 0x06 helps frame stability with moving objects */
1090 { 0x16, 0x06 },
1091/* { 0x20, 0x30 }, * Aperture correction enable */
1092 { 0x26, 0xb2 }, /* BLC enable */
1093 /* 0x28: 0x05 Selects RGB format if RGB on */
1094 { 0x28, 0x05 },
1095 { 0x2a, 0x04 }, /* Disable framerate adjust */
1096/* { 0x2b, 0xac }, * Framerate; Set 2a[7] first */
1097 { 0x2d, 0x99 },
1098 { 0x33, 0xa0 }, /* Color Processing Parameter */
1099 { 0x34, 0xd2 }, /* Max A/D range */
1100 { 0x38, 0x8b },
1101 { 0x39, 0x40 },
1102
1103 { 0x3c, 0x39 }, /* Enable AEC mode changing */
1104 { 0x3c, 0x3c }, /* Change AEC mode */
1105 { 0x3c, 0x24 }, /* Disable AEC mode changing */
1106
1107 { 0x3d, 0x80 },
1108 /* These next two registers (0x4a, 0x4b) are undocumented.
1109 * They control the color balance */
1110 { 0x4a, 0x80 },
1111 { 0x4b, 0x80 },
1112 { 0x4d, 0xd2 }, /* This reduces noise a bit */
1113 { 0x4e, 0xc1 },
1114 { 0x4f, 0x04 },
1115/* Do 50-53 have any effect? */
1116/* Toggle 0x12[2] off and on here? */
1117 };
1118
1119 static const struct ov_i2c_regvals norm_6x30[] = {
1120 { 0x12, 0x80 }, /* Reset */
1121 { 0x00, 0x1f }, /* Gain */
1122 { 0x01, 0x99 }, /* Blue gain */
1123 { 0x02, 0x7c }, /* Red gain */
1124 { 0x03, 0xc0 }, /* Saturation */
1125 { 0x05, 0x0a }, /* Contrast */
1126 { 0x06, 0x95 }, /* Brightness */
1127 { 0x07, 0x2d }, /* Sharpness */
1128 { 0x0c, 0x20 },
1129 { 0x0d, 0x20 },
1130 { 0x0e, 0x20 },
1131 { 0x0f, 0x05 },
1132 { 0x10, 0x9a },
1133 { 0x11, 0x00 }, /* Pixel clock = fastest */
1134 { 0x12, 0x24 }, /* Enable AGC and AWB */
1135 { 0x13, 0x21 },
1136 { 0x14, 0x80 },
1137 { 0x15, 0x01 },
1138 { 0x16, 0x03 },
1139 { 0x17, 0x38 },
1140 { 0x18, 0xea },
1141 { 0x19, 0x04 },
1142 { 0x1a, 0x93 },
1143 { 0x1b, 0x00 },
1144 { 0x1e, 0xc4 },
1145 { 0x1f, 0x04 },
1146 { 0x20, 0x20 },
1147 { 0x21, 0x10 },
1148 { 0x22, 0x88 },
1149 { 0x23, 0xc0 }, /* Crystal circuit power level */
1150 { 0x25, 0x9a }, /* Increase AEC black ratio */
1151 { 0x26, 0xb2 }, /* BLC enable */
1152 { 0x27, 0xa2 },
1153 { 0x28, 0x00 },
1154 { 0x29, 0x00 },
1155 { 0x2a, 0x84 }, /* 60 Hz power */
1156 { 0x2b, 0xa8 }, /* 60 Hz power */
1157 { 0x2c, 0xa0 },
1158 { 0x2d, 0x95 }, /* Enable auto-brightness */
1159 { 0x2e, 0x88 },
1160 { 0x33, 0x26 },
1161 { 0x34, 0x03 },
1162 { 0x36, 0x8f },
1163 { 0x37, 0x80 },
1164 { 0x38, 0x83 },
1165 { 0x39, 0x80 },
1166 { 0x3a, 0x0f },
1167 { 0x3b, 0x3c },
1168 { 0x3c, 0x1a },
1169 { 0x3d, 0x80 },
1170 { 0x3e, 0x80 },
1171 { 0x3f, 0x0e },
1172 { 0x40, 0x00 }, /* White bal */
1173 { 0x41, 0x00 }, /* White bal */
1174 { 0x42, 0x80 },
1175 { 0x43, 0x3f }, /* White bal */
1176 { 0x44, 0x80 },
1177 { 0x45, 0x20 },
1178 { 0x46, 0x20 },
1179 { 0x47, 0x80 },
1180 { 0x48, 0x7f },
1181 { 0x49, 0x00 },
1182 { 0x4a, 0x00 },
1183 { 0x4b, 0x80 },
1184 { 0x4c, 0xd0 },
1185 { 0x4d, 0x10 }, /* U = 0.563u, V = 0.714v */
1186 { 0x4e, 0x40 },
1187 { 0x4f, 0x07 }, /* UV avg., col. killer: max */
1188 { 0x50, 0xff },
1189 { 0x54, 0x23 }, /* Max AGC gain: 18dB */
1190 { 0x55, 0xff },
1191 { 0x56, 0x12 },
1192 { 0x57, 0x81 },
1193 { 0x58, 0x75 },
1194 { 0x59, 0x01 }, /* AGC dark current comp.: +1 */
1195 { 0x5a, 0x2c },
1196 { 0x5b, 0x0f }, /* AWB chrominance levels */
1197 { 0x5c, 0x10 },
1198 { 0x3d, 0x80 },
1199 { 0x27, 0xa6 },
1200 { 0x12, 0x20 }, /* Toggle AWB */
1201 { 0x12, 0x24 },
1202 };
1203
1204 PDEBUG(D_PROBE, "starting sensor configuration");
1205
1206 if (init_ov_sensor(sd) < 0) {
1207 PDEBUG(D_ERR, "Failed to read sensor ID.");
1208 return -1;
1209 }
1210 PDEBUG(D_PROBE, "OV6xx0 sensor detected");
1211 1239
1212 /* Detect sensor (sub)type */ 1240 /* Detect sensor (sub)type */
1213 rc = i2c_r(sd, OV7610_REG_COM_I); 1241 rc = i2c_r(sd, OV7610_REG_COM_I);
@@ -1251,15 +1279,6 @@ static int ov6xx0_configure(struct sd *sd)
1251 /* Set sensor-specific vars */ 1279 /* Set sensor-specific vars */
1252 sd->sif = 1; 1280 sd->sif = 1;
1253 1281
1254 if (sd->sensor == SEN_OV6620) {
1255 PDEBUG(D_PROBE, "Writing 6x20 registers");
1256 if (write_i2c_regvals(sd, norm_6x20, ARRAY_SIZE(norm_6x20)))
1257 return -1;
1258 } else {
1259 PDEBUG(D_PROBE, "Writing 6x30 registers");
1260 if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30)))
1261 return -1;
1262 }
1263 return 0; 1282 return 0;
1264} 1283}
1265 1284
@@ -1298,22 +1317,31 @@ static int sd_config(struct gspca_dev *gspca_dev,
1298 ov51x_led_control(sd, 0); /* turn LED off */ 1317 ov51x_led_control(sd, 0); /* turn LED off */
1299 1318
1300 /* Test for 76xx */ 1319 /* Test for 76xx */
1301 sd->primary_i2c_slave = OV7xx0_SID;
1302 if (ov51x_set_slave_ids(sd, OV7xx0_SID) < 0) 1320 if (ov51x_set_slave_ids(sd, OV7xx0_SID) < 0)
1303 goto error; 1321 goto error;
1304 1322
1305 /* The OV519 must be more aggressive about sensor detection since 1323 /* The OV519 must be more aggressive about sensor detection since
1306 * I2C write will never fail if the sensor is not present. We have 1324 * I2C write will never fail if the sensor is not present. We have
1307 * to try to initialize the sensor to detect its presence */ 1325 * to try to initialize the sensor to detect its presence */
1308 if (init_ov_sensor(sd) < 0) { 1326 if (init_ov_sensor(sd) >= 0) {
1327 if (ov7xx0_configure(sd) < 0) {
1328 PDEBUG(D_ERR, "Failed to configure OV7xx0");
1329 goto error;
1330 }
1331 } else {
1332
1309 /* Test for 6xx0 */ 1333 /* Test for 6xx0 */
1310 sd->primary_i2c_slave = OV6xx0_SID;
1311 if (ov51x_set_slave_ids(sd, OV6xx0_SID) < 0) 1334 if (ov51x_set_slave_ids(sd, OV6xx0_SID) < 0)
1312 goto error; 1335 goto error;
1313 1336
1314 if (init_ov_sensor(sd) < 0) { 1337 if (init_ov_sensor(sd) >= 0) {
1338 if (ov6xx0_configure(sd) < 0) {
1339 PDEBUG(D_ERR, "Failed to configure OV6xx0");
1340 goto error;
1341 }
1342 } else {
1343
1315 /* Test for 8xx0 */ 1344 /* Test for 8xx0 */
1316 sd->primary_i2c_slave = OV8xx0_SID;
1317 if (ov51x_set_slave_ids(sd, OV8xx0_SID) < 0) 1345 if (ov51x_set_slave_ids(sd, OV8xx0_SID) < 0)
1318 goto error; 1346 goto error;
1319 1347
@@ -1321,24 +1349,13 @@ static int sd_config(struct gspca_dev *gspca_dev,
1321 PDEBUG(D_ERR, 1349 PDEBUG(D_ERR,
1322 "Can't determine sensor slave IDs"); 1350 "Can't determine sensor slave IDs");
1323 goto error; 1351 goto error;
1324 } else {
1325 if (ov8xx0_configure(sd) < 0) {
1326 PDEBUG(D_ERR,
1327 "Failed to configure OV8xx0 sensor");
1328 goto error;
1329 }
1330 } 1352 }
1331 } else { 1353 if (ov8xx0_configure(sd) < 0) {
1332 if (ov6xx0_configure(sd) < 0) { 1354 PDEBUG(D_ERR,
1333 PDEBUG(D_ERR, "Failed to configure OV6xx0"); 1355 "Failed to configure OV8xx0 sensor");
1334 goto error; 1356 goto error;
1335 } 1357 }
1336 } 1358 }
1337 } else {
1338 if (ov7xx0_configure(sd) < 0) {
1339 PDEBUG(D_ERR, "Failed to configure OV7xx0");
1340 goto error;
1341 }
1342 } 1359 }
1343 1360
1344 cam = &gspca_dev->cam; 1361 cam = &gspca_dev->cam;
@@ -1355,15 +1372,53 @@ static int sd_config(struct gspca_dev *gspca_dev,
1355 sd->colors = COLOR_DEF; 1372 sd->colors = COLOR_DEF;
1356 sd->hflip = HFLIP_DEF; 1373 sd->hflip = HFLIP_DEF;
1357 sd->vflip = VFLIP_DEF; 1374 sd->vflip = VFLIP_DEF;
1375 if (sd->sensor != SEN_OV7670)
1376 gspca_dev->ctrl_dis = (1 << HFLIP_IDX)
1377 | (1 << VFLIP_IDX);
1358 return 0; 1378 return 0;
1359error: 1379error:
1360 PDEBUG(D_ERR, "OV519 Config failed"); 1380 PDEBUG(D_ERR, "OV519 Config failed");
1361 return -EBUSY; 1381 return -EBUSY;
1362} 1382}
1363 1383
1364/* this function is called at open time */ 1384/* this function is called at probe and resume time */
1365static int sd_open(struct gspca_dev *gspca_dev) 1385static int sd_init(struct gspca_dev *gspca_dev)
1366{ 1386{
1387 struct sd *sd = (struct sd *) gspca_dev;
1388
1389 /* initialize the sensor */
1390 switch (sd->sensor) {
1391 case SEN_OV6620:
1392 if (write_i2c_regvals(sd, norm_6x20, ARRAY_SIZE(norm_6x20)))
1393 return -EIO;
1394 break;
1395 case SEN_OV6630:
1396 if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30)))
1397 return -EIO;
1398 break;
1399 default:
1400/* case SEN_OV7610: */
1401/* case SEN_OV76BE: */
1402 if (write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610)))
1403 return -EIO;
1404 break;
1405 case SEN_OV7620:
1406 if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620)))
1407 return -EIO;
1408 break;
1409 case SEN_OV7640:
1410 if (write_i2c_regvals(sd, norm_7640, ARRAY_SIZE(norm_7640)))
1411 return -EIO;
1412 break;
1413 case SEN_OV7670:
1414 if (write_i2c_regvals(sd, norm_7670, ARRAY_SIZE(norm_7670)))
1415 return -EIO;
1416 break;
1417 case SEN_OV8610:
1418 if (write_i2c_regvals(sd, norm_8610, ARRAY_SIZE(norm_8610)))
1419 return -EIO;
1420 break;
1421 }
1367 return 0; 1422 return 0;
1368} 1423}
1369 1424
@@ -1827,14 +1882,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1827 ov51x_led_control((struct sd *) gspca_dev, 0); 1882 ov51x_led_control((struct sd *) gspca_dev, 0);
1828} 1883}
1829 1884
1830static void sd_stop0(struct gspca_dev *gspca_dev)
1831{
1832}
1833
1834static void sd_close(struct gspca_dev *gspca_dev)
1835{
1836}
1837
1838static void sd_pkt_scan(struct gspca_dev *gspca_dev, 1885static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1839 struct gspca_frame *frame, /* target */ 1886 struct gspca_frame *frame, /* target */
1840 __u8 *data, /* isoc packet */ 1887 __u8 *data, /* isoc packet */
@@ -2091,11 +2138,9 @@ static const struct sd_desc sd_desc = {
2091 .ctrls = sd_ctrls, 2138 .ctrls = sd_ctrls,
2092 .nctrls = ARRAY_SIZE(sd_ctrls), 2139 .nctrls = ARRAY_SIZE(sd_ctrls),
2093 .config = sd_config, 2140 .config = sd_config,
2094 .open = sd_open, 2141 .init = sd_init,
2095 .start = sd_start, 2142 .start = sd_start,
2096 .stopN = sd_stopN, 2143 .stopN = sd_stopN,
2097 .stop0 = sd_stop0,
2098 .close = sd_close,
2099 .pkt_scan = sd_pkt_scan, 2144 .pkt_scan = sd_pkt_scan,
2100}; 2145};
2101 2146
@@ -2132,6 +2177,10 @@ static struct usb_driver sd_driver = {
2132 .id_table = device_table, 2177 .id_table = device_table,
2133 .probe = sd_probe, 2178 .probe = sd_probe,
2134 .disconnect = gspca_disconnect, 2179 .disconnect = gspca_disconnect,
2180#ifdef CONFIG_PM
2181 .suspend = gspca_suspend,
2182 .resume = gspca_resume,
2183#endif
2135}; 2184};
2136 2185
2137/* -- module insert / remove -- */ 2186/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 7ef18d578811..83b5f740c947 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -56,12 +56,6 @@ MODULE_LICENSE("GPL");
56#define PAC207_GAIN_KNEE 20 56#define PAC207_GAIN_KNEE 20
57 57
58#define PAC207_AUTOGAIN_DEADZONE 30 58#define PAC207_AUTOGAIN_DEADZONE 30
59/* We calculating the autogain at the end of the transfer of a frame, at this
60 moment a frame with the old settings is being transmitted, and a frame is
61 being captured with the old settings. So if we adjust the autogain we must
62 ignore atleast the 2 next frames for the new settings to come into effect
63 before doing any other adjustments */
64#define PAC207_AUTOGAIN_IGNORE_FRAMES 3
65 59
66/* specific webcam descriptor */ 60/* specific webcam descriptor */
67struct sd { 61struct sd {
@@ -131,7 +125,8 @@ static struct ctrl sd_ctrls[] = {
131 .minimum = 0, 125 .minimum = 0,
132 .maximum = 1, 126 .maximum = 1,
133 .step = 1, 127 .step = 1,
134 .default_value = 1, 128#define AUTOGAIN_DEF 1
129 .default_value = AUTOGAIN_DEF,
135 .flags = 0, 130 .flags = 0,
136 }, 131 },
137 .set = sd_setautogain, 132 .set = sd_setautogain,
@@ -181,9 +176,6 @@ static const __u8 pac207_sensor_init[][8] = {
181 /* 48 reg_72 Rate Control end BalSize_4a =0x36 */ 176 /* 48 reg_72 Rate Control end BalSize_4a =0x36 */
182static const __u8 PacReg72[] = { 0x00, 0x00, 0x36, 0x00 }; 177static const __u8 PacReg72[] = { 0x00, 0x00, 0x36, 0x00 };
183 178
184static const unsigned char pac207_sof_marker[5] =
185 { 0xff, 0xff, 0x00, 0xff, 0x96 };
186
187static int pac207_write_regs(struct gspca_dev *gspca_dev, u16 index, 179static int pac207_write_regs(struct gspca_dev *gspca_dev, u16 index,
188 const u8 *buffer, u16 length) 180 const u8 *buffer, u16 length)
189{ 181{
@@ -259,35 +251,32 @@ static int sd_config(struct gspca_dev *gspca_dev,
259 return -ENODEV; 251 return -ENODEV;
260 } 252 }
261 253
262 pac207_write_reg(gspca_dev, 0x41, 0x00);
263 /* Bit_0=Image Format,
264 * Bit_1=LED,
265 * Bit_2=Compression test mode enable */
266 pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
267 pac207_write_reg(gspca_dev, 0x11, 0x30); /* Analog Bias */
268
269 PDEBUG(D_PROBE, 254 PDEBUG(D_PROBE,
270 "Pixart PAC207BCA Image Processor and Control Chip detected" 255 "Pixart PAC207BCA Image Processor and Control Chip detected"
271 " (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct); 256 " (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
272 257
273 cam = &gspca_dev->cam; 258 cam = &gspca_dev->cam;
274 cam->dev_name = (char *) id->driver_info;
275 cam->epaddr = 0x05; 259 cam->epaddr = 0x05;
276 cam->cam_mode = sif_mode; 260 cam->cam_mode = sif_mode;
277 cam->nmodes = ARRAY_SIZE(sif_mode); 261 cam->nmodes = ARRAY_SIZE(sif_mode);
278 sd->brightness = PAC207_BRIGHTNESS_DEFAULT; 262 sd->brightness = PAC207_BRIGHTNESS_DEFAULT;
279 sd->exposure = PAC207_EXPOSURE_DEFAULT; 263 sd->exposure = PAC207_EXPOSURE_DEFAULT;
280 sd->gain = PAC207_GAIN_DEFAULT; 264 sd->gain = PAC207_GAIN_DEFAULT;
265 sd->autogain = AUTOGAIN_DEF;
281 266
282 return 0; 267 return 0;
283} 268}
284 269
285/* this function is called at open time */ 270/* this function is called at probe and resume time */
286static int sd_open(struct gspca_dev *gspca_dev) 271static int sd_init(struct gspca_dev *gspca_dev)
287{ 272{
288 struct sd *sd = (struct sd *) gspca_dev; 273 pac207_write_reg(gspca_dev, 0x41, 0x00);
274 /* Bit_0=Image Format,
275 * Bit_1=LED,
276 * Bit_2=Compression test mode enable */
277 pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
278 pac207_write_reg(gspca_dev, 0x11, 0x30); /* Analog Bias */
289 279
290 sd->autogain = 1;
291 return 0; 280 return 0;
292} 281}
293 282
@@ -343,14 +332,8 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
343 pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */ 332 pac207_write_reg(gspca_dev, 0x0f, 0x00); /* Power Control */
344} 333}
345 334
346static void sd_stop0(struct gspca_dev *gspca_dev) 335/* Include pac common sof detection functions */
347{ 336#include "pac_common.h"
348}
349
350/* this function is called at close time */
351static void sd_close(struct gspca_dev *gspca_dev)
352{
353}
354 337
355static void pac207_do_auto_gain(struct gspca_dev *gspca_dev) 338static void pac207_do_auto_gain(struct gspca_dev *gspca_dev)
356{ 339{
@@ -365,33 +348,7 @@ static void pac207_do_auto_gain(struct gspca_dev *gspca_dev)
365 else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum, 348 else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum,
366 100 + sd->brightness / 2, PAC207_AUTOGAIN_DEADZONE, 349 100 + sd->brightness / 2, PAC207_AUTOGAIN_DEADZONE,
367 PAC207_GAIN_KNEE, PAC207_EXPOSURE_KNEE)) 350 PAC207_GAIN_KNEE, PAC207_EXPOSURE_KNEE))
368 sd->autogain_ignore_frames = PAC207_AUTOGAIN_IGNORE_FRAMES; 351 sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
369}
370
371static unsigned char *pac207_find_sof(struct gspca_dev *gspca_dev,
372 unsigned char *m, int len)
373{
374 struct sd *sd = (struct sd *) gspca_dev;
375 int i;
376
377 /* Search for the SOF marker (fixed part) in the header */
378 for (i = 0; i < len; i++) {
379 if (m[i] == pac207_sof_marker[sd->sof_read]) {
380 sd->sof_read++;
381 if (sd->sof_read == sizeof(pac207_sof_marker)) {
382 PDEBUG(D_STREAM,
383 "SOF found, bytes to analyze: %u."
384 " Frame starts at byte #%u",
385 len, i + 1);
386 sd->sof_read = 0;
387 return m + i + 1;
388 }
389 } else {
390 sd->sof_read = 0;
391 }
392 }
393
394 return NULL;
395} 352}
396 353
397static void sd_pkt_scan(struct gspca_dev *gspca_dev, 354static void sd_pkt_scan(struct gspca_dev *gspca_dev,
@@ -402,14 +359,14 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
402 struct sd *sd = (struct sd *) gspca_dev; 359 struct sd *sd = (struct sd *) gspca_dev;
403 unsigned char *sof; 360 unsigned char *sof;
404 361
405 sof = pac207_find_sof(gspca_dev, data, len); 362 sof = pac_find_sof(gspca_dev, data, len);
406 if (sof) { 363 if (sof) {
407 int n; 364 int n;
408 365
409 /* finish decoding current frame */ 366 /* finish decoding current frame */
410 n = sof - data; 367 n = sof - data;
411 if (n > sizeof pac207_sof_marker) 368 if (n > sizeof pac_sof_marker)
412 n -= sizeof pac207_sof_marker; 369 n -= sizeof pac_sof_marker;
413 else 370 else
414 n = 0; 371 n = 0;
415 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame, 372 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame,
@@ -537,7 +494,7 @@ static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
537 sd->gain = PAC207_GAIN_DEFAULT; 494 sd->gain = PAC207_GAIN_DEFAULT;
538 if (gspca_dev->streaming) { 495 if (gspca_dev->streaming) {
539 sd->autogain_ignore_frames = 496 sd->autogain_ignore_frames =
540 PAC207_AUTOGAIN_IGNORE_FRAMES; 497 PAC_AUTOGAIN_IGNORE_FRAMES;
541 setexposure(gspca_dev); 498 setexposure(gspca_dev);
542 setgain(gspca_dev); 499 setgain(gspca_dev);
543 } 500 }
@@ -560,11 +517,9 @@ static const struct sd_desc sd_desc = {
560 .ctrls = sd_ctrls, 517 .ctrls = sd_ctrls,
561 .nctrls = ARRAY_SIZE(sd_ctrls), 518 .nctrls = ARRAY_SIZE(sd_ctrls),
562 .config = sd_config, 519 .config = sd_config,
563 .open = sd_open, 520 .init = sd_init,
564 .start = sd_start, 521 .start = sd_start,
565 .stopN = sd_stopN, 522 .stopN = sd_stopN,
566 .stop0 = sd_stop0,
567 .close = sd_close,
568 .dq_callback = pac207_do_auto_gain, 523 .dq_callback = pac207_do_auto_gain,
569 .pkt_scan = sd_pkt_scan, 524 .pkt_scan = sd_pkt_scan,
570}; 525};
@@ -597,6 +552,10 @@ static struct usb_driver sd_driver = {
597 .id_table = device_table, 552 .id_table = device_table,
598 .probe = sd_probe, 553 .probe = sd_probe,
599 .disconnect = gspca_disconnect, 554 .disconnect = gspca_disconnect,
555#ifdef CONFIG_PM
556 .suspend = gspca_suspend,
557 .resume = gspca_resume,
558#endif
600}; 559};
601 560
602/* -- module insert / remove -- */ 561/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 815bea6edc44..d4be51843286 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -19,6 +19,36 @@
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22/* Some documentation about various registers as determined by trial and error.
23 When the register addresses differ between the 7202 and the 7311 the 2
24 different addresses are written as 7302addr/7311addr, when one of the 2
25 addresses is a - sign that register description is not valid for the
26 matching IC.
27
28 Register page 1:
29
30 Address Description
31 -/0x08 Unknown compressor related, must always be 8 except when not
32 in 640x480 resolution and page 4 reg 2 <= 3 then set it to 9 !
33 -/0x1b Auto white balance related, bit 0 is AWB enable (inverted)
34 bits 345 seem to toggle per color gains on/off (inverted)
35 0x78 Global control, bit 6 controls the LED (inverted)
36 -/0x80 JPEG compression ratio ? Best not touched
37
38 Register page 3/4:
39
40 Address Description
41 0x02 Clock divider 2-63, fps =~ 60 / val. Must be a multiple of 3 on
42 the 7302, so one of 3, 6, 9, ..., except when between 6 and 12?
43 -/0x0f Master gain 1-245, low value = high gain
44 0x10/- Master gain 0-31
45 -/0x10 Another gain 0-15, limited influence (1-2x gain I guess)
46 0x21 Bitfield: 0-1 unused, 2-3 vflip/hflip, 4-5 unknown, 6-7 unused
47 -/0x27 Seems to toggle various gains on / off, Setting bit 7 seems to
48 completely disable the analog amplification block. Set to 0x68
49 for max gain, 0x14 for minimal gain.
50*/
51
22#define MODULE_NAME "pac7311" 52#define MODULE_NAME "pac7311"
23 53
24#include "gspca.h" 54#include "gspca.h"
@@ -31,18 +61,23 @@ MODULE_LICENSE("GPL");
31struct sd { 61struct sd {
32 struct gspca_dev gspca_dev; /* !! must be the first item */ 62 struct gspca_dev gspca_dev; /* !! must be the first item */
33 63
34 int lum_sum;
35 atomic_t avg_lum;
36 atomic_t do_gain;
37
38 unsigned char brightness; 64 unsigned char brightness;
39 unsigned char contrast; 65 unsigned char contrast;
40 unsigned char colors; 66 unsigned char colors;
67 unsigned char gain;
68 unsigned char exposure;
41 unsigned char autogain; 69 unsigned char autogain;
70 __u8 hflip;
71 __u8 vflip;
72
73 __u8 sensor;
74#define SENSOR_PAC7302 0
75#define SENSOR_PAC7311 1
42 76
43 char ffseq; 77 u8 sof_read;
44 signed char ag_cnt; 78 u8 autogain_ignore_frames;
45#define AG_CNT_START 13 79
80 atomic_t avg_lum;
46}; 81};
47 82
48/* V4L2 controls supported by the driver */ 83/* V4L2 controls supported by the driver */
@@ -54,8 +89,18 @@ static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
54static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); 89static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
55static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val); 90static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
56static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val); 91static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
92static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
93static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
94static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
95static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
96static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
97static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
98static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
99static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
57 100
58static struct ctrl sd_ctrls[] = { 101static struct ctrl sd_ctrls[] = {
102/* This control is pac7302 only */
103#define BRIGHTNESS_IDX 0
59 { 104 {
60 { 105 {
61 .id = V4L2_CID_BRIGHTNESS, 106 .id = V4L2_CID_BRIGHTNESS,
@@ -71,13 +116,15 @@ static struct ctrl sd_ctrls[] = {
71 .set = sd_setbrightness, 116 .set = sd_setbrightness,
72 .get = sd_getbrightness, 117 .get = sd_getbrightness,
73 }, 118 },
119/* This control is for both the 7302 and the 7311 */
74 { 120 {
75 { 121 {
76 .id = V4L2_CID_CONTRAST, 122 .id = V4L2_CID_CONTRAST,
77 .type = V4L2_CTRL_TYPE_INTEGER, 123 .type = V4L2_CTRL_TYPE_INTEGER,
78 .name = "Contrast", 124 .name = "Contrast",
79 .minimum = 0, 125 .minimum = 0,
80 .maximum = 255, 126#define CONTRAST_MAX 255
127 .maximum = CONTRAST_MAX,
81 .step = 1, 128 .step = 1,
82#define CONTRAST_DEF 127 129#define CONTRAST_DEF 127
83 .default_value = CONTRAST_DEF, 130 .default_value = CONTRAST_DEF,
@@ -85,13 +132,16 @@ static struct ctrl sd_ctrls[] = {
85 .set = sd_setcontrast, 132 .set = sd_setcontrast,
86 .get = sd_getcontrast, 133 .get = sd_getcontrast,
87 }, 134 },
135/* This control is pac7302 only */
136#define SATURATION_IDX 2
88 { 137 {
89 { 138 {
90 .id = V4L2_CID_SATURATION, 139 .id = V4L2_CID_SATURATION,
91 .type = V4L2_CTRL_TYPE_INTEGER, 140 .type = V4L2_CTRL_TYPE_INTEGER,
92 .name = "Color", 141 .name = "Saturation",
93 .minimum = 0, 142 .minimum = 0,
94 .maximum = 255, 143#define COLOR_MAX 255
144 .maximum = COLOR_MAX,
95 .step = 1, 145 .step = 1,
96#define COLOR_DEF 127 146#define COLOR_DEF 127
97 .default_value = COLOR_DEF, 147 .default_value = COLOR_DEF,
@@ -99,6 +149,39 @@ static struct ctrl sd_ctrls[] = {
99 .set = sd_setcolors, 149 .set = sd_setcolors,
100 .get = sd_getcolors, 150 .get = sd_getcolors,
101 }, 151 },
152/* All controls below are for both the 7302 and the 7311 */
153 {
154 {
155 .id = V4L2_CID_GAIN,
156 .type = V4L2_CTRL_TYPE_INTEGER,
157 .name = "Gain",
158 .minimum = 0,
159#define GAIN_MAX 255
160 .maximum = GAIN_MAX,
161 .step = 1,
162#define GAIN_DEF 127
163#define GAIN_KNEE 255 /* Gain seems to cause little noise on the pac73xx */
164 .default_value = GAIN_DEF,
165 },
166 .set = sd_setgain,
167 .get = sd_getgain,
168 },
169 {
170 {
171 .id = V4L2_CID_EXPOSURE,
172 .type = V4L2_CTRL_TYPE_INTEGER,
173 .name = "Exposure",
174 .minimum = 0,
175#define EXPOSURE_MAX 255
176 .maximum = EXPOSURE_MAX,
177 .step = 1,
178#define EXPOSURE_DEF 16 /* 32 ms / 30 fps */
179#define EXPOSURE_KNEE 50 /* 100 ms / 10 fps */
180 .default_value = EXPOSURE_DEF,
181 },
182 .set = sd_setexposure,
183 .get = sd_getexposure,
184 },
102 { 185 {
103 { 186 {
104 .id = V4L2_CID_AUTOGAIN, 187 .id = V4L2_CID_AUTOGAIN,
@@ -113,101 +196,207 @@ static struct ctrl sd_ctrls[] = {
113 .set = sd_setautogain, 196 .set = sd_setautogain,
114 .get = sd_getautogain, 197 .get = sd_getautogain,
115 }, 198 },
199 {
200 {
201 .id = V4L2_CID_HFLIP,
202 .type = V4L2_CTRL_TYPE_BOOLEAN,
203 .name = "Mirror",
204 .minimum = 0,
205 .maximum = 1,
206 .step = 1,
207#define HFLIP_DEF 0
208 .default_value = HFLIP_DEF,
209 },
210 .set = sd_sethflip,
211 .get = sd_gethflip,
212 },
213 {
214 {
215 .id = V4L2_CID_VFLIP,
216 .type = V4L2_CTRL_TYPE_BOOLEAN,
217 .name = "Vflip",
218 .minimum = 0,
219 .maximum = 1,
220 .step = 1,
221#define VFLIP_DEF 0
222 .default_value = VFLIP_DEF,
223 },
224 .set = sd_setvflip,
225 .get = sd_getvflip,
226 },
116}; 227};
117 228
118static struct v4l2_pix_format vga_mode[] = { 229static struct v4l2_pix_format vga_mode[] = {
119 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 230 {160, 120, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE,
120 .bytesperline = 160, 231 .bytesperline = 160,
121 .sizeimage = 160 * 120 * 3 / 8 + 590, 232 .sizeimage = 160 * 120 * 3 / 8 + 590,
122 .colorspace = V4L2_COLORSPACE_JPEG, 233 .colorspace = V4L2_COLORSPACE_JPEG,
123 .priv = 2}, 234 .priv = 2},
124 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 235 {320, 240, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE,
125 .bytesperline = 320, 236 .bytesperline = 320,
126 .sizeimage = 320 * 240 * 3 / 8 + 590, 237 .sizeimage = 320 * 240 * 3 / 8 + 590,
127 .colorspace = V4L2_COLORSPACE_JPEG, 238 .colorspace = V4L2_COLORSPACE_JPEG,
128 .priv = 1}, 239 .priv = 1},
129 {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 240 {640, 480, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE,
130 .bytesperline = 640, 241 .bytesperline = 640,
131 .sizeimage = 640 * 480 * 3 / 8 + 590, 242 .sizeimage = 640 * 480 * 3 / 8 + 590,
132 .colorspace = V4L2_COLORSPACE_JPEG, 243 .colorspace = V4L2_COLORSPACE_JPEG,
133 .priv = 0}, 244 .priv = 0},
134}; 245};
135 246
136#define PAC7311_JPEG_HEADER_SIZE (sizeof pac7311_jpeg_header) /* (594) */ 247/* pac 7302 */
137 248static const __u8 init_7302[] = {
138static const __u8 pac7311_jpeg_header[] = { 249/* index,value */
139 0xff, 0xd8, 250 0xff, 0x01, /* page 1 */
140 0xff, 0xe0, 0x00, 0x03, 0x20, 251 0x78, 0x00, /* deactivate */
141 0xff, 0xc0, 0x00, 0x11, 0x08, 252 0xff, 0x01,
142 0x01, 0xe0, /* 12: height */ 253 0x78, 0x40, /* led off */
143 0x02, 0x80, /* 14: width */ 254};
144 0x03, /* 16 */ 255static const __u8 start_7302[] = {
145 0x01, 0x21, 0x00, 256/* index, len, [value]* */
146 0x02, 0x11, 0x01, 257 0xff, 1, 0x00, /* page 0 */
147 0x03, 0x11, 0x01, 258 0x00, 12, 0x01, 0x40, 0x40, 0x40, 0x01, 0xe0, 0x02, 0x80,
148 0xff, 0xdb, 0x00, 0x84, 259 0x00, 0x00, 0x00, 0x00,
149 0x00, 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, 0x0d, 260 0x0d, 24, 0x03, 0x01, 0x00, 0xb5, 0x07, 0xcb, 0x00, 0x00,
150 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, 0x1a, 0x18, 0x16, 261 0x07, 0xc8, 0x00, 0xea, 0x07, 0xcf, 0x07, 0xf7,
151 0x16, 0x18, 0x31, 0x23, 0x25, 0x1d, 0x28, 0x3a, 0x33, 0x3d, 262 0x07, 0x7e, 0x01, 0x0b, 0x00, 0x00, 0x00, 0x11,
152 0x3c, 0x39, 0x33, 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 263 0x26, 2, 0xaa, 0xaa,
153 0x44, 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, 0x5f, 264 0x2e, 1, 0x31,
154 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, 0x79, 0x70, 0x64, 265 0x38, 1, 0x01,
155 0x78, 0x5c, 0x65, 0x67, 0x63, 0x01, 0x11, 0x12, 0x12, 0x18, 266 0x3a, 3, 0x14, 0xff, 0x5a,
156 0x15, 0x18, 0x2f, 0x1a, 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 267 0x43, 11, 0x00, 0x0a, 0x18, 0x11, 0x01, 0x2c, 0x88, 0x11,
157 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 268 0x00, 0x54, 0x11,
158 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 269 0x55, 1, 0x00,
159 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 270 0x62, 4, 0x10, 0x1e, 0x1e, 0x18,
160 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 271 0x6b, 1, 0x00,
161 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 272 0x6e, 3, 0x08, 0x06, 0x00,
162 0xff, 0xc4, 0x01, 0xa2, 0x00, 0x00, 0x01, 0x05, 0x01, 0x01, 273 0x72, 3, 0x00, 0xff, 0x00,
163 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 274 0x7d, 23, 0x01, 0x01, 0x58, 0x46, 0x50, 0x3c, 0x50, 0x3c,
164 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 275 0x54, 0x46, 0x54, 0x56, 0x52, 0x50, 0x52, 0x50,
165 0x09, 0x0a, 0x0b, 0x10, 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 276 0x56, 0x64, 0xa4, 0x00, 0xda, 0x00, 0x00,
166 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d, 277 0xa2, 10, 0x22, 0x2c, 0x3c, 0x54, 0x69, 0x7c, 0x9c, 0xb9,
167 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 278 0xd2, 0xeb,
168 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 279 0xaf, 1, 0x02,
169 0x81, 0x91, 0xa1, 0x08, 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 280 0xb5, 2, 0x08, 0x08,
170 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, 281 0xb8, 2, 0x08, 0x88,
171 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 282 0xc4, 4, 0xae, 0x01, 0x04, 0x01,
172 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 283 0xcc, 1, 0x00,
173 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 284 0xd1, 11, 0x01, 0x30, 0x49, 0x5e, 0x6f, 0x7f, 0x8e, 0xa9,
174 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 285 0xc1, 0xd7, 0xec,
175 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, 286 0xdc, 1, 0x01,
176 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 287 0xff, 1, 0x01, /* page 1 */
177 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 288 0x12, 3, 0x02, 0x00, 0x01,
178 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 289 0x3e, 2, 0x00, 0x00,
179 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 290 0x76, 5, 0x01, 0x20, 0x40, 0x00, 0xf2,
180 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 291 0x7c, 1, 0x00,
181 0xd9, 0xda, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 292 0x7f, 10, 0x4b, 0x0f, 0x01, 0x2c, 0x02, 0x58, 0x03, 0x20,
182 0xe9, 0xea, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 293 0x02, 0x00,
183 0xf9, 0xfa, 0x01, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 294 0x96, 5, 0x01, 0x10, 0x04, 0x01, 0x04,
184 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 295 0xc8, 14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00,
185 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 296 0x07, 0x00, 0x01, 0x07, 0x04, 0x01,
186 0x0b, 0x11, 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 297 0xd8, 1, 0x01,
187 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 298 0xdb, 2, 0x00, 0x01,
188 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 299 0xde, 7, 0x00, 0x01, 0x04, 0x04, 0x00, 0x00, 0x00,
189 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 300 0xe6, 4, 0x00, 0x00, 0x00, 0x01,
190 0x42, 0x91, 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, 301 0xeb, 1, 0x00,
191 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 302 0xff, 1, 0x02, /* page 2 */
192 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26, 0x27, 0x28, 0x29, 0x2a, 303 0x22, 1, 0x00,
193 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 304 0xff, 1, 0x03, /* page 3 */
194 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 305 0x00, 255, /* load the page 3 */
195 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 306 0x11, 1, 0x01,
196 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x82, 0x83, 307 0xff, 1, 0x02, /* page 2 */
197 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 308 0x13, 1, 0x00,
198 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 309 0x22, 4, 0x1f, 0xa4, 0xf0, 0x96,
199 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 310 0x27, 2, 0x14, 0x0c,
200 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 311 0x2a, 5, 0xc8, 0x00, 0x18, 0x12, 0x22,
201 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 312 0x64, 8, 0x00, 0x00, 0xf0, 0x01, 0x14, 0x44, 0x44, 0x44,
202 0xd9, 0xda, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 313 0x6e, 1, 0x08,
203 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 314 0xff, 1, 0x01, /* page 1 */
204 0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 315 0x78, 1, 0x00,
205 0x11, 0x00, 0x3f, 0x00 316 0, 0 /* end of sequence */
317};
318
319/* page 3 - the value 0xaa says skip the index - see reg_w_page() */
320static const __u8 page3_7302[] = {
321 0x90, 0x40, 0x03, 0x50, 0xc2, 0x01, 0x14, 0x16,
322 0x14, 0x12, 0x00, 0x00, 0x00, 0x02, 0x33, 0x00,
323 0x0f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
324 0x00, 0x00, 0x00, 0x47, 0x01, 0xb3, 0x01, 0x00,
325 0x00, 0x08, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x21,
326 0x00, 0x00, 0x00, 0x54, 0xf4, 0x02, 0x52, 0x54,
327 0xa4, 0xb8, 0xe0, 0x2a, 0xf6, 0x00, 0x00, 0x00,
328 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
329 0x00, 0xfc, 0x00, 0xf2, 0x1f, 0x04, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0xc0, 0xc0, 0x10, 0x00, 0x00,
331 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x40, 0xff, 0x03, 0x19, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
334 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc8, 0xc8,
335 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50,
336 0x08, 0x10, 0x24, 0x40, 0x00, 0x00, 0x00, 0x00,
337 0x01, 0x00, 0x02, 0x47, 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
339 0x00, 0x02, 0xfa, 0x00, 0x64, 0x5a, 0x28, 0x00,
340 0x00
341};
342
343/* pac 7311 */
344static const __u8 init_7311[] = {
345 0x78, 0x40, /* Bit_0=start stream, Bit_6=LED */
346 0x78, 0x40, /* Bit_0=start stream, Bit_6=LED */
347 0x78, 0x44, /* Bit_0=start stream, Bit_6=LED */
348 0xff, 0x04,
349 0x27, 0x80,
350 0x28, 0xca,
351 0x29, 0x53,
352 0x2a, 0x0e,
353 0xff, 0x01,
354 0x3e, 0x20,
355};
356
357static const __u8 start_7311[] = {
358/* index, len, [value]* */
359 0xff, 1, 0x01, /* page 1 */
360 0x02, 43, 0x48, 0x0a, 0x40, 0x08, 0x00, 0x00, 0x08, 0x00,
361 0x06, 0xff, 0x11, 0xff, 0x5a, 0x30, 0x90, 0x4c,
362 0x00, 0x07, 0x00, 0x0a, 0x10, 0x00, 0xa0, 0x10,
363 0x02, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x01, 0x00,
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00,
366 0x3e, 42, 0x00, 0x00, 0x78, 0x52, 0x4a, 0x52, 0x78, 0x6e,
367 0x48, 0x46, 0x48, 0x6e, 0x5f, 0x49, 0x42, 0x49,
368 0x5f, 0x5f, 0x49, 0x42, 0x49, 0x5f, 0x6e, 0x48,
369 0x46, 0x48, 0x6e, 0x78, 0x52, 0x4a, 0x52, 0x78,
370 0x00, 0x00, 0x09, 0x1b, 0x34, 0x49, 0x5c, 0x9b,
371 0xd0, 0xff,
372 0x78, 6, 0x44, 0x00, 0xf2, 0x01, 0x01, 0x80,
373 0x7f, 18, 0x2a, 0x1c, 0x00, 0xc8, 0x02, 0x58, 0x03, 0x84,
374 0x12, 0x00, 0x1a, 0x04, 0x08, 0x0c, 0x10, 0x14,
375 0x18, 0x20,
376 0x96, 3, 0x01, 0x08, 0x04,
377 0xa0, 4, 0x44, 0x44, 0x44, 0x04,
378 0xf0, 13, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x20, 0x00,
379 0x3f, 0x00, 0x0a, 0x01, 0x00,
380 0xff, 1, 0x04, /* page 4 */
381 0x00, 254, /* load the page 4 */
382 0x11, 1, 0x01,
383 0, 0 /* end of sequence */
384};
385
386/* page 4 - the value 0xaa says skip the index - see reg_w_page() */
387static const __u8 page4_7311[] = {
388 0xaa, 0xaa, 0x04, 0x54, 0x07, 0x2b, 0x09, 0x0f,
389 0x09, 0x00, 0xaa, 0xaa, 0x07, 0x00, 0x00, 0x62,
390 0x08, 0xaa, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,
391 0x00, 0x00, 0x00, 0x03, 0xa0, 0x01, 0xf4, 0xaa,
392 0xaa, 0x00, 0x08, 0xaa, 0x03, 0xaa, 0x00, 0x68,
393 0xca, 0x10, 0x06, 0x78, 0x00, 0x00, 0x00, 0x00,
394 0x23, 0x28, 0x04, 0x11, 0x00, 0x00
206}; 395};
207 396
208static void reg_w_buf(struct gspca_dev *gspca_dev, 397static void reg_w_buf(struct gspca_dev *gspca_dev,
209 __u16 index, 398 __u8 index,
210 const char *buffer, __u16 len) 399 const char *buffer, int len)
211{ 400{
212 memcpy(gspca_dev->usb_buf, buffer, len); 401 memcpy(gspca_dev->usb_buf, buffer, len);
213 usb_control_msg(gspca_dev->dev, 402 usb_control_msg(gspca_dev->dev,
@@ -219,21 +408,9 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
219 500); 408 500);
220} 409}
221 410
222static __u8 reg_r(struct gspca_dev *gspca_dev,
223 __u16 index)
224{
225 usb_control_msg(gspca_dev->dev,
226 usb_rcvctrlpipe(gspca_dev->dev, 0),
227 0, /* request */
228 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
229 0, /* value */
230 index, gspca_dev->usb_buf, 1,
231 500);
232 return gspca_dev->usb_buf[0];
233}
234 411
235static void reg_w(struct gspca_dev *gspca_dev, 412static void reg_w(struct gspca_dev *gspca_dev,
236 __u16 index, 413 __u8 index,
237 __u8 value) 414 __u8 value)
238{ 415{
239 gspca_dev->usb_buf[0] = value; 416 gspca_dev->usb_buf[0] = value;
@@ -241,10 +418,78 @@ static void reg_w(struct gspca_dev *gspca_dev,
241 usb_sndctrlpipe(gspca_dev->dev, 0), 418 usb_sndctrlpipe(gspca_dev->dev, 0),
242 0, /* request */ 419 0, /* request */
243 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 420 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
244 value, index, gspca_dev->usb_buf, 1, 421 0, index, gspca_dev->usb_buf, 1,
245 500); 422 500);
246} 423}
247 424
425static void reg_w_seq(struct gspca_dev *gspca_dev,
426 const __u8 *seq, int len)
427{
428 while (--len >= 0) {
429 reg_w(gspca_dev, seq[0], seq[1]);
430 seq += 2;
431 }
432}
433
434/* load the beginning of a page */
435static void reg_w_page(struct gspca_dev *gspca_dev,
436 const __u8 *page, int len)
437{
438 int index;
439
440 for (index = 0; index < len; index++) {
441 if (page[index] == 0xaa) /* skip this index */
442 continue;
443 gspca_dev->usb_buf[0] = page[index];
444 usb_control_msg(gspca_dev->dev,
445 usb_sndctrlpipe(gspca_dev->dev, 0),
446 0, /* request */
447 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
448 0, index, gspca_dev->usb_buf, 1,
449 500);
450 }
451}
452
453/* output a variable sequence */
454static void reg_w_var(struct gspca_dev *gspca_dev,
455 const __u8 *seq)
456{
457 int index, len;
458
459 for (;;) {
460 index = *seq++;
461 len = *seq++;
462 switch (len) {
463 case 0:
464 return;
465 case 254:
466 reg_w_page(gspca_dev, page4_7311, sizeof page4_7311);
467 break;
468 case 255:
469 reg_w_page(gspca_dev, page3_7302, sizeof page3_7302);
470 break;
471 default:
472 if (len > 64) {
473 PDEBUG(D_ERR|D_STREAM,
474 "Incorrect variable sequence");
475 return;
476 }
477 while (len > 0) {
478 if (len < 8) {
479 reg_w_buf(gspca_dev, index, seq, len);
480 seq += len;
481 break;
482 }
483 reg_w_buf(gspca_dev, index, seq, 8);
484 seq += 8;
485 index += 8;
486 len -= 8;
487 }
488 }
489 }
490 /* not reached */
491}
492
248/* this function is called at probe time */ 493/* this function is called at probe time */
249static int sd_config(struct gspca_dev *gspca_dev, 494static int sd_config(struct gspca_dev *gspca_dev,
250 const struct usb_device_id *id) 495 const struct usb_device_id *id)
@@ -252,203 +497,245 @@ static int sd_config(struct gspca_dev *gspca_dev,
252 struct sd *sd = (struct sd *) gspca_dev; 497 struct sd *sd = (struct sd *) gspca_dev;
253 struct cam *cam; 498 struct cam *cam;
254 499
255 PDEBUG(D_CONF, "Find Sensor PAC7311");
256 reg_w(gspca_dev, 0x78, 0x40); /* Bit_0=start stream, Bit_7=LED */
257 reg_w(gspca_dev, 0x78, 0x40); /* Bit_0=start stream, Bit_7=LED */
258 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_7=LED */
259 reg_w(gspca_dev, 0xff, 0x04);
260 reg_w(gspca_dev, 0x27, 0x80);
261 reg_w(gspca_dev, 0x28, 0xca);
262 reg_w(gspca_dev, 0x29, 0x53);
263 reg_w(gspca_dev, 0x2a, 0x0e);
264 reg_w(gspca_dev, 0xff, 0x01);
265 reg_w(gspca_dev, 0x3e, 0x20);
266
267 cam = &gspca_dev->cam; 500 cam = &gspca_dev->cam;
268 cam->epaddr = 0x05; 501 cam->epaddr = 0x05;
269 cam->cam_mode = vga_mode; 502
270 cam->nmodes = ARRAY_SIZE(vga_mode); 503 sd->sensor = id->driver_info;
504 if (sd->sensor == SENSOR_PAC7302) {
505 PDEBUG(D_CONF, "Find Sensor PAC7302");
506 cam->cam_mode = &vga_mode[2]; /* only 640x480 */
507 cam->nmodes = 1;
508 } else {
509 PDEBUG(D_CONF, "Find Sensor PAC7311");
510 cam->cam_mode = vga_mode;
511 cam->nmodes = ARRAY_SIZE(vga_mode);
512 gspca_dev->ctrl_dis = (1 << BRIGHTNESS_IDX)
513 | (1 << SATURATION_IDX);
514 }
271 515
272 sd->brightness = BRIGHTNESS_DEF; 516 sd->brightness = BRIGHTNESS_DEF;
273 sd->contrast = CONTRAST_DEF; 517 sd->contrast = CONTRAST_DEF;
274 sd->colors = COLOR_DEF; 518 sd->colors = COLOR_DEF;
519 sd->gain = GAIN_DEF;
520 sd->exposure = EXPOSURE_DEF;
275 sd->autogain = AUTOGAIN_DEF; 521 sd->autogain = AUTOGAIN_DEF;
276 sd->ag_cnt = -1; 522 sd->hflip = HFLIP_DEF;
523 sd->vflip = VFLIP_DEF;
277 return 0; 524 return 0;
278} 525}
279 526
280static void setbrightness(struct gspca_dev *gspca_dev) 527/* This function is used by pac7302 only */
528static void setbrightcont(struct gspca_dev *gspca_dev)
529{
530 struct sd *sd = (struct sd *) gspca_dev;
531 int i, v;
532 static const __u8 max[10] =
533 {0x29, 0x33, 0x42, 0x5a, 0x6e, 0x80, 0x9f, 0xbb,
534 0xd4, 0xec};
535 static const __u8 delta[10] =
536 {0x35, 0x33, 0x33, 0x2f, 0x2a, 0x25, 0x1e, 0x17,
537 0x11, 0x0b};
538
539 reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
540 for (i = 0; i < 10; i++) {
541 v = max[i];
542 v += (sd->brightness - BRIGHTNESS_MAX)
543 * 150 / BRIGHTNESS_MAX; /* 200 ? */
544 v -= delta[i] * sd->contrast / CONTRAST_MAX;
545 if (v < 0)
546 v = 0;
547 else if (v > 0xff)
548 v = 0xff;
549 reg_w(gspca_dev, 0xa2 + i, v);
550 }
551 reg_w(gspca_dev, 0xdc, 0x01);
552}
553
554/* This function is used by pac7311 only */
555static void setcontrast(struct gspca_dev *gspca_dev)
281{ 556{
282 struct sd *sd = (struct sd *) gspca_dev; 557 struct sd *sd = (struct sd *) gspca_dev;
283 int brightness;
284 558
285/*jfm: inverted?*/
286 brightness = BRIGHTNESS_MAX - sd->brightness;
287 reg_w(gspca_dev, 0xff, 0x04); 559 reg_w(gspca_dev, 0xff, 0x04);
288/* reg_w(gspca_dev, 0x0e, 0x00); */ 560 reg_w(gspca_dev, 0x10, sd->contrast >> 4);
289 reg_w(gspca_dev, 0x0f, brightness);
290 /* load registers to sensor (Bit 0, auto clear) */ 561 /* load registers to sensor (Bit 0, auto clear) */
291 reg_w(gspca_dev, 0x11, 0x01); 562 reg_w(gspca_dev, 0x11, 0x01);
292 PDEBUG(D_CONF|D_STREAM, "brightness: %i", brightness);
293} 563}
294 564
295static void setcontrast(struct gspca_dev *gspca_dev) 565/* This function is used by pac7302 only */
566static void setcolors(struct gspca_dev *gspca_dev)
296{ 567{
297 struct sd *sd = (struct sd *) gspca_dev; 568 struct sd *sd = (struct sd *) gspca_dev;
569 int i, v;
570 static const int a[9] =
571 {217, -212, 0, -101, 170, -67, -38, -315, 355};
572 static const int b[9] =
573 {19, 106, 0, 19, 106, 1, 19, 106, 1};
298 574
299 reg_w(gspca_dev, 0xff, 0x01); 575 reg_w(gspca_dev, 0xff, 0x03); /* page 3 */
300 reg_w(gspca_dev, 0x80, sd->contrast);
301 /* load registers to sensor (Bit 0, auto clear) */
302 reg_w(gspca_dev, 0x11, 0x01); 576 reg_w(gspca_dev, 0x11, 0x01);
303 PDEBUG(D_CONF|D_STREAM, "contrast: %i", sd->contrast); 577 reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
578 reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
579 for (i = 0; i < 9; i++) {
580 v = a[i] * sd->colors / COLOR_MAX + b[i];
581 reg_w(gspca_dev, 0x0f + 2 * i, (v >> 8) & 0x07);
582 reg_w(gspca_dev, 0x0f + 2 * i + 1, v);
583 }
584 reg_w(gspca_dev, 0xdc, 0x01);
585 PDEBUG(D_CONF|D_STREAM, "color: %i", sd->colors);
304} 586}
305 587
306static void setcolors(struct gspca_dev *gspca_dev) 588static void setgain(struct gspca_dev *gspca_dev)
307{ 589{
308 struct sd *sd = (struct sd *) gspca_dev; 590 struct sd *sd = (struct sd *) gspca_dev;
309 591
310 reg_w(gspca_dev, 0xff, 0x01); 592 if (sd->sensor == SENSOR_PAC7302) {
311 reg_w(gspca_dev, 0x10, sd->colors); 593 reg_w(gspca_dev, 0xff, 0x03); /* page 3 */
594 reg_w(gspca_dev, 0x10, sd->gain >> 3);
595 } else {
596 int gain = GAIN_MAX - sd->gain;
597 if (gain < 1)
598 gain = 1;
599 else if (gain > 245)
600 gain = 245;
601 reg_w(gspca_dev, 0xff, 0x04); /* page 4 */
602 reg_w(gspca_dev, 0x0e, 0x00);
603 reg_w(gspca_dev, 0x0f, gain);
604 }
312 /* load registers to sensor (Bit 0, auto clear) */ 605 /* load registers to sensor (Bit 0, auto clear) */
313 reg_w(gspca_dev, 0x11, 0x01); 606 reg_w(gspca_dev, 0x11, 0x01);
314 PDEBUG(D_CONF|D_STREAM, "color: %i", sd->colors);
315} 607}
316 608
317static void setautogain(struct gspca_dev *gspca_dev) 609static void setexposure(struct gspca_dev *gspca_dev)
318{ 610{
319 struct sd *sd = (struct sd *) gspca_dev; 611 struct sd *sd = (struct sd *) gspca_dev;
612 __u8 reg;
613
614 /* register 2 of frame 3/4 contains the clock divider configuring the
615 no fps according to the formula: 60 / reg. sd->exposure is the
616 desired exposure time in ms. */
617 reg = 120 * sd->exposure / 1000;
618 if (reg < 2)
619 reg = 2;
620 else if (reg > 63)
621 reg = 63;
622
623 if (sd->sensor == SENSOR_PAC7302) {
624 /* On the pac7302 reg2 MUST be a multiple of 3, so round it to
625 the nearest multiple of 3, except when between 6 and 12? */
626 if (reg < 6 || reg > 12)
627 reg = ((reg + 1) / 3) * 3;
628 reg_w(gspca_dev, 0xff, 0x03); /* page 3 */
629 reg_w(gspca_dev, 0x02, reg);
630 } else {
631 reg_w(gspca_dev, 0xff, 0x04); /* page 4 */
632 reg_w(gspca_dev, 0x02, reg);
633 /* Page 1 register 8 must always be 0x08 except when not in
634 640x480 mode and Page3/4 reg 2 <= 3 then it must be 9 */
635 reg_w(gspca_dev, 0xff, 0x01);
636 if (gspca_dev->cam.cam_mode[(int)gspca_dev->curr_mode].priv &&
637 reg <= 3)
638 reg_w(gspca_dev, 0x08, 0x09);
639 else
640 reg_w(gspca_dev, 0x08, 0x08);
641 }
642 /* load registers to sensor (Bit 0, auto clear) */
643 reg_w(gspca_dev, 0x11, 0x01);
644}
320 645
321 if (sd->autogain) { 646static void sethvflip(struct gspca_dev *gspca_dev)
322 sd->lum_sum = 0; 647{
323 sd->ag_cnt = AG_CNT_START; 648 struct sd *sd = (struct sd *) gspca_dev;
649 __u8 data;
650
651 if (sd->sensor == SENSOR_PAC7302) {
652 reg_w(gspca_dev, 0xff, 0x03); /* page 3 */
653 data = (sd->hflip ? 0x08 : 0x00)
654 | (sd->vflip ? 0x04 : 0x00);
324 } else { 655 } else {
325 sd->ag_cnt = -1; 656 reg_w(gspca_dev, 0xff, 0x04); /* page 4 */
657 data = (sd->hflip ? 0x04 : 0x00)
658 | (sd->vflip ? 0x08 : 0x00);
326 } 659 }
660 reg_w(gspca_dev, 0x21, data);
661 /* load registers to sensor (Bit 0, auto clear) */
662 reg_w(gspca_dev, 0x11, 0x01);
327} 663}
328 664
329/* this function is called at open time */ 665/* this function is called at probe and resume time */
330static int sd_open(struct gspca_dev *gspca_dev) 666static int sd_init(struct gspca_dev *gspca_dev)
331{ 667{
332 reg_w(gspca_dev, 0x78, 0x00); /* Turn on LED */ 668 struct sd *sd = (struct sd *) gspca_dev;
669
670 if (sd->sensor == SENSOR_PAC7302)
671 reg_w_seq(gspca_dev, init_7302, sizeof init_7302);
672 else
673 reg_w_seq(gspca_dev, init_7311, sizeof init_7311);
674
333 return 0; 675 return 0;
334} 676}
335 677
336static void sd_start(struct gspca_dev *gspca_dev) 678static void sd_start(struct gspca_dev *gspca_dev)
337{ 679{
338 reg_w(gspca_dev, 0xff, 0x01); 680 struct sd *sd = (struct sd *) gspca_dev;
339 reg_w_buf(gspca_dev, 0x0002, "\x48\x0a\x40\x08\x00\x00\x08\x00", 8);
340 reg_w_buf(gspca_dev, 0x000a, "\x06\xff\x11\xff\x5a\x30\x90\x4c", 8);
341 reg_w_buf(gspca_dev, 0x0012, "\x00\x07\x00\x0a\x10\x00\xa0\x10", 8);
342 reg_w_buf(gspca_dev, 0x001a, "\x02\x00\x00\x00\x00\x0b\x01\x00", 8);
343 reg_w_buf(gspca_dev, 0x0022, "\x00\x00\x00\x00\x00\x00\x00\x00", 8);
344 reg_w_buf(gspca_dev, 0x002a, "\x00\x00\x00", 3);
345 reg_w_buf(gspca_dev, 0x003e, "\x00\x00\x78\x52\x4a\x52\x78\x6e", 8);
346 reg_w_buf(gspca_dev, 0x0046, "\x48\x46\x48\x6e\x5f\x49\x42\x49", 8);
347 reg_w_buf(gspca_dev, 0x004e, "\x5f\x5f\x49\x42\x49\x5f\x6e\x48", 8);
348 reg_w_buf(gspca_dev, 0x0056, "\x46\x48\x6e\x78\x52\x4a\x52\x78", 8);
349 reg_w_buf(gspca_dev, 0x005e, "\x00\x00\x09\x1b\x34\x49\x5c\x9b", 8);
350 reg_w_buf(gspca_dev, 0x0066, "\xd0\xff", 2);
351 reg_w_buf(gspca_dev, 0x0078, "\x44\x00\xf2\x01\x01\x80", 6);
352 reg_w_buf(gspca_dev, 0x007f, "\x2a\x1c\x00\xc8\x02\x58\x03\x84", 8);
353 reg_w_buf(gspca_dev, 0x0087, "\x12\x00\x1a\x04\x08\x0c\x10\x14", 8);
354 reg_w_buf(gspca_dev, 0x008f, "\x18\x20", 2);
355 reg_w_buf(gspca_dev, 0x0096, "\x01\x08\x04", 3);
356 reg_w_buf(gspca_dev, 0x00a0, "\x44\x44\x44\x04", 4);
357 reg_w_buf(gspca_dev, 0x00f0, "\x01\x00\x00\x00\x22\x00\x20\x00", 8);
358 reg_w_buf(gspca_dev, 0x00f8, "\x3f\x00\x0a\x01\x00", 5);
359 681
360 reg_w(gspca_dev, 0xff, 0x04); 682 sd->sof_read = 0;
361 reg_w(gspca_dev, 0x02, 0x04); 683
362 reg_w(gspca_dev, 0x03, 0x54); 684 if (sd->sensor == SENSOR_PAC7302) {
363 reg_w(gspca_dev, 0x04, 0x07); 685 reg_w_var(gspca_dev, start_7302);
364 reg_w(gspca_dev, 0x05, 0x2b); 686 setbrightcont(gspca_dev);
365 reg_w(gspca_dev, 0x06, 0x09); 687 setcolors(gspca_dev);
366 reg_w(gspca_dev, 0x07, 0x0f); 688 } else {
367 reg_w(gspca_dev, 0x08, 0x09); 689 reg_w_var(gspca_dev, start_7311);
368 reg_w(gspca_dev, 0x09, 0x00); 690 setcontrast(gspca_dev);
369 reg_w(gspca_dev, 0x0c, 0x07); 691 }
370 reg_w(gspca_dev, 0x0d, 0x00); 692 setgain(gspca_dev);
371 reg_w(gspca_dev, 0x0e, 0x00); 693 setexposure(gspca_dev);
372 reg_w(gspca_dev, 0x0f, 0x62); 694 sethvflip(gspca_dev);
373 reg_w(gspca_dev, 0x10, 0x08);
374 reg_w(gspca_dev, 0x12, 0x07);
375 reg_w(gspca_dev, 0x13, 0x00);
376 reg_w(gspca_dev, 0x14, 0x00);
377 reg_w(gspca_dev, 0x15, 0x00);
378 reg_w(gspca_dev, 0x16, 0x00);
379 reg_w(gspca_dev, 0x17, 0x00);
380 reg_w(gspca_dev, 0x18, 0x00);
381 reg_w(gspca_dev, 0x19, 0x00);
382 reg_w(gspca_dev, 0x1a, 0x00);
383 reg_w(gspca_dev, 0x1b, 0x03);
384 reg_w(gspca_dev, 0x1c, 0xa0);
385 reg_w(gspca_dev, 0x1d, 0x01);
386 reg_w(gspca_dev, 0x1e, 0xf4);
387 reg_w(gspca_dev, 0x21, 0x00);
388 reg_w(gspca_dev, 0x22, 0x08);
389 reg_w(gspca_dev, 0x24, 0x03);
390 reg_w(gspca_dev, 0x26, 0x00);
391 reg_w(gspca_dev, 0x27, 0x01);
392 reg_w(gspca_dev, 0x28, 0xca);
393 reg_w(gspca_dev, 0x29, 0x10);
394 reg_w(gspca_dev, 0x2a, 0x06);
395 reg_w(gspca_dev, 0x2b, 0x78);
396 reg_w(gspca_dev, 0x2c, 0x00);
397 reg_w(gspca_dev, 0x2d, 0x00);
398 reg_w(gspca_dev, 0x2e, 0x00);
399 reg_w(gspca_dev, 0x2f, 0x00);
400 reg_w(gspca_dev, 0x30, 0x23);
401 reg_w(gspca_dev, 0x31, 0x28);
402 reg_w(gspca_dev, 0x32, 0x04);
403 reg_w(gspca_dev, 0x33, 0x11);
404 reg_w(gspca_dev, 0x34, 0x00);
405 reg_w(gspca_dev, 0x35, 0x00);
406 reg_w(gspca_dev, 0x11, 0x01);
407 setcontrast(gspca_dev);
408 setbrightness(gspca_dev);
409 setcolors(gspca_dev);
410 setautogain(gspca_dev);
411 695
412 /* set correct resolution */ 696 /* set correct resolution */
413 switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) { 697 switch (gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv) {
414 case 2: /* 160x120 */ 698 case 2: /* 160x120 pac7311 */
415 reg_w(gspca_dev, 0xff, 0x04);
416 reg_w(gspca_dev, 0x02, 0x03);
417 reg_w(gspca_dev, 0xff, 0x01); 699 reg_w(gspca_dev, 0xff, 0x01);
418 reg_w(gspca_dev, 0x08, 0x09);
419 reg_w(gspca_dev, 0x17, 0x20); 700 reg_w(gspca_dev, 0x17, 0x20);
420 reg_w(gspca_dev, 0x1b, 0x00);
421/* reg_w(gspca_dev, 0x80, 0x69); */
422 reg_w(gspca_dev, 0x87, 0x10); 701 reg_w(gspca_dev, 0x87, 0x10);
423 break; 702 break;
424 case 1: /* 320x240 */ 703 case 1: /* 320x240 pac7311 */
425 reg_w(gspca_dev, 0xff, 0x04);
426 reg_w(gspca_dev, 0x02, 0x03);
427 reg_w(gspca_dev, 0xff, 0x01); 704 reg_w(gspca_dev, 0xff, 0x01);
428 reg_w(gspca_dev, 0x08, 0x09);
429 reg_w(gspca_dev, 0x17, 0x30); 705 reg_w(gspca_dev, 0x17, 0x30);
430/* reg_w(gspca_dev, 0x80, 0x3f); */
431 reg_w(gspca_dev, 0x87, 0x11); 706 reg_w(gspca_dev, 0x87, 0x11);
432 break; 707 break;
433 case 0: /* 640x480 */ 708 case 0: /* 640x480 */
434 reg_w(gspca_dev, 0xff, 0x04); 709 if (sd->sensor == SENSOR_PAC7302)
435 reg_w(gspca_dev, 0x02, 0x03); 710 break;
436 reg_w(gspca_dev, 0xff, 0x01); 711 reg_w(gspca_dev, 0xff, 0x01);
437 reg_w(gspca_dev, 0x08, 0x08);
438 reg_w(gspca_dev, 0x17, 0x00); 712 reg_w(gspca_dev, 0x17, 0x00);
439/* reg_w(gspca_dev, 0x80, 0x1c); */
440 reg_w(gspca_dev, 0x87, 0x12); 713 reg_w(gspca_dev, 0x87, 0x12);
441 break; 714 break;
442 } 715 }
443 716
717 sd->sof_read = 0;
718 sd->autogain_ignore_frames = 0;
719 atomic_set(&sd->avg_lum, -1);
720
444 /* start stream */ 721 /* start stream */
445 reg_w(gspca_dev, 0xff, 0x01); 722 reg_w(gspca_dev, 0xff, 0x01);
446 reg_w(gspca_dev, 0x78, 0x04); 723 if (sd->sensor == SENSOR_PAC7302)
447 reg_w(gspca_dev, 0x78, 0x05); 724 reg_w(gspca_dev, 0x78, 0x01);
725 else
726 reg_w(gspca_dev, 0x78, 0x05);
448} 727}
449 728
450static void sd_stopN(struct gspca_dev *gspca_dev) 729static void sd_stopN(struct gspca_dev *gspca_dev)
451{ 730{
731 struct sd *sd = (struct sd *) gspca_dev;
732
733 if (sd->sensor == SENSOR_PAC7302) {
734 reg_w(gspca_dev, 0xff, 0x01);
735 reg_w(gspca_dev, 0x78, 0x00);
736 reg_w(gspca_dev, 0x78, 0x00);
737 return;
738 }
452 reg_w(gspca_dev, 0xff, 0x04); 739 reg_w(gspca_dev, 0xff, 0x04);
453 reg_w(gspca_dev, 0x27, 0x80); 740 reg_w(gspca_dev, 0x27, 0x80);
454 reg_w(gspca_dev, 0x28, 0xca); 741 reg_w(gspca_dev, 0x28, 0xca);
@@ -456,187 +743,147 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
456 reg_w(gspca_dev, 0x2a, 0x0e); 743 reg_w(gspca_dev, 0x2a, 0x0e);
457 reg_w(gspca_dev, 0xff, 0x01); 744 reg_w(gspca_dev, 0xff, 0x01);
458 reg_w(gspca_dev, 0x3e, 0x20); 745 reg_w(gspca_dev, 0x3e, 0x20);
459 reg_w(gspca_dev, 0x78, 0x04); /* Bit_0=start stream, Bit_7=LED */ 746 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */
460 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_7=LED */ 747 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */
461 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_7=LED */ 748 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_6=LED */
462} 749}
463 750
464static void sd_stop0(struct gspca_dev *gspca_dev) 751static void sd_stop0(struct gspca_dev *gspca_dev)
465{ 752{
466} 753 struct sd *sd = (struct sd *) gspca_dev;
467 754
468/* this function is called at close time */ 755 if (sd->sensor == SENSOR_PAC7302) {
469static void sd_close(struct gspca_dev *gspca_dev) 756 reg_w(gspca_dev, 0xff, 0x01);
470{ 757 reg_w(gspca_dev, 0x78, 0x40);
471 reg_w(gspca_dev, 0xff, 0x04); 758 }
472 reg_w(gspca_dev, 0x27, 0x80);
473 reg_w(gspca_dev, 0x28, 0xca);
474 reg_w(gspca_dev, 0x29, 0x53);
475 reg_w(gspca_dev, 0x2a, 0x0e);
476 reg_w(gspca_dev, 0xff, 0x01);
477 reg_w(gspca_dev, 0x3e, 0x20);
478 reg_w(gspca_dev, 0x78, 0x04); /* Bit_0=start stream, Bit_7=LED */
479 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_7=LED */
480 reg_w(gspca_dev, 0x78, 0x44); /* Bit_0=start stream, Bit_7=LED */
481} 759}
482 760
761/* Include pac common sof detection functions */
762#include "pac_common.h"
763
483static void do_autogain(struct gspca_dev *gspca_dev) 764static void do_autogain(struct gspca_dev *gspca_dev)
484{ 765{
485 struct sd *sd = (struct sd *) gspca_dev; 766 struct sd *sd = (struct sd *) gspca_dev;
486 int luma; 767 int avg_lum = atomic_read(&sd->avg_lum);
487 int luma_mean = 128; 768 int desired_lum, deadzone;
488 int luma_delta = 20;
489 __u8 spring = 5;
490 int Gbright;
491 769
492 if (!atomic_read(&sd->do_gain)) 770 if (avg_lum == -1)
493 return; 771 return;
494 atomic_set(&sd->do_gain, 0); 772
495 773 if (sd->sensor == SENSOR_PAC7302) {
496 luma = atomic_read(&sd->avg_lum); 774 desired_lum = 270 + sd->brightness * 4;
497 Gbright = reg_r(gspca_dev, 0x02); 775 /* Hack hack, with the 7202 the first exposure step is
498 PDEBUG(D_FRAM, "luma mean %d", luma); 776 pretty large, so if we're about to make the first
499 if (luma < luma_mean - luma_delta || 777 exposure increase make the deadzone large to avoid
500 luma > luma_mean + luma_delta) { 778 oscilating */
501 Gbright += (luma_mean - luma) >> spring; 779 if (desired_lum > avg_lum && sd->gain == GAIN_DEF &&
502 if (Gbright > 0x1a) 780 sd->exposure > EXPOSURE_DEF &&
503 Gbright = 0x1a; 781 sd->exposure < 42)
504 else if (Gbright < 4) 782 deadzone = 90;
505 Gbright = 4; 783 else
506 PDEBUG(D_FRAM, "gbright %d", Gbright); 784 deadzone = 30;
507 reg_w(gspca_dev, 0xff, 0x04); 785 } else {
508 reg_w(gspca_dev, 0x0f, Gbright); 786 desired_lum = 200;
509 /* load registers to sensor (Bit 0, auto clear) */ 787 deadzone = 20;
510 reg_w(gspca_dev, 0x11, 0x01);
511 } 788 }
789
790 if (sd->autogain_ignore_frames > 0)
791 sd->autogain_ignore_frames--;
792 else if (gspca_auto_gain_n_exposure(gspca_dev, avg_lum, desired_lum,
793 deadzone, GAIN_KNEE, EXPOSURE_KNEE))
794 sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
512} 795}
513 796
797static const unsigned char pac7311_jpeg_header1[] = {
798 0xff, 0xd8, 0xff, 0xc0, 0x00, 0x11, 0x08
799};
800
801static const unsigned char pac7311_jpeg_header2[] = {
802 0x03, 0x01, 0x21, 0x00, 0x02, 0x11, 0x01, 0x03, 0x11, 0x01, 0xff, 0xda,
803 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00
804};
805
806/* this function is run at interrupt level */
514static void sd_pkt_scan(struct gspca_dev *gspca_dev, 807static void sd_pkt_scan(struct gspca_dev *gspca_dev,
515 struct gspca_frame *frame, /* target */ 808 struct gspca_frame *frame, /* target */
516 __u8 *data, /* isoc packet */ 809 __u8 *data, /* isoc packet */
517 int len) /* iso packet length */ 810 int len) /* iso packet length */
518{ 811{
519 struct sd *sd = (struct sd *) gspca_dev; 812 struct sd *sd = (struct sd *) gspca_dev;
520 unsigned char tmpbuf[4]; 813 unsigned char *sof;
521 int i, p, ffseq; 814
522 815 sof = pac_find_sof(gspca_dev, data, len);
523/* if (len < 5) { */ 816 if (sof) {
524 if (len < 6) { 817 unsigned char tmpbuf[4];
525/* gspca_dev->last_packet_type = DISCARD_PACKET; */ 818 int n, lum_offset, footer_length;
526 return; 819
527 } 820 if (sd->sensor == SENSOR_PAC7302) {
528 821 /* 6 bytes after the FF D9 EOF marker a number of lumination
529 ffseq = sd->ffseq; 822 bytes are send corresponding to different parts of the
530 823 image, the 14th and 15th byte after the EOF seem to
531 for (p = 0; p < len - 6; p++) { 824 correspond to the center of the image */
532 if ((data[0 + p] == 0xff) 825 lum_offset = 61 + sizeof pac_sof_marker;
533 && (data[1 + p] == 0xff) 826 footer_length = 74;
534 && (data[2 + p] == 0x00) 827 } else {
535 && (data[3 + p] == 0xff) 828 lum_offset = 24 + sizeof pac_sof_marker;
536 && (data[4 + p] == 0x96)) { 829 footer_length = 26;
537 830 }
538 /* start of frame */
539 if (sd->ag_cnt >= 0 && p > 28) {
540 sd->lum_sum += data[p - 23];
541 if (--sd->ag_cnt < 0) {
542 sd->ag_cnt = AG_CNT_START;
543 atomic_set(&sd->avg_lum,
544 sd->lum_sum / AG_CNT_START);
545 sd->lum_sum = 0;
546 atomic_set(&sd->do_gain, 1);
547 }
548 }
549 831
550 /* copy the end of data to the current frame */ 832 /* Finish decoding current frame */
833 n = (sof - data) - (footer_length + sizeof pac_sof_marker);
834 if (n < 0) {
835 frame->data_end += n;
836 n = 0;
837 }
838 frame = gspca_frame_add(gspca_dev, INTER_PACKET, frame,
839 data, n);
840 if (gspca_dev->last_packet_type != DISCARD_PACKET &&
841 frame->data_end[-2] == 0xff &&
842 frame->data_end[-1] == 0xd9)
551 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame, 843 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame,
552 data, p); 844 NULL, 0);
553 845
554 /* put the JPEG header in the new frame */ 846 n = sof - data;
555 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, 847 len -= n;
556 (unsigned char *) pac7311_jpeg_header, 848 data = sof;
557 12); 849
850 /* Get average lumination */
851 if (gspca_dev->last_packet_type == LAST_PACKET &&
852 n >= lum_offset)
853 atomic_set(&sd->avg_lum, data[-lum_offset] +
854 data[-lum_offset + 1]);
855 else
856 atomic_set(&sd->avg_lum, -1);
857
858 /* Start the new frame with the jpeg header */
859 gspca_frame_add(gspca_dev, FIRST_PACKET, frame,
860 pac7311_jpeg_header1, sizeof(pac7311_jpeg_header1));
861 if (sd->sensor == SENSOR_PAC7302) {
862 /* The PAC7302 has the image rotated 90 degrees */
863 tmpbuf[0] = gspca_dev->width >> 8;
864 tmpbuf[1] = gspca_dev->width & 0xff;
865 tmpbuf[2] = gspca_dev->height >> 8;
866 tmpbuf[3] = gspca_dev->height & 0xff;
867 } else {
558 tmpbuf[0] = gspca_dev->height >> 8; 868 tmpbuf[0] = gspca_dev->height >> 8;
559 tmpbuf[1] = gspca_dev->height & 0xff; 869 tmpbuf[1] = gspca_dev->height & 0xff;
560 tmpbuf[2] = gspca_dev->width >> 8; 870 tmpbuf[2] = gspca_dev->width >> 8;
561 tmpbuf[3] = gspca_dev->width & 0xff; 871 tmpbuf[3] = gspca_dev->width & 0xff;
562 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
563 tmpbuf, 4);
564 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
565 (unsigned char *) &pac7311_jpeg_header[16],
566 PAC7311_JPEG_HEADER_SIZE - 16);
567
568 data += p + 7;
569 len -= p + 7;
570 ffseq = 0;
571 break;
572 } 872 }
873 gspca_frame_add(gspca_dev, INTER_PACKET, frame, tmpbuf, 4);
874 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
875 pac7311_jpeg_header2, sizeof(pac7311_jpeg_header2));
573 } 876 }
574
575 /* remove the 'ff ff ff xx' sequences */
576 switch (ffseq) {
577 case 3:
578 data += 1;
579 len -= 1;
580 break;
581 case 2:
582 if (data[0] == 0xff) {
583 data += 2;
584 len -= 2;
585 frame->data_end -= 2;
586 }
587 break;
588 case 1:
589 if (data[0] == 0xff
590 && data[1] == 0xff) {
591 data += 3;
592 len -= 3;
593 frame->data_end -= 1;
594 }
595 break;
596 }
597 for (i = 0; i < len - 4; i++) {
598 if (data[i] == 0xff
599 && data[i + 1] == 0xff
600 && data[i + 2] == 0xff) {
601 memmove(&data[i], &data[i + 4], len - i - 4);
602 len -= 4;
603 }
604 }
605 ffseq = 0;
606 if (data[len - 4] == 0xff) {
607 if (data[len - 3] == 0xff
608 && data[len - 2] == 0xff) {
609 len -= 4;
610 }
611 } else if (data[len - 3] == 0xff) {
612 if (data[len - 2] == 0xff
613 && data[len - 1] == 0xff)
614 ffseq = 3;
615 } else if (data[len - 2] == 0xff) {
616 if (data[len - 1] == 0xff)
617 ffseq = 2;
618 } else if (data[len - 1] == 0xff)
619 ffseq = 1;
620 sd->ffseq = ffseq;
621 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); 877 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
622} 878}
623 879
624static void getbrightness(struct gspca_dev *gspca_dev)
625{
626/* sd->brightness = reg_r(gspca_dev, 0x08);
627 return sd->brightness; */
628/* PDEBUG(D_CONF, "Called pac7311_getbrightness: Not implemented yet"); */
629}
630
631
632
633static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) 880static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
634{ 881{
635 struct sd *sd = (struct sd *) gspca_dev; 882 struct sd *sd = (struct sd *) gspca_dev;
636 883
637 sd->brightness = val; 884 sd->brightness = val;
638 if (gspca_dev->streaming) 885 if (gspca_dev->streaming)
639 setbrightness(gspca_dev); 886 setbrightcont(gspca_dev);
640 return 0; 887 return 0;
641} 888}
642 889
@@ -644,7 +891,6 @@ static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
644{ 891{
645 struct sd *sd = (struct sd *) gspca_dev; 892 struct sd *sd = (struct sd *) gspca_dev;
646 893
647 getbrightness(gspca_dev);
648 *val = sd->brightness; 894 *val = sd->brightness;
649 return 0; 895 return 0;
650} 896}
@@ -654,8 +900,12 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
654 struct sd *sd = (struct sd *) gspca_dev; 900 struct sd *sd = (struct sd *) gspca_dev;
655 901
656 sd->contrast = val; 902 sd->contrast = val;
657 if (gspca_dev->streaming) 903 if (gspca_dev->streaming) {
658 setcontrast(gspca_dev); 904 if (sd->sensor == SENSOR_PAC7302)
905 setbrightcont(gspca_dev);
906 else
907 setcontrast(gspca_dev);
908 }
659 return 0; 909 return 0;
660} 910}
661 911
@@ -663,7 +913,6 @@ static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
663{ 913{
664 struct sd *sd = (struct sd *) gspca_dev; 914 struct sd *sd = (struct sd *) gspca_dev;
665 915
666/* getcontrast(gspca_dev); */
667 *val = sd->contrast; 916 *val = sd->contrast;
668 return 0; 917 return 0;
669} 918}
@@ -682,18 +931,66 @@ static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
682{ 931{
683 struct sd *sd = (struct sd *) gspca_dev; 932 struct sd *sd = (struct sd *) gspca_dev;
684 933
685/* getcolors(gspca_dev); */
686 *val = sd->colors; 934 *val = sd->colors;
687 return 0; 935 return 0;
688} 936}
689 937
938static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
939{
940 struct sd *sd = (struct sd *) gspca_dev;
941
942 sd->gain = val;
943 if (gspca_dev->streaming)
944 setgain(gspca_dev);
945 return 0;
946}
947
948static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
949{
950 struct sd *sd = (struct sd *) gspca_dev;
951
952 *val = sd->gain;
953 return 0;
954}
955
956static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
957{
958 struct sd *sd = (struct sd *) gspca_dev;
959
960 sd->exposure = val;
961 if (gspca_dev->streaming)
962 setexposure(gspca_dev);
963 return 0;
964}
965
966static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
967{
968 struct sd *sd = (struct sd *) gspca_dev;
969
970 *val = sd->exposure;
971 return 0;
972}
973
690static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val) 974static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
691{ 975{
692 struct sd *sd = (struct sd *) gspca_dev; 976 struct sd *sd = (struct sd *) gspca_dev;
693 977
694 sd->autogain = val; 978 sd->autogain = val;
695 if (gspca_dev->streaming) 979 /* when switching to autogain set defaults to make sure
696 setautogain(gspca_dev); 980 we are on a valid point of the autogain gain /
981 exposure knee graph, and give this change time to
982 take effect before doing autogain. */
983 if (sd->autogain) {
984 sd->exposure = EXPOSURE_DEF;
985 sd->gain = GAIN_DEF;
986 if (gspca_dev->streaming) {
987 sd->autogain_ignore_frames =
988 PAC_AUTOGAIN_IGNORE_FRAMES;
989 setexposure(gspca_dev);
990 setgain(gspca_dev);
991 }
992 }
993
697 return 0; 994 return 0;
698} 995}
699 996
@@ -705,30 +1002,67 @@ static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
705 return 0; 1002 return 0;
706} 1003}
707 1004
1005static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
1006{
1007 struct sd *sd = (struct sd *) gspca_dev;
1008
1009 sd->hflip = val;
1010 if (gspca_dev->streaming)
1011 sethvflip(gspca_dev);
1012 return 0;
1013}
1014
1015static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
1016{
1017 struct sd *sd = (struct sd *) gspca_dev;
1018
1019 *val = sd->hflip;
1020 return 0;
1021}
1022
1023static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
1024{
1025 struct sd *sd = (struct sd *) gspca_dev;
1026
1027 sd->vflip = val;
1028 if (gspca_dev->streaming)
1029 sethvflip(gspca_dev);
1030 return 0;
1031}
1032
1033static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
1034{
1035 struct sd *sd = (struct sd *) gspca_dev;
1036
1037 *val = sd->vflip;
1038 return 0;
1039}
1040
708/* sub-driver description */ 1041/* sub-driver description */
709static struct sd_desc sd_desc = { 1042static struct sd_desc sd_desc = {
710 .name = MODULE_NAME, 1043 .name = MODULE_NAME,
711 .ctrls = sd_ctrls, 1044 .ctrls = sd_ctrls,
712 .nctrls = ARRAY_SIZE(sd_ctrls), 1045 .nctrls = ARRAY_SIZE(sd_ctrls),
713 .config = sd_config, 1046 .config = sd_config,
714 .open = sd_open, 1047 .init = sd_init,
715 .start = sd_start, 1048 .start = sd_start,
716 .stopN = sd_stopN, 1049 .stopN = sd_stopN,
717 .stop0 = sd_stop0, 1050 .stop0 = sd_stop0,
718 .close = sd_close,
719 .pkt_scan = sd_pkt_scan, 1051 .pkt_scan = sd_pkt_scan,
720 .dq_callback = do_autogain, 1052 .dq_callback = do_autogain,
721}; 1053};
722 1054
723/* -- module initialisation -- */ 1055/* -- module initialisation -- */
724static __devinitdata struct usb_device_id device_table[] = { 1056static __devinitdata struct usb_device_id device_table[] = {
725 {USB_DEVICE(0x093a, 0x2600)}, 1057 {USB_DEVICE(0x093a, 0x2600), .driver_info = SENSOR_PAC7311},
726 {USB_DEVICE(0x093a, 0x2601)}, 1058 {USB_DEVICE(0x093a, 0x2601), .driver_info = SENSOR_PAC7311},
727 {USB_DEVICE(0x093a, 0x2603)}, 1059 {USB_DEVICE(0x093a, 0x2603), .driver_info = SENSOR_PAC7311},
728 {USB_DEVICE(0x093a, 0x2608)}, 1060 {USB_DEVICE(0x093a, 0x2608), .driver_info = SENSOR_PAC7311},
729 {USB_DEVICE(0x093a, 0x260e)}, 1061 {USB_DEVICE(0x093a, 0x260e), .driver_info = SENSOR_PAC7311},
730 {USB_DEVICE(0x093a, 0x260f)}, 1062 {USB_DEVICE(0x093a, 0x260f), .driver_info = SENSOR_PAC7311},
731 {USB_DEVICE(0x093a, 0x2621)}, 1063 {USB_DEVICE(0x093a, 0x2621), .driver_info = SENSOR_PAC7302},
1064 {USB_DEVICE(0x093a, 0x2624), .driver_info = SENSOR_PAC7302},
1065 {USB_DEVICE(0x093a, 0x2626), .driver_info = SENSOR_PAC7302},
732 {} 1066 {}
733}; 1067};
734MODULE_DEVICE_TABLE(usb, device_table); 1068MODULE_DEVICE_TABLE(usb, device_table);
@@ -746,6 +1080,10 @@ static struct usb_driver sd_driver = {
746 .id_table = device_table, 1080 .id_table = device_table,
747 .probe = sd_probe, 1081 .probe = sd_probe,
748 .disconnect = gspca_disconnect, 1082 .disconnect = gspca_disconnect,
1083#ifdef CONFIG_PM
1084 .suspend = gspca_suspend,
1085 .resume = gspca_resume,
1086#endif
749}; 1087};
750 1088
751/* -- module insert / remove -- */ 1089/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/pac_common.h b/drivers/media/video/gspca/pac_common.h
new file mode 100644
index 000000000000..34d4b1494cd5
--- /dev/null
+++ b/drivers/media/video/gspca/pac_common.h
@@ -0,0 +1,60 @@
1/*
2 * Pixart PAC207BCA / PAC73xx common functions
3 *
4 * Copyright (C) 2008 Hans de Goede <j.w.r.degoede@hhs.nl>
5 * Copyright (C) 2005 Thomas Kaiser thomas@kaiser-linux.li
6 * Copyleft (C) 2005 Michel Xhaard mxhaard@magic.fr
7 *
8 * V4L2 by Jean-Francois Moine <http://moinejf.free.fr>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */
25
26/* We calculate the autogain at the end of the transfer of a frame, at this
27 moment a frame with the old settings is being transmitted, and a frame is
28 being captured with the old settings. So if we adjust the autogain we must
29 ignore atleast the 2 next frames for the new settings to come into effect
30 before doing any other adjustments */
31#define PAC_AUTOGAIN_IGNORE_FRAMES 3
32
33static const unsigned char pac_sof_marker[5] =
34 { 0xff, 0xff, 0x00, 0xff, 0x96 };
35
36static unsigned char *pac_find_sof(struct gspca_dev *gspca_dev,
37 unsigned char *m, int len)
38{
39 struct sd *sd = (struct sd *) gspca_dev;
40 int i;
41
42 /* Search for the SOF marker (fixed part) in the header */
43 for (i = 0; i < len; i++) {
44 if (m[i] == pac_sof_marker[sd->sof_read]) {
45 sd->sof_read++;
46 if (sd->sof_read == sizeof(pac_sof_marker)) {
47 PDEBUG(D_FRAM,
48 "SOF found, bytes to analyze: %u."
49 " Frame starts at byte #%u",
50 len, i + 1);
51 sd->sof_read = 0;
52 return m + i + 1;
53 }
54 } else {
55 sd->sof_read = 0;
56 }
57 }
58
59 return NULL;
60}
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 11210c71f66c..5dd78c6766ea 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -20,6 +20,26 @@
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 21 */
22 22
23/* Some documentation on known sonixb registers:
24
25Reg Use
260x10 high nibble red gain low nibble blue gain
270x11 low nibble green gain
280x12 hstart
290x13 vstart
300x15 hsize (hsize = register-value * 16)
310x16 vsize (vsize = register-value * 16)
320x17 bit 0 toggle compression quality (according to sn9c102 driver)
330x18 bit 7 enables compression, bit 4-5 set image down scaling:
34 00 scale 1, 01 scale 1/2, 10, scale 1/4
350x19 high-nibble is sensor clock divider, changes exposure on sensors which
36 use a clock generated by the bridge. Some sensors have their own clock.
370x1c auto_exposure area (for avg_lum) startx (startx = register-value * 32)
380x1d auto_exposure area (for avg_lum) starty (starty = register-value * 32)
390x1e auto_exposure area (for avg_lum) stopx (hsize = (0x1e - 0x1c) * 32)
400x1f auto_exposure area (for avg_lum) stopy (vsize = (0x1f - 0x1d) * 32)
41*/
42
23#define MODULE_NAME "sonixb" 43#define MODULE_NAME "sonixb"
24 44
25#include "gspca.h" 45#include "gspca.h"
@@ -31,10 +51,8 @@ MODULE_LICENSE("GPL");
31/* specific webcam descriptor */ 51/* specific webcam descriptor */
32struct sd { 52struct sd {
33 struct gspca_dev gspca_dev; /* !! must be the first item */ 53 struct gspca_dev gspca_dev; /* !! must be the first item */
34
35 struct sd_desc sd_desc; /* our nctrls differ dependend upon the
36 sensor, so we use a per cam copy */
37 atomic_t avg_lum; 54 atomic_t avg_lum;
55 int prev_avg_lum;
38 56
39 unsigned char gain; 57 unsigned char gain;
40 unsigned char exposure; 58 unsigned char exposure;
@@ -44,8 +62,12 @@ struct sd {
44 unsigned char frames_to_drop; 62 unsigned char frames_to_drop;
45 unsigned char freq; /* light freq filter setting */ 63 unsigned char freq; /* light freq filter setting */
46 64
47 unsigned char fr_h_sz; /* size of frame header */ 65 __u8 bridge; /* Type of bridge */
48 char sensor; /* Type of image sensor chip */ 66#define BRIDGE_101 0
67#define BRIDGE_102 0 /* We make no difference between 101 and 102 */
68#define BRIDGE_103 1
69
70 __u8 sensor; /* Type of image sensor chip */
49#define SENSOR_HV7131R 0 71#define SENSOR_HV7131R 0
50#define SENSOR_OV6650 1 72#define SENSOR_OV6650 1
51#define SENSOR_OV7630 2 73#define SENSOR_OV7630 2
@@ -53,16 +75,35 @@ struct sd {
53#define SENSOR_PAS202 4 75#define SENSOR_PAS202 4
54#define SENSOR_TAS5110 5 76#define SENSOR_TAS5110 5
55#define SENSOR_TAS5130CXX 6 77#define SENSOR_TAS5130CXX 6
56 char sensor_has_gain;
57 __u8 sensor_addr;
58 __u8 reg11; 78 __u8 reg11;
59}; 79};
60 80
61/* flags used in the device id table */ 81typedef const __u8 sensor_init_t[8];
82
83struct sensor_data {
84 const __u8 *bridge_init[2];
85 int bridge_init_size[2];
86 sensor_init_t *sensor_init;
87 int sensor_init_size;
88 sensor_init_t *sensor_bridge_init[2];
89 int sensor_bridge_init_size[2];
90 int flags;
91 unsigned ctrl_dis;
92 __u8 sensor_addr;
93};
94
95/* sensor_data flags */
62#define F_GAIN 0x01 /* has gain */ 96#define F_GAIN 0x01 /* has gain */
63#define F_AUTO 0x02 /* has autogain */ 97#define F_SIF 0x02 /* sif or vga */
64#define F_SIF 0x04 /* sif or vga */ 98
65#define F_H18 0x08 /* long (18 b) or short (12 b) frame header */ 99/* priv field of struct v4l2_pix_format flags (do not use low nibble!) */
100#define MODE_RAW 0x10 /* raw bayer mode */
101#define MODE_REDUCED_SIF 0x20 /* vga mode (320x240 / 160x120) on sif cam */
102
103/* ctrl_dis helper macros */
104#define NO_EXPO ((1 << EXPOSURE_IDX) | (1 << AUTOGAIN_IDX))
105#define NO_FREQ (1 << FREQ_IDX)
106#define NO_BRIGHTNESS (1 << BRIGHTNESS_IDX)
66 107
67#define COMP2 0x8f 108#define COMP2 0x8f
68#define COMP 0xc7 /* 0x87 //0x07 */ 109#define COMP 0xc7 /* 0x87 //0x07 */
@@ -73,6 +114,18 @@ struct sd {
73 114
74#define SYS_CLK 0x04 115#define SYS_CLK 0x04
75 116
117#define SENS(bridge_1, bridge_3, sensor, sensor_1, \
118 sensor_3, _flags, _ctrl_dis, _sensor_addr) \
119{ \
120 .bridge_init = { bridge_1, bridge_3 }, \
121 .bridge_init_size = { sizeof(bridge_1), sizeof(bridge_3) }, \
122 .sensor_init = sensor, \
123 .sensor_init_size = sizeof(sensor), \
124 .sensor_bridge_init = { sensor_1, sensor_3,}, \
125 .sensor_bridge_init_size = { sizeof(sensor_1), sizeof(sensor_3)}, \
126 .flags = _flags, .ctrl_dis = _ctrl_dis, .sensor_addr = _sensor_addr \
127}
128
76/* We calculate the autogain at the end of the transfer of a frame, at this 129/* We calculate the autogain at the end of the transfer of a frame, at this
77 moment a frame with the old settings is being transmitted, and a frame is 130 moment a frame with the old settings is being transmitted, and a frame is
78 being captured with the old settings. So if we adjust the autogain we must 131 being captured with the old settings. So if we adjust the autogain we must
@@ -95,6 +148,7 @@ static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
95static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); 148static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
96 149
97static struct ctrl sd_ctrls[] = { 150static struct ctrl sd_ctrls[] = {
151#define BRIGHTNESS_IDX 0
98 { 152 {
99 { 153 {
100 .id = V4L2_CID_BRIGHTNESS, 154 .id = V4L2_CID_BRIGHTNESS,
@@ -109,6 +163,7 @@ static struct ctrl sd_ctrls[] = {
109 .set = sd_setbrightness, 163 .set = sd_setbrightness,
110 .get = sd_getbrightness, 164 .get = sd_getbrightness,
111 }, 165 },
166#define GAIN_IDX 1
112 { 167 {
113 { 168 {
114 .id = V4L2_CID_GAIN, 169 .id = V4L2_CID_GAIN,
@@ -124,6 +179,7 @@ static struct ctrl sd_ctrls[] = {
124 .set = sd_setgain, 179 .set = sd_setgain,
125 .get = sd_getgain, 180 .get = sd_getgain,
126 }, 181 },
182#define EXPOSURE_IDX 2
127 { 183 {
128 { 184 {
129 .id = V4L2_CID_EXPOSURE, 185 .id = V4L2_CID_EXPOSURE,
@@ -140,6 +196,7 @@ static struct ctrl sd_ctrls[] = {
140 .set = sd_setexposure, 196 .set = sd_setexposure,
141 .get = sd_getexposure, 197 .get = sd_getexposure,
142 }, 198 },
199#define AUTOGAIN_IDX 3
143 { 200 {
144 { 201 {
145 .id = V4L2_CID_AUTOGAIN, 202 .id = V4L2_CID_AUTOGAIN,
@@ -155,6 +212,7 @@ static struct ctrl sd_ctrls[] = {
155 .set = sd_setautogain, 212 .set = sd_setautogain,
156 .get = sd_getautogain, 213 .get = sd_getautogain,
157 }, 214 },
215#define FREQ_IDX 4
158 { 216 {
159 { 217 {
160 .id = V4L2_CID_POWER_LINE_FREQUENCY, 218 .id = V4L2_CID_POWER_LINE_FREQUENCY,
@@ -172,31 +230,56 @@ static struct ctrl sd_ctrls[] = {
172}; 230};
173 231
174static struct v4l2_pix_format vga_mode[] = { 232static struct v4l2_pix_format vga_mode[] = {
233 {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
234 .bytesperline = 160,
235 .sizeimage = 160 * 120 * 5 / 4,
236 .colorspace = V4L2_COLORSPACE_SRGB,
237 .priv = 2 | MODE_RAW},
175 {160, 120, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, 238 {160, 120, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
176 .bytesperline = 160, 239 .bytesperline = 160,
177 .sizeimage = 160 * 120, 240 .sizeimage = 160 * 120 * 5 / 4,
178 .colorspace = V4L2_COLORSPACE_SRGB, 241 .colorspace = V4L2_COLORSPACE_SRGB,
179 .priv = 2}, 242 .priv = 2},
180 {320, 240, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, 243 {320, 240, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
181 .bytesperline = 320, 244 .bytesperline = 320,
182 .sizeimage = 320 * 240, 245 .sizeimage = 320 * 240 * 5 / 4,
183 .colorspace = V4L2_COLORSPACE_SRGB, 246 .colorspace = V4L2_COLORSPACE_SRGB,
184 .priv = 1}, 247 .priv = 1},
185 {640, 480, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, 248 {640, 480, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
186 .bytesperline = 640, 249 .bytesperline = 640,
187 .sizeimage = 640 * 480, 250 .sizeimage = 640 * 480 * 5 / 4,
188 .colorspace = V4L2_COLORSPACE_SRGB, 251 .colorspace = V4L2_COLORSPACE_SRGB,
189 .priv = 0}, 252 .priv = 0},
190}; 253};
191static struct v4l2_pix_format sif_mode[] = { 254static struct v4l2_pix_format sif_mode[] = {
255 {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
256 .bytesperline = 160,
257 .sizeimage = 160 * 120,
258 .colorspace = V4L2_COLORSPACE_SRGB,
259 .priv = 1 | MODE_RAW | MODE_REDUCED_SIF},
260 {160, 120, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
261 .bytesperline = 160,
262 .sizeimage = 160 * 120 * 5 / 4,
263 .colorspace = V4L2_COLORSPACE_SRGB,
264 .priv = 1 | MODE_REDUCED_SIF},
265 {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
266 .bytesperline = 176,
267 .sizeimage = 176 * 144 * 5 / 4,
268 .colorspace = V4L2_COLORSPACE_SRGB,
269 .priv = 1 | MODE_RAW},
192 {176, 144, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, 270 {176, 144, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
193 .bytesperline = 176, 271 .bytesperline = 176,
194 .sizeimage = 176 * 144, 272 .sizeimage = 176 * 144 * 5 / 4,
195 .colorspace = V4L2_COLORSPACE_SRGB, 273 .colorspace = V4L2_COLORSPACE_SRGB,
196 .priv = 1}, 274 .priv = 1},
275 {320, 240, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
276 .bytesperline = 320,
277 .sizeimage = 320 * 240 * 5 / 4,
278 .colorspace = V4L2_COLORSPACE_SRGB,
279 .priv = 0 | MODE_REDUCED_SIF},
197 {352, 288, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE, 280 {352, 288, V4L2_PIX_FMT_SN9C10X, V4L2_FIELD_NONE,
198 .bytesperline = 352, 281 .bytesperline = 352,
199 .sizeimage = 352 * 288, 282 .sizeimage = 352 * 288 * 5 / 4,
200 .colorspace = V4L2_COLORSPACE_SRGB, 283 .colorspace = V4L2_COLORSPACE_SRGB,
201 .priv = 0}, 284 .priv = 0},
202}; 285};
@@ -204,7 +287,7 @@ static struct v4l2_pix_format sif_mode[] = {
204static const __u8 initHv7131[] = { 287static const __u8 initHv7131[] = {
205 0x46, 0x77, 0x00, 0x04, 0x00, 0x00, 0x00, 0x80, 0x11, 0x00, 0x00, 0x00, 288 0x46, 0x77, 0x00, 0x04, 0x00, 0x00, 0x00, 0x80, 0x11, 0x00, 0x00, 0x00,
206 0x00, 0x00, 289 0x00, 0x00,
207 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, /* shift from 0x02 0x01 0x00 */ 290 0x00, 0x00, 0x00, 0x02, 0x01, 0x00,
208 0x28, 0x1e, 0x60, 0x8a, 0x20, 291 0x28, 0x1e, 0x60, 0x8a, 0x20,
209 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c 292 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
210}; 293};
@@ -218,8 +301,8 @@ static const __u8 hv7131_sensor_init[][8] = {
218static const __u8 initOv6650[] = { 301static const __u8 initOv6650[] = {
219 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 302 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
220 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 303 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x02, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x0b, 304 0x00, 0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b,
222 0x10, 0x1d, 0x10, 0x00, 0x06, 0x1f, 0x00 305 0x10, 0x1d, 0x10, 0x02, 0x02, 0x09, 0x07
223}; 306};
224static const __u8 ov6650_sensor_init[][8] = 307static const __u8 ov6650_sensor_init[][8] =
225{ 308{
@@ -257,15 +340,15 @@ static const __u8 ov6650_sensor_init[][8] =
257static const __u8 initOv7630[] = { 340static const __u8 initOv7630[] = {
258 0x04, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, /* r01 .. r08 */ 341 0x04, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, /* r01 .. r08 */
259 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* r09 .. r10 */ 342 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* r09 .. r10 */
260 0x00, 0x02, 0x01, 0x0a, /* r11 .. r14 */ 343 0x00, 0x01, 0x01, 0x0a, /* r11 .. r14 */
261 0x28, 0x1e, /* H & V sizes r15 .. r16 */ 344 0x28, 0x1e, /* H & V sizes r15 .. r16 */
262 0x68, COMP1, MCK_INIT1, /* r17 .. r19 */ 345 0x68, COMP2, MCK_INIT1, /* r17 .. r19 */
263 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c /* r1a .. r1f */ 346 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c /* r1a .. r1f */
264}; 347};
265static const __u8 initOv7630_3[] = { 348static const __u8 initOv7630_3[] = {
266 0x44, 0x44, 0x00, 0x1a, 0x20, 0x20, 0x20, 0x80, /* r01 .. r08 */ 349 0x44, 0x44, 0x00, 0x1a, 0x20, 0x20, 0x20, 0x80, /* r01 .. r08 */
267 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, /* r09 .. r10 */ 350 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, /* r09 .. r10 */
268 0x00, 0x01, 0x01, 0x0a, /* r11 .. r14 */ 351 0x00, 0x02, 0x01, 0x0a, /* r11 .. r14 */
269 0x28, 0x1e, /* H & V sizes r15 .. r16 */ 352 0x28, 0x1e, /* H & V sizes r15 .. r16 */
270 0x68, 0x8f, MCK_INIT1, /* r17 .. r19 */ 353 0x68, 0x8f, MCK_INIT1, /* r17 .. r19 */
271 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c, 0x00, /* r1a .. r20 */ 354 0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c, 0x00, /* r1a .. r20 */
@@ -294,47 +377,65 @@ static const __u8 ov7630_sensor_init[][8] = {
294 {0xd0, 0x21, 0x17, 0x1c, 0xbd, 0x06, 0xf6, 0x10}, 377 {0xd0, 0x21, 0x17, 0x1c, 0xbd, 0x06, 0xf6, 0x10},
295}; 378};
296 379
380static const __u8 ov7630_sensor_init_3[][8] = {
381 {0xa0, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10},
382};
383
297static const __u8 initPas106[] = { 384static const __u8 initPas106[] = {
298 0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x40, 0x00, 0x00, 0x00, 385 0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x40, 0x00, 0x00, 0x00,
299 0x00, 0x00, 386 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x05, 0x01, 0x00, 387 0x00, 0x00, 0x00, 0x04, 0x01, 0x00,
301 0x16, 0x12, 0x28, COMP1, MCK_INIT1, 388 0x16, 0x12, 0x24, COMP1, MCK_INIT1,
302 0x18, 0x10, 0x04, 0x03, 0x11, 0x0c 389 0x18, 0x10, 0x02, 0x02, 0x09, 0x07
303}; 390};
304/* compression 0x86 mckinit1 0x2b */ 391/* compression 0x86 mckinit1 0x2b */
305static const __u8 pas106_data[][2] = { 392static const __u8 pas106_sensor_init[][8] = {
306 {0x02, 0x04}, /* Pixel Clock Divider 6 */ 393 /* Pixel Clock Divider 6 */
307 {0x03, 0x13}, /* Frame Time MSB */ 394 { 0xa1, 0x40, 0x02, 0x04, 0x00, 0x00, 0x00, 0x14 },
308/* {0x03, 0x12}, * Frame Time MSB */ 395 /* Frame Time MSB (also seen as 0x12) */
309 {0x04, 0x06}, /* Frame Time LSB */ 396 { 0xa1, 0x40, 0x03, 0x13, 0x00, 0x00, 0x00, 0x14 },
310/* {0x04, 0x05}, * Frame Time LSB */ 397 /* Frame Time LSB (also seen as 0x05) */
311 {0x05, 0x65}, /* Shutter Time Line Offset */ 398 { 0xa1, 0x40, 0x04, 0x06, 0x00, 0x00, 0x00, 0x14 },
312/* {0x05, 0x6d}, * Shutter Time Line Offset */ 399 /* Shutter Time Line Offset (also seen as 0x6d) */
313/* {0x06, 0xb1}, * Shutter Time Pixel Offset */ 400 { 0xa1, 0x40, 0x05, 0x65, 0x00, 0x00, 0x00, 0x14 },
314 {0x06, 0xcd}, /* Shutter Time Pixel Offset */ 401 /* Shutter Time Pixel Offset (also seen as 0xb1) */
315 {0x07, 0xc1}, /* Black Level Subtract Sign */ 402 { 0xa1, 0x40, 0x06, 0xcd, 0x00, 0x00, 0x00, 0x14 },
316/* {0x07, 0x00}, * Black Level Subtract Sign */ 403 /* Black Level Subtract Sign (also seen 0x00) */
317 {0x08, 0x06}, /* Black Level Subtract Level */ 404 { 0xa1, 0x40, 0x07, 0xc1, 0x00, 0x00, 0x00, 0x14 },
318 {0x08, 0x06}, /* Black Level Subtract Level */ 405 /* Black Level Subtract Level (also seen 0x01) */
319/* {0x08, 0x01}, * Black Level Subtract Level */ 406 { 0xa1, 0x40, 0x08, 0x06, 0x00, 0x00, 0x00, 0x14 },
320 {0x09, 0x05}, /* Color Gain B Pixel 5 a */ 407 { 0xa1, 0x40, 0x08, 0x06, 0x00, 0x00, 0x00, 0x14 },
321 {0x0a, 0x04}, /* Color Gain G1 Pixel 1 5 */ 408 /* Color Gain B Pixel 5 a */
322 {0x0b, 0x04}, /* Color Gain G2 Pixel 1 0 5 */ 409 { 0xa1, 0x40, 0x09, 0x05, 0x00, 0x00, 0x00, 0x14 },
323 {0x0c, 0x05}, /* Color Gain R Pixel 3 1 */ 410 /* Color Gain G1 Pixel 1 5 */
324 {0x0d, 0x00}, /* Color GainH Pixel */ 411 { 0xa1, 0x40, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x14 },
325 {0x0e, 0x0e}, /* Global Gain */ 412 /* Color Gain G2 Pixel 1 0 5 */
326 {0x0f, 0x00}, /* Contrast */ 413 { 0xa1, 0x40, 0x0b, 0x04, 0x00, 0x00, 0x00, 0x14 },
327 {0x10, 0x06}, /* H&V synchro polarity */ 414 /* Color Gain R Pixel 3 1 */
328 {0x11, 0x06}, /* ?default */ 415 { 0xa1, 0x40, 0x0c, 0x05, 0x00, 0x00, 0x00, 0x14 },
329 {0x12, 0x06}, /* DAC scale */ 416 /* Color GainH Pixel */
330 {0x14, 0x02}, /* ?default */ 417 { 0xa1, 0x40, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x14 },
331 {0x13, 0x01}, /* Validate Settings */ 418 /* Global Gain */
419 { 0xa1, 0x40, 0x0e, 0x0e, 0x00, 0x00, 0x00, 0x14 },
420 /* Contrast */
421 { 0xa1, 0x40, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x14 },
422 /* H&V synchro polarity */
423 { 0xa1, 0x40, 0x10, 0x06, 0x00, 0x00, 0x00, 0x14 },
424 /* ?default */
425 { 0xa1, 0x40, 0x11, 0x06, 0x00, 0x00, 0x00, 0x14 },
426 /* DAC scale */
427 { 0xa1, 0x40, 0x12, 0x06, 0x00, 0x00, 0x00, 0x14 },
428 /* ?default */
429 { 0xa1, 0x40, 0x14, 0x02, 0x00, 0x00, 0x00, 0x14 },
430 /* Validate Settings */
431 { 0xa1, 0x40, 0x13, 0x01, 0x00, 0x00, 0x00, 0x14 },
332}; 432};
433
333static const __u8 initPas202[] = { 434static const __u8 initPas202[] = {
334 0x44, 0x44, 0x21, 0x30, 0x00, 0x00, 0x00, 0x80, 0x40, 0x00, 0x00, 0x00, 435 0x44, 0x44, 0x21, 0x30, 0x00, 0x00, 0x00, 0x80, 0x40, 0x00, 0x00, 0x00,
335 0x00, 0x00, 436 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x07, 0x03, 0x0a, /* 6 */ 437 0x00, 0x00, 0x00, 0x06, 0x03, 0x0a,
337 0x28, 0x1e, 0x28, 0x89, 0x30, 438 0x28, 0x1e, 0x28, 0x89, 0x20,
338 0x00, 0x00, 0x02, 0x03, 0x0f, 0x0c 439 0x00, 0x00, 0x02, 0x03, 0x0f, 0x0c
339}; 440};
340static const __u8 pas202_sensor_init[][8] = { 441static const __u8 pas202_sensor_init[][8] = {
@@ -364,7 +465,7 @@ static const __u8 pas202_sensor_init[][8] = {
364static const __u8 initTas5110[] = { 465static const __u8 initTas5110[] = {
365 0x44, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x11, 0x00, 0x00, 0x00, 466 0x44, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x11, 0x00, 0x00, 0x00,
366 0x00, 0x00, 467 0x00, 0x00,
367 0x00, 0x01, 0x00, 0x46, 0x09, 0x0a, /* shift from 0x45 0x09 0x0a */ 468 0x00, 0x01, 0x00, 0x45, 0x09, 0x0a,
368 0x16, 0x12, 0x60, 0x86, 0x2b, 469 0x16, 0x12, 0x60, 0x86, 0x2b,
369 0x14, 0x0a, 0x02, 0x02, 0x09, 0x07 470 0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
370}; 471};
@@ -377,7 +478,7 @@ static const __u8 tas5110_sensor_init[][8] = {
377static const __u8 initTas5130[] = { 478static const __u8 initTas5130[] = {
378 0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x11, 0x00, 0x00, 0x00, 479 0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x11, 0x00, 0x00, 0x00,
379 0x00, 0x00, 480 0x00, 0x00,
380 0x00, 0x01, 0x00, 0x69, 0x0c, 0x0a, 481 0x00, 0x01, 0x00, 0x68, 0x0c, 0x0a,
381 0x28, 0x1e, 0x60, COMP, MCK_INIT, 482 0x28, 0x1e, 0x60, COMP, MCK_INIT,
382 0x18, 0x10, 0x04, 0x03, 0x11, 0x0c 483 0x18, 0x10, 0x04, 0x03, 0x11, 0x0c
383}; 484};
@@ -389,6 +490,21 @@ static const __u8 tas5130_sensor_init[][8] = {
389 {0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10}, 490 {0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10},
390}; 491};
391 492
493struct sensor_data sensor_data[] = {
494SENS(initHv7131, NULL, hv7131_sensor_init, NULL, NULL, 0, NO_EXPO|NO_FREQ, 0),
495SENS(initOv6650, NULL, ov6650_sensor_init, NULL, NULL, F_GAIN|F_SIF, 0, 0x60),
496SENS(initOv7630, initOv7630_3, ov7630_sensor_init, NULL, ov7630_sensor_init_3,
497 F_GAIN, 0, 0x21),
498SENS(initPas106, NULL, pas106_sensor_init, NULL, NULL, F_SIF, NO_EXPO|NO_FREQ,
499 0),
500SENS(initPas202, initPas202, pas202_sensor_init, NULL, NULL, 0,
501 NO_EXPO|NO_FREQ, 0),
502SENS(initTas5110, NULL, tas5110_sensor_init, NULL, NULL, F_GAIN|F_SIF,
503 NO_BRIGHTNESS|NO_FREQ, 0),
504SENS(initTas5130, NULL, tas5130_sensor_init, NULL, NULL, 0, NO_EXPO|NO_FREQ,
505 0),
506};
507
392/* get one byte in gspca_dev->usb_buf */ 508/* get one byte in gspca_dev->usb_buf */
393static void reg_r(struct gspca_dev *gspca_dev, 509static void reg_r(struct gspca_dev *gspca_dev,
394 __u16 value) 510 __u16 value)
@@ -409,7 +525,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
409 int len) 525 int len)
410{ 526{
411#ifdef GSPCA_DEBUG 527#ifdef GSPCA_DEBUG
412 if (len > sizeof gspca_dev->usb_buf) { 528 if (len > USB_BUF_SZ) {
413 PDEBUG(D_ERR|D_PACK, "reg_w: buffer overflow"); 529 PDEBUG(D_ERR|D_PACK, "reg_w: buffer overflow");
414 return; 530 return;
415 } 531 }
@@ -425,26 +541,6 @@ static void reg_w(struct gspca_dev *gspca_dev,
425 500); 541 500);
426} 542}
427 543
428static void reg_w_big(struct gspca_dev *gspca_dev,
429 __u16 value,
430 const __u8 *buffer,
431 int len)
432{
433 __u8 *tmpbuf;
434
435 tmpbuf = kmalloc(len, GFP_KERNEL);
436 memcpy(tmpbuf, buffer, len);
437 usb_control_msg(gspca_dev->dev,
438 usb_sndctrlpipe(gspca_dev->dev, 0),
439 0x08, /* request */
440 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
441 value,
442 0, /* index */
443 tmpbuf, len,
444 500);
445 kfree(tmpbuf);
446}
447
448static int i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer) 544static int i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
449{ 545{
450 int retry = 60; 546 int retry = 60;
@@ -487,7 +583,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
487 {0xa0, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10}; 583 {0xa0, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10};
488 584
489 /* change reg 0x06 */ 585 /* change reg 0x06 */
490 i2cOV[1] = sd->sensor_addr; 586 i2cOV[1] = sensor_data[sd->sensor].sensor_addr;
491 i2cOV[3] = sd->brightness; 587 i2cOV[3] = sd->brightness;
492 if (i2c_w(gspca_dev, i2cOV) < 0) 588 if (i2c_w(gspca_dev, i2cOV) < 0)
493 goto err; 589 goto err;
@@ -545,9 +641,6 @@ static void setbrightness(struct gspca_dev *gspca_dev)
545 goto err; 641 goto err;
546 break; 642 break;
547 } 643 }
548 case SENSOR_TAS5110:
549 /* FIXME figure out howto control brightness on TAS5110 */
550 break;
551 } 644 }
552 return; 645 return;
553err: 646err:
@@ -577,7 +670,7 @@ static void setsensorgain(struct gspca_dev *gspca_dev)
577 case SENSOR_OV7630: { 670 case SENSOR_OV7630: {
578 __u8 i2c[] = {0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}; 671 __u8 i2c[] = {0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10};
579 672
580 i2c[1] = sd->sensor_addr; 673 i2c[1] = sensor_data[sd->sensor].sensor_addr;
581 i2c[3] = gain >> 2; 674 i2c[3] = gain >> 2;
582 if (i2c_w(gspca_dev, i2c) < 0) 675 if (i2c_w(gspca_dev, i2c) < 0)
583 goto err; 676 goto err;
@@ -604,7 +697,7 @@ static void setgain(struct gspca_dev *gspca_dev)
604 rgb_value = gain; 697 rgb_value = gain;
605 reg_w(gspca_dev, 0x11, &rgb_value, 1); 698 reg_w(gspca_dev, 0x11, &rgb_value, 1);
606 699
607 if (sd->sensor_has_gain) 700 if (sensor_data[sd->sensor].flags & F_GAIN)
608 setsensorgain(gspca_dev); 701 setsensorgain(gspca_dev);
609} 702}
610 703
@@ -665,6 +758,11 @@ static void setexposure(struct gspca_dev *gspca_dev)
665 else if (reg11 > 16) 758 else if (reg11 > 16)
666 reg11 = 16; 759 reg11 = 16;
667 760
761 /* In 640x480, if the reg11 has less than 3, the image is
762 unstable (not enough bandwidth). */
763 if (gspca_dev->width == 640 && reg11 < 3)
764 reg11 = 3;
765
668 /* frame exposure time in ms = 1000 * reg11 / 30 -> 766 /* frame exposure time in ms = 1000 * reg11 / 30 ->
669 reg10 = sd->exposure * 2 * reg10_max / (1000 * reg11 / 30) */ 767 reg10 = sd->exposure * 2 * reg10_max / (1000 * reg11 / 30) */
670 reg10 = (sd->exposure * 60 * reg10_max) / (1000 * reg11); 768 reg10 = (sd->exposure * 60 * reg10_max) / (1000 * reg11);
@@ -678,13 +776,8 @@ static void setexposure(struct gspca_dev *gspca_dev)
678 else if (reg10 > reg10_max) 776 else if (reg10 > reg10_max)
679 reg10 = reg10_max; 777 reg10 = reg10_max;
680 778
681 /* In 640x480, if the reg11 has less than 3, the image is
682 unstable (not enough bandwidth). */
683 if (gspca_dev->width == 640 && reg11 < 3)
684 reg11 = 3;
685
686 /* Write reg 10 and reg11 low nibble */ 779 /* Write reg 10 and reg11 low nibble */
687 i2c[1] = sd->sensor_addr; 780 i2c[1] = sensor_data[sd->sensor].sensor_addr;
688 i2c[3] = reg10; 781 i2c[3] = reg10;
689 i2c[4] |= reg11 - 1; 782 i2c[4] |= reg11 - 1;
690 783
@@ -724,7 +817,7 @@ static void setfreq(struct gspca_dev *gspca_dev)
724 ? 0x4f : 0x8a; 817 ? 0x4f : 0x8a;
725 break; 818 break;
726 } 819 }
727 i2c[1] = sd->sensor_addr; 820 i2c[1] = sensor_data[sd->sensor].sensor_addr;
728 if (i2c_w(gspca_dev, i2c) < 0) 821 if (i2c_w(gspca_dev, i2c) < 0)
729 PDEBUG(D_ERR, "i2c error setfreq"); 822 PDEBUG(D_ERR, "i2c error setfreq");
730 break; 823 break;
@@ -757,30 +850,19 @@ static int sd_config(struct gspca_dev *gspca_dev,
757{ 850{
758 struct sd *sd = (struct sd *) gspca_dev; 851 struct sd *sd = (struct sd *) gspca_dev;
759 struct cam *cam; 852 struct cam *cam;
760 int sif = 0;
761 853
762 /* nctrls depends upon the sensor, so we use a per cam copy */ 854 reg_r(gspca_dev, 0x00);
763 memcpy(&sd->sd_desc, gspca_dev->sd_desc, sizeof(struct sd_desc)); 855 if (gspca_dev->usb_buf[0] != 0x10)
764 gspca_dev->sd_desc = &sd->sd_desc; 856 return -ENODEV;
765 857
766 /* copy the webcam info from the device id */ 858 /* copy the webcam info from the device id */
767 sd->sensor = (id->driver_info >> 24) & 0xff; 859 sd->sensor = id->driver_info >> 8;
768 if (id->driver_info & (F_GAIN << 16)) 860 sd->bridge = id->driver_info & 0xff;
769 sd->sensor_has_gain = 1; 861 gspca_dev->ctrl_dis = sensor_data[sd->sensor].ctrl_dis;
770 if (id->driver_info & (F_AUTO << 16))
771 sd->sd_desc.dq_callback = do_autogain;
772 if (id->driver_info & (F_SIF << 16))
773 sif = 1;
774 if (id->driver_info & (F_H18 << 16))
775 sd->fr_h_sz = 18; /* size of frame header */
776 else
777 sd->fr_h_sz = 12;
778 sd->sd_desc.nctrls = (id->driver_info >> 8) & 0xff;
779 sd->sensor_addr = id->driver_info & 0xff;
780 862
781 cam = &gspca_dev->cam; 863 cam = &gspca_dev->cam;
782 cam->epaddr = 0x01; 864 cam->epaddr = 0x01;
783 if (!sif) { 865 if (!(sensor_data[sd->sensor].flags & F_SIF)) {
784 cam->cam_mode = vga_mode; 866 cam->cam_mode = vga_mode;
785 cam->nmodes = ARRAY_SIZE(vga_mode); 867 cam->nmodes = ARRAY_SIZE(vga_mode);
786 } else { 868 } else {
@@ -790,157 +872,98 @@ static int sd_config(struct gspca_dev *gspca_dev,
790 sd->brightness = BRIGHTNESS_DEF; 872 sd->brightness = BRIGHTNESS_DEF;
791 sd->gain = GAIN_DEF; 873 sd->gain = GAIN_DEF;
792 sd->exposure = EXPOSURE_DEF; 874 sd->exposure = EXPOSURE_DEF;
793 sd->autogain = AUTOGAIN_DEF; 875 if (gspca_dev->ctrl_dis & (1 << AUTOGAIN_IDX))
876 sd->autogain = 0; /* Disable do_autogain callback */
877 else
878 sd->autogain = AUTOGAIN_DEF;
794 sd->freq = FREQ_DEF; 879 sd->freq = FREQ_DEF;
795 880
796 return 0; 881 return 0;
797} 882}
798 883
799/* this function is called at open time */ 884/* this function is called at probe and resume time */
800static int sd_open(struct gspca_dev *gspca_dev) 885static int sd_init(struct gspca_dev *gspca_dev)
801{ 886{
802 reg_r(gspca_dev, 0x00); 887 const __u8 stop = 0x09; /* Disable stream turn of LED */
803 if (gspca_dev->usb_buf[0] != 0x10)
804 return -ENODEV;
805 return 0;
806}
807 888
808static void pas106_i2cinit(struct gspca_dev *gspca_dev) 889 reg_w(gspca_dev, 0x01, &stop, 1);
809{ 890
810 int i; 891 return 0;
811 const __u8 *data;
812 __u8 i2c1[] = { 0xa1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14 };
813
814 i = ARRAY_SIZE(pas106_data);
815 data = pas106_data[0];
816 while (--i >= 0) {
817 memcpy(&i2c1[2], data, 2);
818 /* copy 2 bytes from the template */
819 if (i2c_w(gspca_dev, i2c1) < 0)
820 PDEBUG(D_ERR, "i2c error pas106");
821 data += 2;
822 }
823} 892}
824 893
825/* -- start the camera -- */ 894/* -- start the camera -- */
826static void sd_start(struct gspca_dev *gspca_dev) 895static void sd_start(struct gspca_dev *gspca_dev)
827{ 896{
828 struct sd *sd = (struct sd *) gspca_dev; 897 struct sd *sd = (struct sd *) gspca_dev;
829 int mode, l = 0x1f; 898 struct cam *cam = &gspca_dev->cam;
899 int mode, l;
830 const __u8 *sn9c10x; 900 const __u8 *sn9c10x;
831 __u8 reg17_19[3]; 901 __u8 reg12_19[8];
832 902
833 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 903 mode = cam->cam_mode[gspca_dev->curr_mode].priv & 0x07;
904 sn9c10x = sensor_data[sd->sensor].bridge_init[sd->bridge];
905 l = sensor_data[sd->sensor].bridge_init_size[sd->bridge];
906 memcpy(reg12_19, &sn9c10x[0x12 - 1], 8);
907 reg12_19[6] = sn9c10x[0x18 - 1] | (mode << 4);
908 /* Special cases where reg 17 and or 19 value depends on mode */
834 switch (sd->sensor) { 909 switch (sd->sensor) {
835 case SENSOR_HV7131R:
836 sn9c10x = initHv7131;
837 reg17_19[0] = 0x60;
838 reg17_19[1] = (mode << 4) | 0x8a;
839 reg17_19[2] = 0x20;
840 break;
841 case SENSOR_OV6650:
842 sn9c10x = initOv6650;
843 reg17_19[0] = 0x68;
844 reg17_19[1] = (mode << 4) | 0x8b;
845 reg17_19[2] = 0x20;
846 break;
847 case SENSOR_OV7630:
848 if (sd->fr_h_sz == 18) { /* SN9C103 */
849 sn9c10x = initOv7630_3;
850 l = sizeof initOv7630_3;
851 } else
852 sn9c10x = initOv7630;
853 reg17_19[0] = 0x68;
854 reg17_19[1] = (mode << 4) | COMP2;
855 reg17_19[2] = MCK_INIT1;
856 break;
857 case SENSOR_PAS106:
858 sn9c10x = initPas106;
859 reg17_19[0] = 0x24; /* 0x28 */
860 reg17_19[1] = (mode << 4) | COMP1;
861 reg17_19[2] = MCK_INIT1;
862 break;
863 case SENSOR_PAS202: 910 case SENSOR_PAS202:
864 sn9c10x = initPas202; 911 reg12_19[5] = mode ? 0x24 : 0x20;
865 reg17_19[0] = mode ? 0x24 : 0x20;
866 reg17_19[1] = (mode << 4) | 0x89;
867 reg17_19[2] = 0x20;
868 break; 912 break;
869 case SENSOR_TAS5110: 913 case SENSOR_TAS5130CXX:
870 sn9c10x = initTas5110; 914 /* probably not mode specific at all most likely the upper
871 reg17_19[0] = 0x60; 915 nibble of 0x19 is exposure (clock divider) just as with
872 reg17_19[1] = (mode << 4) | 0x86; 916 the tas5110, we need someone to test this. */
873 reg17_19[2] = 0x2b; /* 0xf3; */ 917 reg12_19[7] = mode ? 0x23 : 0x43;
874 break;
875 default:
876/* case SENSOR_TAS5130CXX: */
877 sn9c10x = initTas5130;
878 reg17_19[0] = 0x60;
879 reg17_19[1] = (mode << 4) | COMP;
880 reg17_19[2] = mode ? 0x23 : 0x43;
881 break; 918 break;
882 } 919 }
920 /* Disable compression when the raw bayer format has been selected */
921 if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
922 reg12_19[6] &= ~0x80;
923
924 /* Vga mode emulation on SIF sensor? */
925 if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_REDUCED_SIF) {
926 reg12_19[0] += 16; /* 0x12: hstart adjust */
927 reg12_19[1] += 24; /* 0x13: vstart adjust */
928 reg12_19[3] = 320 / 16; /* 0x15: hsize */
929 reg12_19[4] = 240 / 16; /* 0x16: vsize */
930 }
883 931
884 /* reg 0x01 bit 2 video transfert on */ 932 /* reg 0x01 bit 2 video transfert on */
885 reg_w(gspca_dev, 0x01, &sn9c10x[0x01 - 1], 1); 933 reg_w(gspca_dev, 0x01, &sn9c10x[0x01 - 1], 1);
886 /* reg 0x17 SensorClk enable inv Clk 0x60 */ 934 /* reg 0x17 SensorClk enable inv Clk 0x60 */
887 reg_w(gspca_dev, 0x17, &sn9c10x[0x17 - 1], 1); 935 reg_w(gspca_dev, 0x17, &sn9c10x[0x17 - 1], 1);
888 /* Set the registers from the template */ 936 /* Set the registers from the template */
889 reg_w_big(gspca_dev, 0x01, sn9c10x, l); 937 reg_w(gspca_dev, 0x01, sn9c10x, l);
890 switch (sd->sensor) { 938
891 case SENSOR_HV7131R: 939 /* Init the sensor */
892 i2c_w_vector(gspca_dev, hv7131_sensor_init, 940 i2c_w_vector(gspca_dev, sensor_data[sd->sensor].sensor_init,
893 sizeof hv7131_sensor_init); 941 sensor_data[sd->sensor].sensor_init_size);
894 break; 942 if (sensor_data[sd->sensor].sensor_bridge_init[sd->bridge])
895 case SENSOR_OV6650: 943 i2c_w_vector(gspca_dev,
896 i2c_w_vector(gspca_dev, ov6650_sensor_init, 944 sensor_data[sd->sensor].sensor_bridge_init[sd->bridge],
897 sizeof ov6650_sensor_init); 945 sensor_data[sd->sensor].sensor_bridge_init_size[
898 break; 946 sd->bridge]);
899 case SENSOR_OV7630: 947
900 i2c_w_vector(gspca_dev, ov7630_sensor_init,
901 sizeof ov7630_sensor_init);
902 if (sd->fr_h_sz == 18) { /* SN9C103 */
903 const __u8 i2c[] = { 0xa0, 0x21, 0x13, 0x80, 0x00,
904 0x00, 0x00, 0x10 };
905 i2c_w(gspca_dev, i2c);
906 }
907 break;
908 case SENSOR_PAS106:
909 pas106_i2cinit(gspca_dev);
910 break;
911 case SENSOR_PAS202:
912 i2c_w_vector(gspca_dev, pas202_sensor_init,
913 sizeof pas202_sensor_init);
914 break;
915 case SENSOR_TAS5110:
916 i2c_w_vector(gspca_dev, tas5110_sensor_init,
917 sizeof tas5110_sensor_init);
918 break;
919 default:
920/* case SENSOR_TAS5130CXX: */
921 i2c_w_vector(gspca_dev, tas5130_sensor_init,
922 sizeof tas5130_sensor_init);
923 break;
924 }
925 /* H_size V_size 0x28, 0x1e -> 640x480. 0x16, 0x12 -> 352x288 */ 948 /* H_size V_size 0x28, 0x1e -> 640x480. 0x16, 0x12 -> 352x288 */
926 reg_w(gspca_dev, 0x15, &sn9c10x[0x15 - 1], 2); 949 reg_w(gspca_dev, 0x15, &reg12_19[3], 2);
927 /* compression register */ 950 /* compression register */
928 reg_w(gspca_dev, 0x18, &reg17_19[1], 1); 951 reg_w(gspca_dev, 0x18, &reg12_19[6], 1);
929 /* H_start */ 952 /* H_start */
930 reg_w(gspca_dev, 0x12, &sn9c10x[0x12 - 1], 1); 953 reg_w(gspca_dev, 0x12, &reg12_19[0], 1);
931 /* V_START */ 954 /* V_START */
932 reg_w(gspca_dev, 0x13, &sn9c10x[0x13 - 1], 1); 955 reg_w(gspca_dev, 0x13, &reg12_19[1], 1);
933 /* reset 0x17 SensorClk enable inv Clk 0x60 */ 956 /* reset 0x17 SensorClk enable inv Clk 0x60 */
934 /*fixme: ov7630 [17]=68 8f (+20 if 102)*/ 957 /*fixme: ov7630 [17]=68 8f (+20 if 102)*/
935 reg_w(gspca_dev, 0x17, &reg17_19[0], 1); 958 reg_w(gspca_dev, 0x17, &reg12_19[5], 1);
936 /*MCKSIZE ->3 */ /*fixme: not ov7630*/ 959 /*MCKSIZE ->3 */ /*fixme: not ov7630*/
937 reg_w(gspca_dev, 0x19, &reg17_19[2], 1); 960 reg_w(gspca_dev, 0x19, &reg12_19[7], 1);
938 /* AE_STRX AE_STRY AE_ENDX AE_ENDY */ 961 /* AE_STRX AE_STRY AE_ENDX AE_ENDY */
939 reg_w(gspca_dev, 0x1c, &sn9c10x[0x1c - 1], 4); 962 reg_w(gspca_dev, 0x1c, &sn9c10x[0x1c - 1], 4);
940 /* Enable video transfert */ 963 /* Enable video transfert */
941 reg_w(gspca_dev, 0x01, &sn9c10x[0], 1); 964 reg_w(gspca_dev, 0x01, &sn9c10x[0], 1);
942 /* Compression */ 965 /* Compression */
943 reg_w(gspca_dev, 0x18, &reg17_19[1], 2); 966 reg_w(gspca_dev, 0x18, &reg12_19[6], 2);
944 msleep(20); 967 msleep(20);
945 968
946 sd->reg11 = -1; 969 sd->reg11 = -1;
@@ -957,18 +980,7 @@ static void sd_start(struct gspca_dev *gspca_dev)
957 980
958static void sd_stopN(struct gspca_dev *gspca_dev) 981static void sd_stopN(struct gspca_dev *gspca_dev)
959{ 982{
960 __u8 ByteSend; 983 sd_init(gspca_dev);
961
962 ByteSend = 0x09; /* 0X00 */
963 reg_w(gspca_dev, 0x01, &ByteSend, 1);
964}
965
966static void sd_stop0(struct gspca_dev *gspca_dev)
967{
968}
969
970static void sd_close(struct gspca_dev *gspca_dev)
971{
972} 984}
973 985
974static void sd_pkt_scan(struct gspca_dev *gspca_dev, 986static void sd_pkt_scan(struct gspca_dev *gspca_dev,
@@ -978,6 +990,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
978{ 990{
979 int i; 991 int i;
980 struct sd *sd = (struct sd *) gspca_dev; 992 struct sd *sd = (struct sd *) gspca_dev;
993 struct cam *cam = &gspca_dev->cam;
981 994
982 /* frames start with: 995 /* frames start with:
983 * ff ff 00 c4 c4 96 synchro 996 * ff ff 00 c4 c4 96 synchro
@@ -998,20 +1011,31 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
998 && data[5 + i] == 0x96) { /* start of frame */ 1011 && data[5 + i] == 0x96) { /* start of frame */
999 int lum = -1; 1012 int lum = -1;
1000 int pkt_type = LAST_PACKET; 1013 int pkt_type = LAST_PACKET;
1014 int fr_h_sz = (sd->bridge == BRIDGE_103) ?
1015 18 : 12;
1001 1016
1002 if (len - i < sd->fr_h_sz) { 1017 if (len - i < fr_h_sz) {
1003 PDEBUG(D_STREAM, "packet too short to" 1018 PDEBUG(D_STREAM, "packet too short to"
1004 " get avg brightness"); 1019 " get avg brightness");
1005 } else if (sd->fr_h_sz == 12) { 1020 } else if (sd->bridge == BRIDGE_103) {
1006 lum = data[i + 8] + (data[i + 9] << 8);
1007 } else {
1008 lum = data[i + 9] + 1021 lum = data[i + 9] +
1009 (data[i + 10] << 8); 1022 (data[i + 10] << 8);
1023 } else {
1024 lum = data[i + 8] + (data[i + 9] << 8);
1010 } 1025 }
1011 if (lum == 0) { 1026 /* When exposure changes midway a frame we
1027 get a lum of 0 in this case drop 2 frames
1028 as the frames directly after an exposure
1029 change have an unstable image. Sometimes lum
1030 *really* is 0 (cam used in low light with
1031 low exposure setting), so do not drop frames
1032 if the previous lum was 0 too. */
1033 if (lum == 0 && sd->prev_avg_lum != 0) {
1012 lum = -1; 1034 lum = -1;
1013 sd->frames_to_drop = 2; 1035 sd->frames_to_drop = 2;
1014 } 1036 sd->prev_avg_lum = 0;
1037 } else
1038 sd->prev_avg_lum = lum;
1015 atomic_set(&sd->avg_lum, lum); 1039 atomic_set(&sd->avg_lum, lum);
1016 1040
1017 if (sd->frames_to_drop) { 1041 if (sd->frames_to_drop) {
@@ -1021,14 +1045,25 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1021 1045
1022 frame = gspca_frame_add(gspca_dev, pkt_type, 1046 frame = gspca_frame_add(gspca_dev, pkt_type,
1023 frame, data, 0); 1047 frame, data, 0);
1024 data += i + sd->fr_h_sz; 1048 data += i + fr_h_sz;
1025 len -= i + sd->fr_h_sz; 1049 len -= i + fr_h_sz;
1026 gspca_frame_add(gspca_dev, FIRST_PACKET, 1050 gspca_frame_add(gspca_dev, FIRST_PACKET,
1027 frame, data, len); 1051 frame, data, len);
1028 return; 1052 return;
1029 } 1053 }
1030 } 1054 }
1031 } 1055 }
1056
1057 if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW) {
1058 /* In raw mode we sometimes get some garbage after the frame
1059 ignore this */
1060 int used = frame->data_end - frame->data;
1061 int size = cam->cam_mode[gspca_dev->curr_mode].sizeimage;
1062
1063 if (used + len > size)
1064 len = size - used;
1065 }
1066
1032 gspca_frame_add(gspca_dev, INTER_PACKET, 1067 gspca_frame_add(gspca_dev, INTER_PACKET,
1033 frame, data, len); 1068 frame, data, len);
1034} 1069}
@@ -1162,58 +1197,45 @@ static const struct sd_desc sd_desc = {
1162 .ctrls = sd_ctrls, 1197 .ctrls = sd_ctrls,
1163 .nctrls = ARRAY_SIZE(sd_ctrls), 1198 .nctrls = ARRAY_SIZE(sd_ctrls),
1164 .config = sd_config, 1199 .config = sd_config,
1165 .open = sd_open, 1200 .init = sd_init,
1166 .start = sd_start, 1201 .start = sd_start,
1167 .stopN = sd_stopN, 1202 .stopN = sd_stopN,
1168 .stop0 = sd_stop0,
1169 .close = sd_close,
1170 .pkt_scan = sd_pkt_scan, 1203 .pkt_scan = sd_pkt_scan,
1171 .querymenu = sd_querymenu, 1204 .querymenu = sd_querymenu,
1205 .dq_callback = do_autogain,
1172}; 1206};
1173 1207
1174/* -- module initialisation -- */ 1208/* -- module initialisation -- */
1175#define SFCI(sensor, flags, nctrls, i2c_addr) \ 1209#define SB(sensor, bridge) \
1176 .driver_info = (SENSOR_ ## sensor << 24) \ 1210 .driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge
1177 | ((flags) << 16) \ 1211
1178 | ((nctrls) << 8) \ 1212
1179 | (i2c_addr)
1180static __devinitdata struct usb_device_id device_table[] = { 1213static __devinitdata struct usb_device_id device_table[] = {
1181#ifndef CONFIG_USB_SN9C102 1214 {USB_DEVICE(0x0c45, 0x6001), SB(TAS5110, 102)}, /* TAS5110C1B */
1182 {USB_DEVICE(0x0c45, 0x6001), /* SN9C102 */ 1215 {USB_DEVICE(0x0c45, 0x6005), SB(TAS5110, 101)}, /* TAS5110C1B */
1183 SFCI(TAS5110, F_GAIN|F_AUTO|F_SIF, 4, 0)}, 1216#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1184 {USB_DEVICE(0x0c45, 0x6005), /* SN9C101 */ 1217 {USB_DEVICE(0x0c45, 0x6007), SB(TAS5110, 101)}, /* TAS5110D */
1185 SFCI(TAS5110, F_GAIN|F_AUTO|F_SIF, 4, 0)}, 1218 {USB_DEVICE(0x0c45, 0x6009), SB(PAS106, 101)},
1186 {USB_DEVICE(0x0c45, 0x6007), /* SN9C101 */ 1219 {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
1187 SFCI(TAS5110, F_GAIN|F_AUTO|F_SIF, 4, 0)},
1188 {USB_DEVICE(0x0c45, 0x6009), /* SN9C101 */
1189 SFCI(PAS106, F_SIF, 2, 0)},
1190 {USB_DEVICE(0x0c45, 0x600d), /* SN9C101 */
1191 SFCI(PAS106, F_SIF, 2, 0)},
1192#endif 1220#endif
1193 {USB_DEVICE(0x0c45, 0x6011), /* SN9C101 - SN9C101G */ 1221 {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
1194 SFCI(OV6650, F_GAIN|F_AUTO|F_SIF, 5, 0x60)}, 1222#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1195#ifndef CONFIG_USB_SN9C102 1223 {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
1196 {USB_DEVICE(0x0c45, 0x6019), /* SN9C101 */ 1224 {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
1197 SFCI(OV7630, F_GAIN|F_AUTO, 5, 0x21)}, 1225 {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
1198 {USB_DEVICE(0x0c45, 0x6024), /* SN9C102 */ 1226 {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
1199 SFCI(TAS5130CXX, 0, 2, 0)}, 1227 {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
1200 {USB_DEVICE(0x0c45, 0x6025), /* SN9C102 */ 1228 {USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)},
1201 SFCI(TAS5130CXX, 0, 2, 0)},
1202 {USB_DEVICE(0x0c45, 0x6028), /* SN9C102 */
1203 SFCI(PAS202, 0, 2, 0)},
1204 {USB_DEVICE(0x0c45, 0x6029), /* SN9C101 */
1205 SFCI(PAS106, F_SIF, 2, 0)},
1206 {USB_DEVICE(0x0c45, 0x602c), /* SN9C102 */
1207 SFCI(OV7630, F_GAIN|F_AUTO, 5, 0x21)},
1208 {USB_DEVICE(0x0c45, 0x602d), /* SN9C102 */
1209 SFCI(HV7131R, 0, 2, 0)},
1210 {USB_DEVICE(0x0c45, 0x602e), /* SN9C102 */
1211 SFCI(OV7630, F_GAIN|F_AUTO, 5, 0x21)},
1212 {USB_DEVICE(0x0c45, 0x60af), /* SN9C103 */
1213 SFCI(PAS202, F_H18, 2, 0)},
1214 {USB_DEVICE(0x0c45, 0x60b0), /* SN9C103 */
1215 SFCI(OV7630, F_GAIN|F_AUTO|F_H18, 5, 0x21)},
1216#endif 1229#endif
1230 {USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)},
1231#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1232 {USB_DEVICE(0x0c45, 0x602e), SB(OV7630, 102)},
1233#endif
1234 {USB_DEVICE(0x0c45, 0x608f), SB(OV7630, 103)},
1235#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1236 {USB_DEVICE(0x0c45, 0x60af), SB(PAS202, 103)},
1237#endif
1238 {USB_DEVICE(0x0c45, 0x60b0), SB(OV7630, 103)},
1217 {} 1239 {}
1218}; 1240};
1219MODULE_DEVICE_TABLE(usb, device_table); 1241MODULE_DEVICE_TABLE(usb, device_table);
@@ -1231,6 +1253,10 @@ static struct usb_driver sd_driver = {
1231 .id_table = device_table, 1253 .id_table = device_table,
1232 .probe = sd_probe, 1254 .probe = sd_probe,
1233 .disconnect = gspca_disconnect, 1255 .disconnect = gspca_disconnect,
1256#ifdef CONFIG_PM
1257 .suspend = gspca_suspend,
1258 .resume = gspca_resume,
1259#endif
1234}; 1260};
1235 1261
1236/* -- module insert / remove -- */ 1262/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 245a30ec5fb1..d75b1d20b318 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -54,8 +54,10 @@ struct sd {
54#define SENSOR_HV7131R 0 54#define SENSOR_HV7131R 0
55#define SENSOR_MI0360 1 55#define SENSOR_MI0360 1
56#define SENSOR_MO4000 2 56#define SENSOR_MO4000 2
57#define SENSOR_OV7648 3 57#define SENSOR_OM6802 3
58#define SENSOR_OV7660 4 58#define SENSOR_OV7630 4
59#define SENSOR_OV7648 5
60#define SENSOR_OV7660 6
59 unsigned char i2c_base; 61 unsigned char i2c_base;
60}; 62};
61 63
@@ -76,7 +78,8 @@ static struct ctrl sd_ctrls[] = {
76 .type = V4L2_CTRL_TYPE_INTEGER, 78 .type = V4L2_CTRL_TYPE_INTEGER,
77 .name = "Brightness", 79 .name = "Brightness",
78 .minimum = 0, 80 .minimum = 0,
79 .maximum = 0xffff, 81#define BRIGHTNESS_MAX 0xffff
82 .maximum = BRIGHTNESS_MAX,
80 .step = 1, 83 .step = 1,
81#define BRIGHTNESS_DEF 0x7fff 84#define BRIGHTNESS_DEF 0x7fff
82 .default_value = BRIGHTNESS_DEF, 85 .default_value = BRIGHTNESS_DEF,
@@ -90,7 +93,8 @@ static struct ctrl sd_ctrls[] = {
90 .type = V4L2_CTRL_TYPE_INTEGER, 93 .type = V4L2_CTRL_TYPE_INTEGER,
91 .name = "Contrast", 94 .name = "Contrast",
92 .minimum = 0, 95 .minimum = 0,
93 .maximum = 127, 96#define CONTRAST_MAX 127
97 .maximum = CONTRAST_MAX,
94 .step = 1, 98 .step = 1,
95#define CONTRAST_DEF 63 99#define CONTRAST_DEF 63
96 .default_value = CONTRAST_DEF, 100 .default_value = CONTRAST_DEF,
@@ -104,14 +108,15 @@ static struct ctrl sd_ctrls[] = {
104 .type = V4L2_CTRL_TYPE_INTEGER, 108 .type = V4L2_CTRL_TYPE_INTEGER,
105 .name = "Color", 109 .name = "Color",
106 .minimum = 0, 110 .minimum = 0,
107 .maximum = 255, 111 .maximum = 64,
108 .step = 1, 112 .step = 1,
109#define COLOR_DEF 127 113#define COLOR_DEF 32
110 .default_value = COLOR_DEF, 114 .default_value = COLOR_DEF,
111 }, 115 },
112 .set = sd_setcolors, 116 .set = sd_setcolors,
113 .get = sd_getcolors, 117 .get = sd_getcolors,
114 }, 118 },
119#define AUTOGAIN_IDX 3
115 { 120 {
116 { 121 {
117 .id = V4L2_CID_AUTOGAIN, 122 .id = V4L2_CID_AUTOGAIN,
@@ -131,7 +136,7 @@ static struct ctrl sd_ctrls[] = {
131static struct v4l2_pix_format vga_mode[] = { 136static struct v4l2_pix_format vga_mode[] = {
132 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 137 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
133 .bytesperline = 160, 138 .bytesperline = 160,
134 .sizeimage = 160 * 120 * 3 / 8 + 590, 139 .sizeimage = 160 * 120 * 4 / 8 + 590,
135 .colorspace = V4L2_COLORSPACE_JPEG, 140 .colorspace = V4L2_COLORSPACE_JPEG,
136 .priv = 2}, 141 .priv = 2},
137 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 142 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
@@ -180,6 +185,31 @@ static const __u8 sn_mo4000[] = {
180 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 185 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
181}; 186};
182 187
188static const __u8 sn_om6802[] = {
189/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
190 0x00, 0x23, 0x72, 0x00, 0x1a, 0x34, 0x27, 0x20,
191/* reg8 reg9 rega regb regc regd rege regf */
192 0x80, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
193/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
194 0x03, 0x00, 0x51, 0x01, 0x00, 0x28, 0x1e, 0x40,
195/* reg18 reg19 reg1a reg1b reg1c reg1d reg1e reg1f */
196 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
197 0x08, 0x22, 0x44, 0x63, 0x7d, 0x92, 0xa3, 0xaf,
198 0xbc, 0xc4, 0xcd, 0xd5, 0xdc, 0xe1, 0xe8, 0xef,
199 0xf7
200};
201
202static const __u8 sn_ov7630[] = {
203/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
204 0x00, 0x21, 0x40, 0x00, 0x1a, 0x20, 0x1f, 0x20,
205/* reg8 reg9 rega regb regc regd rege regf */
206 0xa1, 0x21, 0x76, 0x21, 0x00, 0x00, 0x00, 0x10,
207/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
208 0x03, 0x00, 0x04, 0x01, 0x0a, 0x28, 0x1e, 0xc2,
209/* reg18 reg19 reg1a reg1b reg1c reg1d reg1e reg1f */
210 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00
211};
212
183static const __u8 sn_ov7648[] = { 213static const __u8 sn_ov7648[] = {
184/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 214/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
185 0x00, 0x21, 0x62, 0x00, 0x1a, 0x20, 0x20, 0x20, 215 0x00, 0x21, 0x62, 0x00, 0x1a, 0x20, 0x20, 0x20,
@@ -207,31 +237,22 @@ static const __u8 *sn_tb[] = {
207 sn_hv7131, 237 sn_hv7131,
208 sn_mi0360, 238 sn_mi0360,
209 sn_mo4000, 239 sn_mo4000,
240 sn_om6802,
241 sn_ov7630,
210 sn_ov7648, 242 sn_ov7648,
211 sn_ov7660 243 sn_ov7660
212}; 244};
213 245
214static const __u8 regsn20[] = { 246static const __u8 gamma_def[] = {
215 0x00, 0x2d, 0x46, 0x5a, 0x6c, 0x7c, 0x8b, 0x99, 247 0x00, 0x2d, 0x46, 0x5a, 0x6c, 0x7c, 0x8b, 0x99,
216 0xa6, 0xb2, 0xbf, 0xca, 0xd5, 0xe0, 0xeb, 0xf5, 0xff 248 0xa6, 0xb2, 0xbf, 0xca, 0xd5, 0xe0, 0xeb, 0xf5, 0xff
217}; 249};
218static const __u8 regsn20_sn9c325[] = {
219 0x0a, 0x3a, 0x56, 0x6c, 0x7e, 0x8d, 0x9a, 0xa4,
220 0xaf, 0xbb, 0xc5, 0xcd, 0xd5, 0xde, 0xe8, 0xed, 0xf5
221};
222 250
223static const __u8 reg84[] = { 251static const __u8 reg84[] = {
224 0x14, 0x00, 0x27, 0x00, 0x07, 0x00, 0xe5, 0x0f, 252 0x14, 0x00, 0x27, 0x00, 0x07, 0x00, 0xe5, 0x0f,
225 0xe4, 0x0f, 0x38, 0x00, 0x3e, 0x00, 0xc3, 0x0f, 253 0xe4, 0x0f, 0x38, 0x00, 0x3e, 0x00, 0xc3, 0x0f,
226/* 0x00, 0x00, 0x00, 0x00, 0x00 */ 254 0xf7, 0x0f, 0x00, 0x00, 0x00
227 0xf7, 0x0f, 0x0a, 0x00, 0x00
228};
229static const __u8 reg84_sn9c325[] = {
230 0x14, 0x00, 0x27, 0x00, 0x07, 0x00, 0xe4, 0x0f,
231 0xd3, 0x0f, 0x4b, 0x00, 0x48, 0x00, 0xc0, 0x0f,
232 0xf8, 0x0f, 0x00, 0x00, 0x00
233}; 255};
234
235static const __u8 hv7131r_sensor_init[][8] = { 256static const __u8 hv7131r_sensor_init[][8] = {
236 {0xC1, 0x11, 0x01, 0x08, 0x01, 0x00, 0x00, 0x10}, 257 {0xC1, 0x11, 0x01, 0x08, 0x01, 0x00, 0x00, 0x10},
237 {0xB1, 0x11, 0x34, 0x17, 0x7F, 0x00, 0x00, 0x10}, 258 {0xB1, 0x11, 0x34, 0x17, 0x7F, 0x00, 0x00, 0x10},
@@ -340,6 +361,92 @@ static const __u8 mo4000_sensor_init[][8] = {
340 {0xa1, 0x21, 0x11, 0x38, 0x00, 0x00, 0x00, 0x10}, 361 {0xa1, 0x21, 0x11, 0x38, 0x00, 0x00, 0x00, 0x10},
341 {} 362 {}
342}; 363};
364static __u8 om6802_sensor_init[][8] = {
365 {0xa0, 0x34, 0x90, 0x05, 0x00, 0x00, 0x00, 0x10},
366 {0xa0, 0x34, 0x49, 0x85, 0x00, 0x00, 0x00, 0x10},
367 {0xa0, 0x34, 0x5a, 0xc0, 0x00, 0x00, 0x00, 0x10},
368 {0xa0, 0x34, 0xdd, 0x18, 0x00, 0x00, 0x00, 0x10},
369/* {0xa0, 0x34, 0xfb, 0x11, 0x00, 0x00, 0x00, 0x10}, */
370 {0xa0, 0x34, 0xf0, 0x04, 0x00, 0x00, 0x00, 0x10},
371 /* white balance & auto-exposure */
372/* {0xa0, 0x34, 0xf1, 0x02, 0x00, 0x00, 0x00, 0x10},
373 * set color mode */
374/* {0xa0, 0x34, 0xfe, 0x5b, 0x00, 0x00, 0x00, 0x10},
375 * max AGC value in AE */
376/* {0xa0, 0x34, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x10},
377 * preset AGC */
378/* {0xa0, 0x34, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x10},
379 * preset brightness */
380/* {0xa0, 0x34, 0xe7, 0x00, 0x00, 0x00, 0x00, 0x10},
381 * preset contrast */
382/* {0xa0, 0x34, 0xe8, 0x31, 0x00, 0x00, 0x00, 0x10},
383 * preset gamma */
384 {0xa0, 0x34, 0xe9, 0x0f, 0x00, 0x00, 0x00, 0x10},
385 /* luminance mode (0x4f = AE) */
386 {0xa0, 0x34, 0xe4, 0xff, 0x00, 0x00, 0x00, 0x10},
387 /* preset shutter */
388/* {0xa0, 0x34, 0xef, 0x00, 0x00, 0x00, 0x00, 0x10},
389 * auto frame rate */
390/* {0xa0, 0x34, 0xfb, 0xee, 0x00, 0x00, 0x00, 0x10}, */
391
392/* {0xa0, 0x34, 0x71, 0x84, 0x00, 0x00, 0x00, 0x10}, */
393/* {0xa0, 0x34, 0x72, 0x05, 0x00, 0x00, 0x00, 0x10}, */
394/* {0xa0, 0x34, 0x68, 0x80, 0x00, 0x00, 0x00, 0x10}, */
395/* {0xa0, 0x34, 0x69, 0x01, 0x00, 0x00, 0x00, 0x10}, */
396 {}
397};
398static const __u8 ov7630_sensor_init[][8] = {
399 {0xa1, 0x21, 0x76, 0x01, 0x00, 0x00, 0x00, 0x10},
400 {0xa1, 0x21, 0x12, 0xc8, 0x00, 0x00, 0x00, 0x10},
401/* win: delay 20ms */
402 {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
403 {0xa1, 0x21, 0x12, 0xc8, 0x00, 0x00, 0x00, 0x10},
404/* win: delay 20ms */
405 {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
406/* win: i2c_r from 00 to 80 */
407 {0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10},
408 {0xb1, 0x21, 0x0c, 0x20, 0x20, 0x00, 0x00, 0x10},
409 {0xd1, 0x21, 0x11, 0x00, 0x48, 0xc0, 0x00, 0x10},
410 {0xb1, 0x21, 0x15, 0x80, 0x03, 0x00, 0x00, 0x10},
411 {0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10},
412 {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10},
413 {0xd1, 0x21, 0x1f, 0x00, 0x80, 0x80, 0x80, 0x10},
414 {0xd1, 0x21, 0x23, 0xde, 0x10, 0x8a, 0xa0, 0x10},
415 {0xc1, 0x21, 0x27, 0xca, 0xa2, 0x74, 0x00, 0x10},
416 {0xd1, 0x21, 0x2a, 0x88, 0x00, 0x88, 0x01, 0x10},
417 {0xc1, 0x21, 0x2e, 0x80, 0x00, 0x18, 0x00, 0x10},
418 {0xa1, 0x21, 0x21, 0x08, 0x00, 0x00, 0x00, 0x10},
419 {0xa1, 0x21, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10},
420 {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10},
421 {0xb1, 0x21, 0x32, 0xc2, 0x08, 0x00, 0x00, 0x10},
422 {0xb1, 0x21, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x10},
423 {0xd1, 0x21, 0x60, 0x05, 0x40, 0x12, 0x57, 0x10},
424 {0xa1, 0x21, 0x64, 0x73, 0x00, 0x00, 0x00, 0x10},
425 {0xd1, 0x21, 0x65, 0x00, 0x55, 0x01, 0xac, 0x10},
426 {0xa1, 0x21, 0x69, 0x38, 0x00, 0x00, 0x00, 0x10},
427 {0xd1, 0x21, 0x6f, 0x1f, 0x01, 0x00, 0x10, 0x10},
428 {0xd1, 0x21, 0x73, 0x50, 0x20, 0x02, 0x01, 0x10},
429 {0xd1, 0x21, 0x77, 0xf3, 0x90, 0x98, 0x98, 0x10},
430 {0xc1, 0x21, 0x7b, 0x00, 0x4c, 0xf7, 0x00, 0x10},
431 {0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10},
432 {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10},
433/* */
434 {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
435 {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10},
436/*fixme: + 0x12, 0x04*/
437 {0xa1, 0x21, 0x75, 0x82, 0x00, 0x00, 0x00, 0x10},
438 {0xa1, 0x21, 0x10, 0x32, 0x00, 0x00, 0x00, 0x10},
439 {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
440 {0xb1, 0x21, 0x01, 0x80, 0x80, 0x00, 0x00, 0x10},
441/* */
442 {0xa1, 0x21, 0x11, 0x00, 0x00, 0x00, 0x00, 0x10},
443 {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10},
444 {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10},
445/* */
446 {0xa1, 0x21, 0x10, 0x83, 0x00, 0x00, 0x00, 0x10},
447/* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */
448 {}
449};
343static const __u8 ov7660_sensor_init[][8] = { 450static const __u8 ov7660_sensor_init[][8] = {
344 {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset SCCB */ 451 {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset SCCB */
345/* (delay 20ms) */ 452/* (delay 20ms) */
@@ -506,10 +613,16 @@ static const __u8 qtable4[] = {
506 0x29, 0x29, 0x29, 0x29 613 0x29, 0x29, 0x29, 0x29
507}; 614};
508 615
509/* read <len> bytes (len < sizeof gspca_dev->usb_buf) to gspca_dev->usb_buf */ 616/* read <len> bytes to gspca_dev->usb_buf */
510static void reg_r(struct gspca_dev *gspca_dev, 617static void reg_r(struct gspca_dev *gspca_dev,
511 __u16 value, int len) 618 __u16 value, int len)
512{ 619{
620#ifdef GSPCA_DEBUG
621 if (len > USB_BUF_SZ) {
622 err("reg_r: buffer overflow");
623 return;
624 }
625#endif
513 usb_control_msg(gspca_dev->dev, 626 usb_control_msg(gspca_dev->dev,
514 usb_rcvctrlpipe(gspca_dev->dev, 0), 627 usb_rcvctrlpipe(gspca_dev->dev, 0),
515 0, 628 0,
@@ -542,29 +655,20 @@ static void reg_w(struct gspca_dev *gspca_dev,
542{ 655{
543 PDEBUG(D_USBO, "reg_w [%02x] = %02x %02x ..", 656 PDEBUG(D_USBO, "reg_w [%02x] = %02x %02x ..",
544 value, buffer[0], buffer[1]); 657 value, buffer[0], buffer[1]);
545 if (len <= sizeof gspca_dev->usb_buf) { 658#ifdef GSPCA_DEBUG
546 memcpy(gspca_dev->usb_buf, buffer, len); 659 if (len > USB_BUF_SZ) {
547 usb_control_msg(gspca_dev->dev, 660 err("reg_w: buffer overflow");
548 usb_sndctrlpipe(gspca_dev->dev, 0), 661 return;
549 0x08,
550 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
551 value, 0,
552 gspca_dev->usb_buf, len,
553 500);
554 } else {
555 __u8 *tmpbuf;
556
557 tmpbuf = kmalloc(len, GFP_KERNEL);
558 memcpy(tmpbuf, buffer, len);
559 usb_control_msg(gspca_dev->dev,
560 usb_sndctrlpipe(gspca_dev->dev, 0),
561 0x08,
562 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
563 value, 0,
564 tmpbuf, len,
565 500);
566 kfree(tmpbuf);
567 } 662 }
663#endif
664 memcpy(gspca_dev->usb_buf, buffer, len);
665 usb_control_msg(gspca_dev->dev,
666 usb_sndctrlpipe(gspca_dev->dev, 0),
667 0x08,
668 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
669 value, 0,
670 gspca_dev->usb_buf, len,
671 500);
568} 672}
569 673
570/* I2C write 1 byte */ 674/* I2C write 1 byte */
@@ -665,7 +769,7 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
665 static const __u8 regd4[] = {0x60, 0x00, 0x00}; 769 static const __u8 regd4[] = {0x60, 0x00, 0x00};
666 770
667 reg_w1(gspca_dev, 0xf1, 0x00); 771 reg_w1(gspca_dev, 0xf1, 0x00);
668 reg_w1(gspca_dev, 0x01, 0x00); /*jfm was sn9c1xx[1] in v1*/ 772 reg_w1(gspca_dev, 0x01, sn9c1xx[1]);
669 773
670 /* configure gpio */ 774 /* configure gpio */
671 reg_w(gspca_dev, 0x01, &sn9c1xx[1], 2); 775 reg_w(gspca_dev, 0x01, &sn9c1xx[1], 2);
@@ -685,21 +789,41 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
685 789
686 reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); 790 reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f);
687 791
688 switch (sd->bridge) { 792 switch (sd->sensor) {
689 case BRIDGE_SN9C325: 793 case SENSOR_OM6802:
794 reg_w1(gspca_dev, 0x02, 0x71);
795 reg_w1(gspca_dev, 0x01, 0x42);
796 reg_w1(gspca_dev, 0x17, 0x64);
797 reg_w1(gspca_dev, 0x01, 0x42);
798 break;
799/*jfm: from win trace */
800 case SENSOR_OV7630:
801 reg_w1(gspca_dev, 0x01, 0x61);
802 reg_w1(gspca_dev, 0x17, 0xe2);
803 reg_w1(gspca_dev, 0x01, 0x60);
804 reg_w1(gspca_dev, 0x01, 0x40);
805 break;
806 case SENSOR_OV7648:
690 reg_w1(gspca_dev, 0x01, 0x43); 807 reg_w1(gspca_dev, 0x01, 0x43);
691 reg_w1(gspca_dev, 0x17, 0xae); 808 reg_w1(gspca_dev, 0x17, 0xae);
692 reg_w1(gspca_dev, 0x01, 0x42); 809 reg_w1(gspca_dev, 0x01, 0x42);
693 break; 810 break;
811/*jfm: from win trace */
812 case SENSOR_OV7660:
813 reg_w1(gspca_dev, 0x01, 0x61);
814 reg_w1(gspca_dev, 0x17, 0x20);
815 reg_w1(gspca_dev, 0x01, 0x60);
816 reg_w1(gspca_dev, 0x01, 0x40);
817 break;
694 default: 818 default:
695 reg_w1(gspca_dev, 0x01, 0x43); 819 reg_w1(gspca_dev, 0x01, 0x43);
696 reg_w1(gspca_dev, 0x17, 0x61); 820 reg_w1(gspca_dev, 0x17, 0x61);
697 reg_w1(gspca_dev, 0x01, 0x42); 821 reg_w1(gspca_dev, 0x01, 0x42);
698 } 822 if (sd->sensor == SENSOR_HV7131R) {
699 823 if (probesensor(gspca_dev) < 0)
700 if (sd->sensor == SENSOR_HV7131R) { 824 return -ENODEV;
701 if (probesensor(gspca_dev) < 0) 825 }
702 return -ENODEV; 826 break;
703 } 827 }
704 return 0; 828 return 0;
705} 829}
@@ -737,6 +861,40 @@ static void mo4000_InitSensor(struct gspca_dev *gspca_dev)
737 } 861 }
738} 862}
739 863
864static void om6802_InitSensor(struct gspca_dev *gspca_dev)
865{
866 int i = 0;
867
868 while (om6802_sensor_init[i][0]) {
869 i2c_w8(gspca_dev, om6802_sensor_init[i]);
870 i++;
871 }
872}
873
874static void ov7630_InitSensor(struct gspca_dev *gspca_dev)
875{
876 int i = 0;
877
878 i2c_w8(gspca_dev, ov7630_sensor_init[i]); /* 76 01 */
879 i++;
880 i2c_w8(gspca_dev, ov7630_sensor_init[i]); /* 12 c8 (RGB+SRST) */
881 i++;
882 msleep(20);
883 i2c_w8(gspca_dev, ov7630_sensor_init[i]); /* 12 48 */
884 i++;
885 i2c_w8(gspca_dev, ov7630_sensor_init[i]); /* 12 c8 */
886 i++;
887 msleep(20);
888 i2c_w8(gspca_dev, ov7630_sensor_init[i]); /* 12 48 */
889 i++;
890/*jfm:win i2c_r from 00 to 80*/
891
892 while (ov7630_sensor_init[i][0]) {
893 i2c_w8(gspca_dev, ov7630_sensor_init[i]);
894 i++;
895 }
896}
897
740static void ov7648_InitSensor(struct gspca_dev *gspca_dev) 898static void ov7648_InitSensor(struct gspca_dev *gspca_dev)
741{ 899{
742 int i = 0; 900 int i = 0;
@@ -783,11 +941,19 @@ static int sd_config(struct gspca_dev *gspca_dev,
783 sd->autogain = AUTOGAIN_DEF; 941 sd->autogain = AUTOGAIN_DEF;
784 sd->ag_cnt = -1; 942 sd->ag_cnt = -1;
785 943
944 switch (sd->sensor) {
945 case SENSOR_OV7630:
946 case SENSOR_OV7648:
947 case SENSOR_OV7660:
948 gspca_dev->ctrl_dis = (1 << AUTOGAIN_IDX);
949 break;
950 }
951
786 return 0; 952 return 0;
787} 953}
788 954
789/* this function is called at open time */ 955/* this function is called at probe and resume time */
790static int sd_open(struct gspca_dev *gspca_dev) 956static int sd_init(struct gspca_dev *gspca_dev)
791{ 957{
792 struct sd *sd = (struct sd *) gspca_dev; 958 struct sd *sd = (struct sd *) gspca_dev;
793/* const __u8 *sn9c1xx; */ 959/* const __u8 *sn9c1xx; */
@@ -891,16 +1057,53 @@ static unsigned int setexposure(struct gspca_dev *gspca_dev,
891 | ((expoMo10[3] & 0x30) >> 4)); 1057 | ((expoMo10[3] & 0x30) >> 4));
892 break; 1058 break;
893 } 1059 }
1060 case SENSOR_OM6802: {
1061 __u8 gainOm[] =
1062 { 0xa0, 0x34, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x10 };
1063
1064 if (expo > 0x03ff)
1065 expo = 0x03ff;
1066 if (expo < 0x0001)
1067 expo = 0x0001;
1068 gainOm[3] = expo >> 2;
1069 i2c_w8(gspca_dev, gainOm);
1070 reg_w1(gspca_dev, 0x96, (expo >> 5) & 0x1f);
1071 PDEBUG(D_CONF, "set exposure %d", gainOm[3]);
1072 break;
1073 }
894 } 1074 }
895 return expo; 1075 return expo;
896} 1076}
897 1077
1078/* this function is used for sensors o76xx only */
1079static void setbrightcont(struct gspca_dev *gspca_dev)
1080{
1081 struct sd *sd = (struct sd *) gspca_dev;
1082 unsigned val;
1083 __u8 reg84_full[0x15];
1084
1085 memset(reg84_full, 0, sizeof reg84_full);
1086 val = sd->contrast * 0x20 / CONTRAST_MAX + 0x10; /* 10..30 */
1087 reg84_full[2] = val;
1088 reg84_full[0] = (val + 1) / 2;
1089 reg84_full[4] = (val + 1) / 5;
1090 if (val > BRIGHTNESS_DEF)
1091 val = (sd->brightness - BRIGHTNESS_DEF) * 0x20
1092 / BRIGHTNESS_MAX;
1093 else
1094 val = 0;
1095 reg84_full[0x12] = val; /* 00..1f */
1096 reg_w(gspca_dev, 0x84, reg84_full, sizeof reg84_full);
1097}
1098
1099/* sensor != ov76xx */
898static void setbrightness(struct gspca_dev *gspca_dev) 1100static void setbrightness(struct gspca_dev *gspca_dev)
899{ 1101{
900 struct sd *sd = (struct sd *) gspca_dev; 1102 struct sd *sd = (struct sd *) gspca_dev;
901 unsigned int expo; 1103 unsigned int expo;
902 __u8 k2; 1104 __u8 k2;
903 1105
1106 k2 = sd->brightness >> 10;
904 switch (sd->sensor) { 1107 switch (sd->sensor) {
905 case SENSOR_HV7131R: 1108 case SENSOR_HV7131R:
906 expo = sd->brightness << 4; 1109 expo = sd->brightness << 4;
@@ -915,12 +1118,17 @@ static void setbrightness(struct gspca_dev *gspca_dev)
915 expo = sd->brightness >> 4; 1118 expo = sd->brightness >> 4;
916 sd->exposure = setexposure(gspca_dev, expo); 1119 sd->exposure = setexposure(gspca_dev, expo);
917 break; 1120 break;
1121 case SENSOR_OM6802:
1122 expo = sd->brightness >> 6;
1123 sd->exposure = setexposure(gspca_dev, expo);
1124 k2 = sd->brightness >> 11;
1125 break;
918 } 1126 }
919 1127
920 k2 = sd->brightness >> 10;
921 reg_w1(gspca_dev, 0x96, k2); 1128 reg_w1(gspca_dev, 0x96, k2);
922} 1129}
923 1130
1131/* sensor != ov76xx */
924static void setcontrast(struct gspca_dev *gspca_dev) 1132static void setcontrast(struct gspca_dev *gspca_dev)
925{ 1133{
926 struct sd *sd = (struct sd *) gspca_dev; 1134 struct sd *sd = (struct sd *) gspca_dev;
@@ -937,31 +1145,30 @@ static void setcontrast(struct gspca_dev *gspca_dev)
937static void setcolors(struct gspca_dev *gspca_dev) 1145static void setcolors(struct gspca_dev *gspca_dev)
938{ 1146{
939 struct sd *sd = (struct sd *) gspca_dev; 1147 struct sd *sd = (struct sd *) gspca_dev;
940 __u8 data; 1148 __u8 blue, red;
941 int colour;
942 1149
943 colour = sd->colors - 128; 1150 if (sd->colors >= 32) {
944 if (colour > 0) 1151 red = 32 + (sd->colors - 32) / 2;
945 data = (colour + 32) & 0x7f; /* blue */ 1152 blue = 64 - sd->colors;
946 else 1153 } else {
947 data = (-colour + 32) & 0x7f; /* red */ 1154 red = sd->colors;
948 reg_w1(gspca_dev, 0x05, data); 1155 blue = 32 + (32 - sd->colors) / 2;
1156 }
1157 reg_w1(gspca_dev, 0x05, red);
1158/* reg_w1(gspca_dev, 0x07, 32); */
1159 reg_w1(gspca_dev, 0x06, blue);
949} 1160}
950 1161
951static void setautogain(struct gspca_dev *gspca_dev) 1162static void setautogain(struct gspca_dev *gspca_dev)
952{ 1163{
953 struct sd *sd = (struct sd *) gspca_dev; 1164 struct sd *sd = (struct sd *) gspca_dev;
954 1165
955 switch (sd->sensor) { 1166 if (gspca_dev->ctrl_dis & (1 << AUTOGAIN_IDX))
956 case SENSOR_HV7131R: 1167 return;
957 case SENSOR_MO4000: 1168 if (sd->autogain)
958 case SENSOR_MI0360: 1169 sd->ag_cnt = AG_CNT_START;
959 if (sd->autogain) 1170 else
960 sd->ag_cnt = AG_CNT_START; 1171 sd->ag_cnt = -1;
961 else
962 sd->ag_cnt = -1;
963 break;
964 }
965} 1172}
966 1173
967/* -- start the camera -- */ 1174/* -- start the camera -- */
@@ -975,13 +1182,12 @@ static void sd_start(struct gspca_dev *gspca_dev)
975 static const __u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f }; 1182 static const __u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f };
976 static const __u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; 1183 static const __u8 CA[] = { 0x28, 0xd8, 0x14, 0xec };
977 static const __u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */ 1184 static const __u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */
978 static const __u8 CE_sn9c325[] = 1185 static const __u8 CE_ov76xx[] =
979 { 0x32, 0xdd, 0x32, 0xdd }; /* OV7648 - SN9C325 */ 1186 { 0x32, 0xdd, 0x32, 0xdd }; /* OV7630/48 */
980 1187
981 sn9c1xx = sn_tb[(int) sd->sensor]; 1188 sn9c1xx = sn_tb[(int) sd->sensor];
982 configure_gpio(gspca_dev, sn9c1xx); 1189 configure_gpio(gspca_dev, sn9c1xx);
983 1190
984/* reg_w1(gspca_dev, 0x01, 0x44); jfm from win trace*/
985 reg_w1(gspca_dev, 0x15, sn9c1xx[0x15]); 1191 reg_w1(gspca_dev, 0x15, sn9c1xx[0x15]);
986 reg_w1(gspca_dev, 0x16, sn9c1xx[0x16]); 1192 reg_w1(gspca_dev, 0x16, sn9c1xx[0x16]);
987 reg_w1(gspca_dev, 0x12, sn9c1xx[0x12]); 1193 reg_w1(gspca_dev, 0x12, sn9c1xx[0x12]);
@@ -994,10 +1200,17 @@ static void sd_start(struct gspca_dev *gspca_dev)
994 reg_w1(gspca_dev, 0xc8, 0x50); 1200 reg_w1(gspca_dev, 0xc8, 0x50);
995 reg_w1(gspca_dev, 0xc9, 0x3c); 1201 reg_w1(gspca_dev, 0xc9, 0x3c);
996 reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); 1202 reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]);
997 switch (sd->bridge) { 1203 switch (sd->sensor) {
998 case BRIDGE_SN9C325: 1204 case SENSOR_OV7630:
1205 reg17 = 0xe2;
1206 break;
1207 case SENSOR_OV7648:
999 reg17 = 0xae; 1208 reg17 = 0xae;
1000 break; 1209 break;
1210/*jfm: from win trace */
1211 case SENSOR_OV7660:
1212 reg17 = 0xa0;
1213 break;
1001 default: 1214 default:
1002 reg17 = 0x60; 1215 reg17 = 0x60;
1003 break; 1216 break;
@@ -1007,24 +1220,11 @@ static void sd_start(struct gspca_dev *gspca_dev)
1007 reg_w1(gspca_dev, 0x07, sn9c1xx[7]); 1220 reg_w1(gspca_dev, 0x07, sn9c1xx[7]);
1008 reg_w1(gspca_dev, 0x06, sn9c1xx[6]); 1221 reg_w1(gspca_dev, 0x06, sn9c1xx[6]);
1009 reg_w1(gspca_dev, 0x14, sn9c1xx[0x14]); 1222 reg_w1(gspca_dev, 0x14, sn9c1xx[0x14]);
1010 switch (sd->bridge) { 1223 reg_w(gspca_dev, 0x20, gamma_def, sizeof gamma_def);
1011 case BRIDGE_SN9C325: 1224 for (i = 0; i < 8; i++)
1012 reg_w(gspca_dev, 0x20, regsn20_sn9c325, 1225 reg_w(gspca_dev, 0x84, reg84, sizeof reg84);
1013 sizeof regsn20_sn9c325);
1014 for (i = 0; i < 8; i++)
1015 reg_w(gspca_dev, 0x84, reg84_sn9c325,
1016 sizeof reg84_sn9c325);
1017 reg_w1(gspca_dev, 0x9a, 0x0a);
1018 reg_w1(gspca_dev, 0x99, 0x60);
1019 break;
1020 default:
1021 reg_w(gspca_dev, 0x20, regsn20, sizeof regsn20);
1022 for (i = 0; i < 8; i++)
1023 reg_w(gspca_dev, 0x84, reg84, sizeof reg84);
1024 reg_w1(gspca_dev, 0x9a, 0x08); 1226 reg_w1(gspca_dev, 0x9a, 0x08);
1025 reg_w1(gspca_dev, 0x99, 0x59); 1227 reg_w1(gspca_dev, 0x99, 0x59);
1026 break;
1027 }
1028 1228
1029 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 1229 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
1030 if (mode) 1230 if (mode)
@@ -1049,6 +1249,15 @@ static void sd_start(struct gspca_dev *gspca_dev)
1049/* reg1 = 0x06; * 640 clk 24Mz (done) */ 1249/* reg1 = 0x06; * 640 clk 24Mz (done) */
1050 } 1250 }
1051 break; 1251 break;
1252 case SENSOR_OM6802:
1253 om6802_InitSensor(gspca_dev);
1254 reg17 = 0x64; /* 640 MCKSIZE */
1255 break;
1256 case SENSOR_OV7630:
1257 ov7630_InitSensor(gspca_dev);
1258 reg17 = 0xe2;
1259 reg1 = 0x44;
1260 break;
1052 case SENSOR_OV7648: 1261 case SENSOR_OV7648:
1053 ov7648_InitSensor(gspca_dev); 1262 ov7648_InitSensor(gspca_dev);
1054 reg17 = 0xa2; 1263 reg17 = 0xa2;
@@ -1073,9 +1282,10 @@ static void sd_start(struct gspca_dev *gspca_dev)
1073 } 1282 }
1074 reg_w(gspca_dev, 0xc0, C0, 6); 1283 reg_w(gspca_dev, 0xc0, C0, 6);
1075 reg_w(gspca_dev, 0xca, CA, 4); 1284 reg_w(gspca_dev, 0xca, CA, 4);
1076 switch (sd->bridge) { 1285 switch (sd->sensor) {
1077 case BRIDGE_SN9C325: 1286 case SENSOR_OV7630:
1078 reg_w(gspca_dev, 0xce, CE_sn9c325, 4); 1287 case SENSOR_OV7648:
1288 reg_w(gspca_dev, 0xce, CE_ov76xx, 4);
1079 break; 1289 break;
1080 default: 1290 default:
1081 reg_w(gspca_dev, 0xce, CE, 4); 1291 reg_w(gspca_dev, 0xce, CE, 4);
@@ -1093,10 +1303,20 @@ static void sd_start(struct gspca_dev *gspca_dev)
1093 reg_w1(gspca_dev, 0x18, reg18); 1303 reg_w1(gspca_dev, 0x18, reg18);
1094 1304
1095 reg_w1(gspca_dev, 0x17, reg17); 1305 reg_w1(gspca_dev, 0x17, reg17);
1096 reg_w1(gspca_dev, 0x01, reg1); 1306 switch (sd->sensor) {
1097 setbrightness(gspca_dev); 1307 case SENSOR_HV7131R:
1098 setcontrast(gspca_dev); 1308 case SENSOR_MI0360:
1309 case SENSOR_MO4000:
1310 case SENSOR_OM6802:
1311 setbrightness(gspca_dev);
1312 setcontrast(gspca_dev);
1313 break;
1314 default: /* OV76xx */
1315 setbrightcont(gspca_dev);
1316 break;
1317 }
1099 setautogain(gspca_dev); 1318 setautogain(gspca_dev);
1319 reg_w1(gspca_dev, 0x01, reg1);
1100} 1320}
1101 1321
1102static void sd_stopN(struct gspca_dev *gspca_dev) 1322static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -1119,6 +1339,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1119 i2c_w8(gspca_dev, stopmi0360); 1339 i2c_w8(gspca_dev, stopmi0360);
1120 data = 0x29; 1340 data = 0x29;
1121 break; 1341 break;
1342 case SENSOR_OV7630:
1122 case SENSOR_OV7648: 1343 case SENSOR_OV7648:
1123 data = 0x29; 1344 data = 0x29;
1124 break; 1345 break;
@@ -1132,15 +1353,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1132 reg_w1(gspca_dev, 0x17, sn9c1xx[0x17]); 1353 reg_w1(gspca_dev, 0x17, sn9c1xx[0x17]);
1133 reg_w1(gspca_dev, 0x01, sn9c1xx[1]); 1354 reg_w1(gspca_dev, 0x01, sn9c1xx[1]);
1134 reg_w1(gspca_dev, 0x01, data); 1355 reg_w1(gspca_dev, 0x01, data);
1135 reg_w1(gspca_dev, 0xf1, 0x01); 1356 reg_w1(gspca_dev, 0xf1, 0x00);
1136}
1137
1138static void sd_stop0(struct gspca_dev *gspca_dev)
1139{
1140}
1141
1142static void sd_close(struct gspca_dev *gspca_dev)
1143{
1144} 1357}
1145 1358
1146static void do_autogain(struct gspca_dev *gspca_dev) 1359static void do_autogain(struct gspca_dev *gspca_dev)
@@ -1174,6 +1387,7 @@ static void do_autogain(struct gspca_dev *gspca_dev)
1174 default: 1387 default:
1175/* case SENSOR_MO4000: */ 1388/* case SENSOR_MO4000: */
1176/* case SENSOR_MI0360: */ 1389/* case SENSOR_MI0360: */
1390/* case SENSOR_OM6802: */
1177 expotimes = sd->exposure; 1391 expotimes = sd->exposure;
1178 expotimes += (luma_mean - delta) >> 6; 1392 expotimes += (luma_mean - delta) >> 6;
1179 if (expotimes < 0) 1393 if (expotimes < 0)
@@ -1229,69 +1443,24 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1229 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); 1443 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
1230} 1444}
1231 1445
1232static unsigned int getexposure(struct gspca_dev *gspca_dev)
1233{
1234 struct sd *sd = (struct sd *) gspca_dev;
1235 __u8 hexpo, mexpo, lexpo;
1236
1237 switch (sd->sensor) {
1238 case SENSOR_HV7131R:
1239 /* read sensor exposure */
1240 i2c_r5(gspca_dev, 0x25);
1241 return (gspca_dev->usb_buf[0] << 16)
1242 | (gspca_dev->usb_buf[1] << 8)
1243 | gspca_dev->usb_buf[2];
1244 case SENSOR_MI0360:
1245 /* read sensor exposure */
1246 i2c_r5(gspca_dev, 0x09);
1247 return (gspca_dev->usb_buf[0] << 8)
1248 | gspca_dev->usb_buf[1];
1249 case SENSOR_MO4000:
1250 i2c_r5(gspca_dev, 0x0e);
1251 hexpo = 0; /* gspca_dev->usb_buf[1] & 0x07; */
1252 mexpo = 0x40; /* gspca_dev->usb_buf[2] & 0xff; */
1253 lexpo = (gspca_dev->usb_buf[1] & 0x30) >> 4;
1254 PDEBUG(D_CONF, "exposure %d",
1255 (hexpo << 10) | (mexpo << 2) | lexpo);
1256 return (hexpo << 10) | (mexpo << 2) | lexpo;
1257 default:
1258/* case SENSOR_OV7648: * jfm: is it ok for 7648? */
1259/* case SENSOR_OV7660: */
1260 /* read sensor exposure */
1261 i2c_r5(gspca_dev, 0x04);
1262 hexpo = gspca_dev->usb_buf[3] & 0x2f;
1263 lexpo = gspca_dev->usb_buf[0] & 0x02;
1264 i2c_r5(gspca_dev, 0x08);
1265 mexpo = gspca_dev->usb_buf[2];
1266 return (hexpo << 10) | (mexpo << 2) | lexpo;
1267 }
1268}
1269
1270static void getbrightness(struct gspca_dev *gspca_dev)
1271{
1272 struct sd *sd = (struct sd *) gspca_dev;
1273
1274 /* hardcoded registers seem not readable */
1275 switch (sd->sensor) {
1276 case SENSOR_HV7131R:
1277 sd->brightness = getexposure(gspca_dev) >> 4;
1278 break;
1279 case SENSOR_MI0360:
1280 sd->brightness = getexposure(gspca_dev) << 4;
1281 break;
1282 case SENSOR_MO4000:
1283 sd->brightness = getexposure(gspca_dev) << 4;
1284 break;
1285 }
1286}
1287
1288static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) 1446static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
1289{ 1447{
1290 struct sd *sd = (struct sd *) gspca_dev; 1448 struct sd *sd = (struct sd *) gspca_dev;
1291 1449
1292 sd->brightness = val; 1450 sd->brightness = val;
1293 if (gspca_dev->streaming) 1451 if (gspca_dev->streaming) {
1294 setbrightness(gspca_dev); 1452 switch (sd->sensor) {
1453 case SENSOR_HV7131R:
1454 case SENSOR_MI0360:
1455 case SENSOR_MO4000:
1456 case SENSOR_OM6802:
1457 setbrightness(gspca_dev);
1458 break;
1459 default: /* OV76xx */
1460 setbrightcont(gspca_dev);
1461 break;
1462 }
1463 }
1295 return 0; 1464 return 0;
1296} 1465}
1297 1466
@@ -1299,7 +1468,6 @@ static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
1299{ 1468{
1300 struct sd *sd = (struct sd *) gspca_dev; 1469 struct sd *sd = (struct sd *) gspca_dev;
1301 1470
1302 getbrightness(gspca_dev);
1303 *val = sd->brightness; 1471 *val = sd->brightness;
1304 return 0; 1472 return 0;
1305} 1473}
@@ -1309,8 +1477,19 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
1309 struct sd *sd = (struct sd *) gspca_dev; 1477 struct sd *sd = (struct sd *) gspca_dev;
1310 1478
1311 sd->contrast = val; 1479 sd->contrast = val;
1312 if (gspca_dev->streaming) 1480 if (gspca_dev->streaming) {
1313 setcontrast(gspca_dev); 1481 switch (sd->sensor) {
1482 case SENSOR_HV7131R:
1483 case SENSOR_MI0360:
1484 case SENSOR_MO4000:
1485 case SENSOR_OM6802:
1486 setcontrast(gspca_dev);
1487 break;
1488 default: /* OV76xx */
1489 setbrightcont(gspca_dev);
1490 break;
1491 }
1492 }
1314 return 0; 1493 return 0;
1315} 1494}
1316 1495
@@ -1364,11 +1543,9 @@ static const struct sd_desc sd_desc = {
1364 .ctrls = sd_ctrls, 1543 .ctrls = sd_ctrls,
1365 .nctrls = ARRAY_SIZE(sd_ctrls), 1544 .nctrls = ARRAY_SIZE(sd_ctrls),
1366 .config = sd_config, 1545 .config = sd_config,
1367 .open = sd_open, 1546 .init = sd_init,
1368 .start = sd_start, 1547 .start = sd_start,
1369 .stopN = sd_stopN, 1548 .stopN = sd_stopN,
1370 .stop0 = sd_stop0,
1371 .close = sd_close,
1372 .pkt_scan = sd_pkt_scan, 1549 .pkt_scan = sd_pkt_scan,
1373 .dq_callback = do_autogain, 1550 .dq_callback = do_autogain,
1374}; 1551};
@@ -1379,7 +1556,7 @@ static const struct sd_desc sd_desc = {
1379 | (SENSOR_ ## sensor << 8) \ 1556 | (SENSOR_ ## sensor << 8) \
1380 | (i2c_addr) 1557 | (i2c_addr)
1381static const __devinitdata struct usb_device_id device_table[] = { 1558static const __devinitdata struct usb_device_id device_table[] = {
1382#ifndef CONFIG_USB_SN9C102 1559#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1383 {USB_DEVICE(0x0458, 0x7025), BSI(SN9C120, MI0360, 0x5d)}, 1560 {USB_DEVICE(0x0458, 0x7025), BSI(SN9C120, MI0360, 0x5d)},
1384 {USB_DEVICE(0x045e, 0x00f5), BSI(SN9C105, OV7660, 0x21)}, 1561 {USB_DEVICE(0x045e, 0x00f5), BSI(SN9C105, OV7660, 0x21)},
1385 {USB_DEVICE(0x045e, 0x00f7), BSI(SN9C105, OV7660, 0x21)}, 1562 {USB_DEVICE(0x045e, 0x00f7), BSI(SN9C105, OV7660, 0x21)},
@@ -1406,15 +1583,17 @@ static const __devinitdata struct usb_device_id device_table[] = {
1406/* {USB_DEVICE(0x0c45, 0x6108), BSI(SN9C120, OM6801, 0x??)}, */ 1583/* {USB_DEVICE(0x0c45, 0x6108), BSI(SN9C120, OM6801, 0x??)}, */
1407/* {USB_DEVICE(0x0c45, 0x6122), BSI(SN9C110, ICM105C, 0x??)}, */ 1584/* {USB_DEVICE(0x0c45, 0x6122), BSI(SN9C110, ICM105C, 0x??)}, */
1408/* {USB_DEVICE(0x0c45, 0x6123), BSI(SN9C110, SanyoCCD, 0x??)}, */ 1585/* {USB_DEVICE(0x0c45, 0x6123), BSI(SN9C110, SanyoCCD, 0x??)}, */
1409 {USB_DEVICE(0x0c45, 0x612a), BSI(SN9C325, OV7648, 0x21)}, 1586 {USB_DEVICE(0x0c45, 0x6128), BSI(SN9C110, OM6802, 0x21)}, /*sn9c325?*/
1410/* bw600.inf: 1587/*bw600.inf:*/
1411 {USB_DEVICE(0x0c45, 0x612a), BSI(SN9C110, OV7648, 0x21)}, */ 1588 {USB_DEVICE(0x0c45, 0x612a), BSI(SN9C110, OV7648, 0x21)}, /*sn9c325?*/
1412 {USB_DEVICE(0x0c45, 0x612c), BSI(SN9C110, MO4000, 0x21)}, 1589 {USB_DEVICE(0x0c45, 0x612c), BSI(SN9C110, MO4000, 0x21)},
1413/* {USB_DEVICE(0x0c45, 0x612e), BSI(SN9C110, OV7630, 0x??)}, */ 1590 {USB_DEVICE(0x0c45, 0x612e), BSI(SN9C110, OV7630, 0x21)},
1414/* {USB_DEVICE(0x0c45, 0x612f), BSI(SN9C110, ICM105C, 0x??)}, */ 1591/* {USB_DEVICE(0x0c45, 0x612f), BSI(SN9C110, ICM105C, 0x??)}, */
1415#ifndef CONFIG_USB_SN9C102 1592#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1416 {USB_DEVICE(0x0c45, 0x6130), BSI(SN9C120, MI0360, 0x5d)}, 1593 {USB_DEVICE(0x0c45, 0x6130), BSI(SN9C120, MI0360, 0x5d)},
1594#endif
1417 {USB_DEVICE(0x0c45, 0x6138), BSI(SN9C120, MO4000, 0x21)}, 1595 {USB_DEVICE(0x0c45, 0x6138), BSI(SN9C120, MO4000, 0x21)},
1596#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
1418/* {USB_DEVICE(0x0c45, 0x613a), BSI(SN9C120, OV7648, 0x??)}, */ 1597/* {USB_DEVICE(0x0c45, 0x613a), BSI(SN9C120, OV7648, 0x??)}, */
1419 {USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)}, 1598 {USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)},
1420 {USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)}, 1599 {USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)},
@@ -1438,6 +1617,10 @@ static struct usb_driver sd_driver = {
1438 .id_table = device_table, 1617 .id_table = device_table,
1439 .probe = sd_probe, 1618 .probe = sd_probe,
1440 .disconnect = gspca_disconnect, 1619 .disconnect = gspca_disconnect,
1620#ifdef CONFIG_PM
1621 .suspend = gspca_suspend,
1622 .resume = gspca_resume,
1623#endif
1441}; 1624};
1442 1625
1443/* -- module insert / remove -- */ 1626/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 17fe2c2a440d..6e733901fcca 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -645,8 +645,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
645 return 0; 645 return 0;
646} 646}
647 647
648/* this function is called at open time */ 648/* this function is called at probe and resume time */
649static int sd_open(struct gspca_dev *gspca_dev) 649static int sd_init(struct gspca_dev *gspca_dev)
650{ 650{
651 struct sd *sd = (struct sd *) gspca_dev; 651 struct sd *sd = (struct sd *) gspca_dev;
652 652
@@ -880,14 +880,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
880 gspca_dev->usb_buf[0]); 880 gspca_dev->usb_buf[0]);
881} 881}
882 882
883static void sd_stop0(struct gspca_dev *gspca_dev)
884{
885}
886
887static void sd_close(struct gspca_dev *gspca_dev)
888{
889}
890
891static void sd_pkt_scan(struct gspca_dev *gspca_dev, 883static void sd_pkt_scan(struct gspca_dev *gspca_dev,
892 struct gspca_frame *frame, /* target */ 884 struct gspca_frame *frame, /* target */
893 __u8 *data, /* isoc packet */ 885 __u8 *data, /* isoc packet */
@@ -1051,11 +1043,9 @@ static struct sd_desc sd_desc = {
1051 .ctrls = sd_ctrls, 1043 .ctrls = sd_ctrls,
1052 .nctrls = ARRAY_SIZE(sd_ctrls), 1044 .nctrls = ARRAY_SIZE(sd_ctrls),
1053 .config = sd_config, 1045 .config = sd_config,
1054 .open = sd_open, 1046 .init = sd_init,
1055 .start = sd_start, 1047 .start = sd_start,
1056 .stopN = sd_stopN, 1048 .stopN = sd_stopN,
1057 .stop0 = sd_stop0,
1058 .close = sd_close,
1059 .pkt_scan = sd_pkt_scan, 1049 .pkt_scan = sd_pkt_scan,
1060}; 1050};
1061 1051
@@ -1093,6 +1083,10 @@ static struct usb_driver sd_driver = {
1093 .id_table = device_table, 1083 .id_table = device_table,
1094 .probe = sd_probe, 1084 .probe = sd_probe,
1095 .disconnect = gspca_disconnect, 1085 .disconnect = gspca_disconnect,
1086#ifdef CONFIG_PM
1087 .suspend = gspca_suspend,
1088 .resume = gspca_resume,
1089#endif
1096}; 1090};
1097 1091
1098/* -- module insert / remove -- */ 1092/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index 51a3c3429ef0..e9eb59bae4fb 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -1953,8 +1953,8 @@ error:
1953 return -EINVAL; 1953 return -EINVAL;
1954} 1954}
1955 1955
1956/* this function is called at open time */ 1956/* this function is called at probe and resume time */
1957static int sd_open(struct gspca_dev *gspca_dev) 1957static int sd_init(struct gspca_dev *gspca_dev)
1958{ 1958{
1959 struct sd *sd = (struct sd *) gspca_dev; 1959 struct sd *sd = (struct sd *) gspca_dev;
1960 1960
@@ -2023,11 +2023,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
2023 2023
2024static void sd_stop0(struct gspca_dev *gspca_dev) 2024static void sd_stop0(struct gspca_dev *gspca_dev)
2025{ 2025{
2026}
2027
2028/* this function is called at close time */
2029static void sd_close(struct gspca_dev *gspca_dev)
2030{
2031 reg_write(gspca_dev->dev, SPCA501_REG_CTLRL, 0x05, 0x00); 2026 reg_write(gspca_dev->dev, SPCA501_REG_CTLRL, 0x05, 0x00);
2032} 2027}
2033 2028
@@ -2120,11 +2115,10 @@ static const struct sd_desc sd_desc = {
2120 .ctrls = sd_ctrls, 2115 .ctrls = sd_ctrls,
2121 .nctrls = ARRAY_SIZE(sd_ctrls), 2116 .nctrls = ARRAY_SIZE(sd_ctrls),
2122 .config = sd_config, 2117 .config = sd_config,
2123 .open = sd_open, 2118 .init = sd_init,
2124 .start = sd_start, 2119 .start = sd_start,
2125 .stopN = sd_stopN, 2120 .stopN = sd_stopN,
2126 .stop0 = sd_stop0, 2121 .stop0 = sd_stop0,
2127 .close = sd_close,
2128 .pkt_scan = sd_pkt_scan, 2122 .pkt_scan = sd_pkt_scan,
2129}; 2123};
2130 2124
@@ -2154,6 +2148,10 @@ static struct usb_driver sd_driver = {
2154 .id_table = device_table, 2148 .id_table = device_table,
2155 .probe = sd_probe, 2149 .probe = sd_probe,
2156 .disconnect = gspca_disconnect, 2150 .disconnect = gspca_disconnect,
2151#ifdef CONFIG_PM
2152 .suspend = gspca_suspend,
2153 .resume = gspca_resume,
2154#endif
2157}; 2155};
2158 2156
2159/* -- module insert / remove -- */ 2157/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index eda29d609359..f601daf19ebe 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -655,8 +655,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
655 return 0; 655 return 0;
656} 656}
657 657
658/* this function is called at open time */ 658/* this function is called at probe and resume time */
659static int sd_open(struct gspca_dev *gspca_dev) 659static int sd_init(struct gspca_dev *gspca_dev)
660{ 660{
661 struct sd *sd = (struct sd *) gspca_dev; 661 struct sd *sd = (struct sd *) gspca_dev;
662 int ret; 662 int ret;
@@ -743,11 +743,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
743 743
744static void sd_stop0(struct gspca_dev *gspca_dev) 744static void sd_stop0(struct gspca_dev *gspca_dev)
745{ 745{
746}
747
748/* this function is called at close time */
749static void sd_close(struct gspca_dev *gspca_dev)
750{
751 /* This maybe reset or power control */ 746 /* This maybe reset or power control */
752 reg_write(gspca_dev->dev, 0x03, 0x03, 0x20); 747 reg_write(gspca_dev->dev, 0x03, 0x03, 0x20);
753 reg_write(gspca_dev->dev, 0x03, 0x01, 0x0); 748 reg_write(gspca_dev->dev, 0x03, 0x01, 0x0);
@@ -825,11 +820,10 @@ static const struct sd_desc sd_desc = {
825 .ctrls = sd_ctrls, 820 .ctrls = sd_ctrls,
826 .nctrls = ARRAY_SIZE(sd_ctrls), 821 .nctrls = ARRAY_SIZE(sd_ctrls),
827 .config = sd_config, 822 .config = sd_config,
828 .open = sd_open, 823 .init = sd_init,
829 .start = sd_start, 824 .start = sd_start,
830 .stopN = sd_stopN, 825 .stopN = sd_stopN,
831 .stop0 = sd_stop0, 826 .stop0 = sd_stop0,
832 .close = sd_close,
833 .pkt_scan = sd_pkt_scan, 827 .pkt_scan = sd_pkt_scan,
834}; 828};
835 829
@@ -855,6 +849,10 @@ static struct usb_driver sd_driver = {
855 .id_table = device_table, 849 .id_table = device_table,
856 .probe = sd_probe, 850 .probe = sd_probe,
857 .disconnect = gspca_disconnect, 851 .disconnect = gspca_disconnect,
852#ifdef CONFIG_PM
853 .suspend = gspca_suspend,
854 .resume = gspca_resume,
855#endif
858}; 856};
859 857
860/* -- module insert / remove -- */ 858/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index f622fa75766d..195dce96ef06 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -313,8 +313,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
313 return 0; 313 return 0;
314} 314}
315 315
316/* this function is called at open time */ 316/* this function is called at probe and resume time */
317static int sd_open(struct gspca_dev *gspca_dev) 317static int sd_init(struct gspca_dev *gspca_dev)
318{ 318{
319 struct usb_device *dev = gspca_dev->dev; 319 struct usb_device *dev = gspca_dev->dev;
320 320
@@ -560,14 +560,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
560 reg_w(dev, 0x03, 0x00, 0x0003); 560 reg_w(dev, 0x03, 0x00, 0x0003);
561} 561}
562 562
563static void sd_stop0(struct gspca_dev *gspca_dev)
564{
565}
566
567static void sd_close(struct gspca_dev *gspca_dev)
568{
569}
570
571static void sd_pkt_scan(struct gspca_dev *gspca_dev, 563static void sd_pkt_scan(struct gspca_dev *gspca_dev,
572 struct gspca_frame *frame, /* target */ 564 struct gspca_frame *frame, /* target */
573 __u8 *data, /* isoc packet */ 565 __u8 *data, /* isoc packet */
@@ -740,11 +732,9 @@ static struct sd_desc sd_desc = {
740 .ctrls = sd_ctrls, 732 .ctrls = sd_ctrls,
741 .nctrls = ARRAY_SIZE(sd_ctrls), 733 .nctrls = ARRAY_SIZE(sd_ctrls),
742 .config = sd_config, 734 .config = sd_config,
743 .open = sd_open, 735 .init = sd_init,
744 .start = sd_start, 736 .start = sd_start,
745 .stopN = sd_stopN, 737 .stopN = sd_stopN,
746 .stop0 = sd_stop0,
747 .close = sd_close,
748 .pkt_scan = sd_pkt_scan, 738 .pkt_scan = sd_pkt_scan,
749}; 739};
750 740
@@ -772,6 +762,10 @@ static struct usb_driver sd_driver = {
772 .id_table = device_table, 762 .id_table = device_table,
773 .probe = sd_probe, 763 .probe = sd_probe,
774 .disconnect = gspca_disconnect, 764 .disconnect = gspca_disconnect,
765#ifdef CONFIG_PM
766 .suspend = gspca_suspend,
767 .resume = gspca_resume,
768#endif
775}; 769};
776 770
777/* -- module insert / remove -- */ 771/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index 699340c17dea..281ce02103a3 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1521,8 +1521,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
1521 return 0; /* success */ 1521 return 0; /* success */
1522} 1522}
1523 1523
1524/* this function is called at open time */ 1524/* this function is called at probe and resume time */
1525static int sd_open(struct gspca_dev *gspca_dev) 1525static int sd_init(struct gspca_dev *gspca_dev)
1526{ 1526{
1527/* write_vector(gspca_dev, spca508_open_data); */ 1527/* write_vector(gspca_dev, spca508_open_data); */
1528 return 0; 1528 return 0;
@@ -1554,15 +1554,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1554 reg_write(gspca_dev->dev, 0x8112, 0x20); 1554 reg_write(gspca_dev->dev, 0x8112, 0x20);
1555} 1555}
1556 1556
1557static void sd_stop0(struct gspca_dev *gspca_dev)
1558{
1559}
1560
1561/* this function is called at close time */
1562static void sd_close(struct gspca_dev *gspca_dev)
1563{
1564}
1565
1566static void sd_pkt_scan(struct gspca_dev *gspca_dev, 1557static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1567 struct gspca_frame *frame, /* target */ 1558 struct gspca_frame *frame, /* target */
1568 __u8 *data, /* isoc packet */ 1559 __u8 *data, /* isoc packet */
@@ -1633,11 +1624,9 @@ static const struct sd_desc sd_desc = {
1633 .ctrls = sd_ctrls, 1624 .ctrls = sd_ctrls,
1634 .nctrls = ARRAY_SIZE(sd_ctrls), 1625 .nctrls = ARRAY_SIZE(sd_ctrls),
1635 .config = sd_config, 1626 .config = sd_config,
1636 .open = sd_open, 1627 .init = sd_init,
1637 .start = sd_start, 1628 .start = sd_start,
1638 .stopN = sd_stopN, 1629 .stopN = sd_stopN,
1639 .stop0 = sd_stop0,
1640 .close = sd_close,
1641 .pkt_scan = sd_pkt_scan, 1630 .pkt_scan = sd_pkt_scan,
1642}; 1631};
1643 1632
@@ -1667,6 +1656,10 @@ static struct usb_driver sd_driver = {
1667 .id_table = device_table, 1656 .id_table = device_table,
1668 .probe = sd_probe, 1657 .probe = sd_probe,
1669 .disconnect = gspca_disconnect, 1658 .disconnect = gspca_disconnect,
1659#ifdef CONFIG_PM
1660 .suspend = gspca_suspend,
1661 .resume = gspca_resume,
1662#endif
1670}; 1663};
1671 1664
1672/* -- module insert / remove -- */ 1665/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 1073ac3d2ec6..cfbc9ebc5c5d 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -32,69 +32,48 @@ MODULE_LICENSE("GPL");
32struct sd { 32struct sd {
33 struct gspca_dev gspca_dev; /* !! must be the first item */ 33 struct gspca_dev gspca_dev; /* !! must be the first item */
34 34
35 unsigned short contrast; 35 __u16 contrast; /* rev72a only */
36 __u8 brightness; 36#define CONTRAST_MIN 0x0000
37#define CONTRAST_DEF 0x2000
38#define CONTRAST_MAX 0x3fff
39
40 __u16 exposure; /* rev12a only */
41#define EXPOSURE_MIN 1
42#define EXPOSURE_DEF 200
43#define EXPOSURE_MAX (4095 - 900) /* see set_exposure */
44
45 __u8 brightness; /* rev72a only */
46#define BRIGHTNESS_MIN 0
47#define BRIGHTNESS_DEF 32
48#define BRIGHTNESS_MAX 63
49
50 __u8 white; /* rev12a only */
51#define WHITE_MIN 1
52#define WHITE_DEF 0x40
53#define WHITE_MAX 0x7f
54
37 __u8 autogain; 55 __u8 autogain;
56#define AUTOGAIN_MIN 0
57#define AUTOGAIN_DEF 1
58#define AUTOGAIN_MAX 1
59
60 __u8 gain; /* rev12a only */
61#define GAIN_MIN 0x0
62#define GAIN_DEF 0x24
63#define GAIN_MAX 0x24
64
65#define EXPO12A_DEF 3
66 __u8 expo12a; /* expo/gain? for rev 12a */
38 67
39 __u8 chip_revision; 68 __u8 chip_revision;
69#define Rev012A 0
70#define Rev072A 1
71
40 signed char ag_cnt; 72 signed char ag_cnt;
41#define AG_CNT_START 13 73#define AG_CNT_START 13
42}; 74};
43 75
44/* V4L2 controls supported by the driver */ 76static struct v4l2_pix_format sif_012a_mode[] = {
45static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
46static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
47static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
48static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
49static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
50static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
51
52static struct ctrl sd_ctrls[] = {
53#define SD_BRIGHTNESS 0
54 {
55 {
56 .id = V4L2_CID_BRIGHTNESS,
57 .type = V4L2_CTRL_TYPE_INTEGER,
58 .name = "Brightness",
59 .minimum = 0,
60 .maximum = 63,
61 .step = 1,
62 .default_value = 32,
63 },
64 .set = sd_setbrightness,
65 .get = sd_getbrightness,
66 },
67#define SD_CONTRAST 1
68 {
69 {
70 .id = V4L2_CID_CONTRAST,
71 .type = V4L2_CTRL_TYPE_INTEGER,
72 .name = "Contrast",
73 .minimum = 0,
74 .maximum = 0x3fff,
75 .step = 1,
76 .default_value = 0x2000,
77 },
78 .set = sd_setcontrast,
79 .get = sd_getcontrast,
80 },
81#define SD_AUTOGAIN 2
82 {
83 {
84 .id = V4L2_CID_AUTOGAIN,
85 .type = V4L2_CTRL_TYPE_BOOLEAN,
86 .name = "Auto Gain",
87 .minimum = 0,
88 .maximum = 1,
89 .step = 1,
90 .default_value = 1,
91 },
92 .set = sd_setautogain,
93 .get = sd_getautogain,
94 },
95};
96
97static struct v4l2_pix_format sif_mode[] = {
98 {160, 120, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, 77 {160, 120, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
99 .bytesperline = 160, 78 .bytesperline = 160,
100 .sizeimage = 160 * 120, 79 .sizeimage = 160 * 120,
@@ -117,6 +96,29 @@ static struct v4l2_pix_format sif_mode[] = {
117 .priv = 0}, 96 .priv = 0},
118}; 97};
119 98
99static struct v4l2_pix_format sif_072a_mode[] = {
100 {160, 120, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
101 .bytesperline = 160,
102 .sizeimage = 160 * 120,
103 .colorspace = V4L2_COLORSPACE_SRGB,
104 .priv = 3},
105 {176, 144, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
106 .bytesperline = 176,
107 .sizeimage = 176 * 144,
108 .colorspace = V4L2_COLORSPACE_SRGB,
109 .priv = 2},
110 {320, 240, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
111 .bytesperline = 320,
112 .sizeimage = 320 * 240,
113 .colorspace = V4L2_COLORSPACE_SRGB,
114 .priv = 1},
115 {352, 288, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE,
116 .bytesperline = 352,
117 .sizeimage = 352 * 288,
118 .colorspace = V4L2_COLORSPACE_SRGB,
119 .priv = 0},
120};
121
120/* 122/*
121 * Initialization data 123 * Initialization data
122 * I'm not very sure how to split initialization from open data 124 * I'm not very sure how to split initialization from open data
@@ -143,12 +145,8 @@ static struct v4l2_pix_format sif_mode[] = {
143#define SPCA561_INDEX_I2C_BASE 0x8800 145#define SPCA561_INDEX_I2C_BASE 0x8800
144#define SPCA561_SNAPBIT 0x20 146#define SPCA561_SNAPBIT 0x20
145#define SPCA561_SNAPCTRL 0x40 147#define SPCA561_SNAPCTRL 0x40
146enum {
147 Rev072A = 0,
148 Rev012A,
149};
150 148
151static void reg_w_val(struct usb_device *dev, __u16 index, __u16 value) 149static void reg_w_val(struct usb_device *dev, __u16 index, __u8 value)
152{ 150{
153 int ret; 151 int ret;
154 152
@@ -198,12 +196,6 @@ static void reg_w_buf(struct gspca_dev *gspca_dev,
198 index, gspca_dev->usb_buf, len, 500); 196 index, gspca_dev->usb_buf, len, 500);
199} 197}
200 198
201static void i2c_init(struct gspca_dev *gspca_dev, __u8 mode)
202{
203 reg_w_val(gspca_dev->dev, 0x92, 0x8804);
204 reg_w_val(gspca_dev->dev, mode, 0x8802);
205}
206
207static void i2c_write(struct gspca_dev *gspca_dev, __u16 valeur, __u16 reg) 199static void i2c_write(struct gspca_dev *gspca_dev, __u16 valeur, __u16 reg)
208{ 200{
209 int retry = 60; 201 int retry = 60;
@@ -212,9 +204,9 @@ static void i2c_write(struct gspca_dev *gspca_dev, __u16 valeur, __u16 reg)
212 204
213 DataLow = valeur; 205 DataLow = valeur;
214 DataHight = valeur >> 8; 206 DataHight = valeur >> 8;
215 reg_w_val(gspca_dev->dev, reg, 0x8801); 207 reg_w_val(gspca_dev->dev, 0x8801, reg);
216 reg_w_val(gspca_dev->dev, DataLow, 0x8805); 208 reg_w_val(gspca_dev->dev, 0x8805, DataLow);
217 reg_w_val(gspca_dev->dev, DataHight, 0x8800); 209 reg_w_val(gspca_dev->dev, 0x8800, DataHight);
218 while (retry--) { 210 while (retry--) {
219 reg_r(gspca_dev, 0x8803, 1); 211 reg_r(gspca_dev, 0x8803, 1);
220 if (!gspca_dev->usb_buf[0]) 212 if (!gspca_dev->usb_buf[0])
@@ -228,14 +220,14 @@ static int i2c_read(struct gspca_dev *gspca_dev, __u16 reg, __u8 mode)
228 __u8 value; 220 __u8 value;
229 __u8 vallsb; 221 __u8 vallsb;
230 222
231 reg_w_val(gspca_dev->dev, 0x92, 0x8804); 223 reg_w_val(gspca_dev->dev, 0x8804, 0x92);
232 reg_w_val(gspca_dev->dev, reg, 0x8801); 224 reg_w_val(gspca_dev->dev, 0x8801, reg);
233 reg_w_val(gspca_dev->dev, (mode | 0x01), 0x8802); 225 reg_w_val(gspca_dev->dev, 0x8802, (mode | 0x01));
234 while (retry--) { 226 do {
235 reg_r(gspca_dev, 0x8803, 1); 227 reg_r(gspca_dev, 0x8803, 1);
236 if (!gspca_dev->usb_buf) 228 if (!gspca_dev->usb_buf)
237 break; 229 break;
238 } 230 } while (--retry);
239 if (retry == 0) 231 if (retry == 0)
240 return -1; 232 return -1;
241 reg_r(gspca_dev, 0x8800, 1); 233 reg_r(gspca_dev, 0x8800, 1);
@@ -438,21 +430,10 @@ static const __u16 spca561_init_data[][2] = {
438 {0x0035, 0x8801}, /* 0x14 - set gain general */ 430 {0x0035, 0x8801}, /* 0x14 - set gain general */
439 {0x001f, 0x8805}, /* 0x14 */ 431 {0x001f, 0x8805}, /* 0x14 */
440 {0x0000, 0x8800}, 432 {0x0000, 0x8800},
441 {0x0030, 0x8112}, 433 {0x000e, 0x8112}, /* white balance - was 30 */
442 {} 434 {}
443}; 435};
444 436
445static void sensor_reset(struct gspca_dev *gspca_dev)
446{
447 reg_w_val(gspca_dev->dev, 0x8631, 0xc8);
448 reg_w_val(gspca_dev->dev, 0x8634, 0xc8);
449 reg_w_val(gspca_dev->dev, 0x8112, 0x00);
450 reg_w_val(gspca_dev->dev, 0x8114, 0x00);
451 reg_w_val(gspca_dev->dev, 0x8118, 0x21);
452 i2c_init(gspca_dev, 0x14);
453 i2c_write(gspca_dev, 1, 0x0d);
454 i2c_write(gspca_dev, 0, 0x0d);
455}
456 437
457/******************** QC Express etch2 stuff ********************/ 438/******************** QC Express etch2 stuff ********************/
458static const __u16 Pb100_1map8300[][2] = { 439static const __u16 Pb100_1map8300[][2] = {
@@ -462,9 +443,9 @@ static const __u16 Pb100_1map8300[][2] = {
462 {0x8303, 0x0125}, /* image area */ 443 {0x8303, 0x0125}, /* image area */
463 {0x8304, 0x0169}, 444 {0x8304, 0x0169},
464 {0x8328, 0x000b}, 445 {0x8328, 0x000b},
465 {0x833c, 0x0001}, 446 {0x833c, 0x0001}, /*fixme: win:07*/
466 447
467 {0x832f, 0x0419}, 448 {0x832f, 0x1904}, /*fixme: was 0419*/
468 {0x8307, 0x00aa}, 449 {0x8307, 0x00aa},
469 {0x8301, 0x0003}, 450 {0x8301, 0x0003},
470 {0x8302, 0x000e}, 451 {0x8302, 0x000e},
@@ -478,9 +459,10 @@ static const __u16 Pb100_2map8300[][2] = {
478}; 459};
479 460
480static const __u16 spca561_161rev12A_data1[][2] = { 461static const __u16 spca561_161rev12A_data1[][2] = {
481 {0x21, 0x8118}, 462 {0x29, 0x8118}, /* white balance - was 21 */
482 {0x01, 0x8114}, 463 {0x08, 0x8114}, /* white balance - was 01 */
483 {0x00, 0x8112}, 464 {0x0e, 0x8112}, /* white balance - was 00 */
465 {0x00, 0x8102}, /* white balance - new */
484 {0x92, 0x8804}, 466 {0x92, 0x8804},
485 {0x04, 0x8802}, /* windows uses 08 */ 467 {0x04, 0x8802}, /* windows uses 08 */
486 {} 468 {}
@@ -505,14 +487,16 @@ static const __u16 spca561_161rev12A_data2[][2] = {
505 {0xb0, 0x8603}, 487 {0xb0, 0x8603},
506 488
507 /* sensor gains */ 489 /* sensor gains */
490 {0x07, 0x8601}, /* white balance - new */
491 {0x07, 0x8602}, /* white balance - new */
508 {0x00, 0x8610}, /* *red */ 492 {0x00, 0x8610}, /* *red */
509 {0x00, 0x8611}, /* 3f *green */ 493 {0x00, 0x8611}, /* 3f *green */
510 {0x00, 0x8612}, /* green *blue */ 494 {0x00, 0x8612}, /* green *blue */
511 {0x00, 0x8613}, /* blue *green */ 495 {0x00, 0x8613}, /* blue *green */
512 {0x35, 0x8614}, /* green *red */ 496 {0x43, 0x8614}, /* green *red - white balance - was 0x35 */
513 {0x35, 0x8615}, /* 40 *green */ 497 {0x40, 0x8615}, /* 40 *green - white balance - was 0x35 */
514 {0x35, 0x8616}, /* 7a *blue */ 498 {0x71, 0x8616}, /* 7a *blue - white balance - was 0x35 */
515 {0x35, 0x8617}, /* 40 *green */ 499 {0x40, 0x8617}, /* 40 *green - white balance - was 0x35 */
516 500
517 {0x0c, 0x8620}, /* 0c */ 501 {0x0c, 0x8620}, /* 0c */
518 {0xc8, 0x8631}, /* c8 */ 502 {0xc8, 0x8631}, /* c8 */
@@ -527,6 +511,7 @@ static const __u16 spca561_161rev12A_data2[][2] = {
527 {0xdf, 0x863c}, /* df */ 511 {0xdf, 0x863c}, /* df */
528 {0xf0, 0x8505}, 512 {0xf0, 0x8505},
529 {0x32, 0x850a}, 513 {0x32, 0x850a},
514/* {0x99, 0x8700}, * - white balance - new (removed) */
530 {} 515 {}
531}; 516};
532 517
@@ -545,9 +530,10 @@ static void sensor_mapwrite(struct gspca_dev *gspca_dev,
545} 530}
546static void init_161rev12A(struct gspca_dev *gspca_dev) 531static void init_161rev12A(struct gspca_dev *gspca_dev)
547{ 532{
548 sensor_reset(gspca_dev); 533/* sensor_reset(gspca_dev); (not in win) */
549 write_vector(gspca_dev, spca561_161rev12A_data1); 534 write_vector(gspca_dev, spca561_161rev12A_data1);
550 sensor_mapwrite(gspca_dev, Pb100_1map8300); 535 sensor_mapwrite(gspca_dev, Pb100_1map8300);
536/*fixme: should be in sd_start*/
551 write_vector(gspca_dev, spca561_161rev12A_data2); 537 write_vector(gspca_dev, spca561_161rev12A_data2);
552 sensor_mapwrite(gspca_dev, Pb100_2map8300); 538 sensor_mapwrite(gspca_dev, Pb100_2map8300);
553} 539}
@@ -581,35 +567,38 @@ static int sd_config(struct gspca_dev *gspca_dev,
581 } 567 }
582 568
583 cam = &gspca_dev->cam; 569 cam = &gspca_dev->cam;
584 cam->dev_name = (char *) id->driver_info;
585 cam->epaddr = 0x01; 570 cam->epaddr = 0x01;
586 gspca_dev->nbalt = 7 + 1; /* choose alternate 7 first */ 571 gspca_dev->nbalt = 7 + 1; /* choose alternate 7 first */
587 cam->cam_mode = sif_mode;
588 cam->nmodes = sizeof sif_mode / sizeof sif_mode[0];
589 572
590 sd->chip_revision = id->driver_info; 573 sd->chip_revision = id->driver_info;
591 sd->brightness = sd_ctrls[SD_BRIGHTNESS].qctrl.default_value; 574 if (sd->chip_revision == Rev012A) {
592 sd->contrast = sd_ctrls[SD_CONTRAST].qctrl.default_value; 575 cam->cam_mode = sif_012a_mode;
593 sd->autogain = sd_ctrls[SD_AUTOGAIN].qctrl.default_value; 576 cam->nmodes = ARRAY_SIZE(sif_012a_mode);
577 } else {
578 cam->cam_mode = sif_072a_mode;
579 cam->nmodes = ARRAY_SIZE(sif_072a_mode);
580 }
581 sd->brightness = BRIGHTNESS_DEF;
582 sd->contrast = CONTRAST_DEF;
583 sd->white = WHITE_DEF;
584 sd->exposure = EXPOSURE_DEF;
585 sd->autogain = AUTOGAIN_DEF;
586 sd->gain = GAIN_DEF;
587 sd->expo12a = EXPO12A_DEF;
594 return 0; 588 return 0;
595} 589}
596 590
597/* this function is called at open time */ 591/* this function is called at probe and resume time */
598static int sd_open(struct gspca_dev *gspca_dev) 592static int sd_init_12a(struct gspca_dev *gspca_dev)
599{ 593{
600 struct sd *sd = (struct sd *) gspca_dev; 594 PDEBUG(D_STREAM, "Chip revision: 012a");
601 595 init_161rev12A(gspca_dev);
602 switch (sd->chip_revision) { 596 return 0;
603 case Rev072A: 597}
604 PDEBUG(D_STREAM, "Chip revision id: 072a"); 598static int sd_init_72a(struct gspca_dev *gspca_dev)
605 write_vector(gspca_dev, spca561_init_data); 599{
606 break; 600 PDEBUG(D_STREAM, "Chip revision: 072a");
607 default: 601 write_vector(gspca_dev, spca561_init_data);
608/* case Rev012A: */
609 PDEBUG(D_STREAM, "Chip revision id: 012a");
610 init_161rev12A(gspca_dev);
611 break;
612 }
613 return 0; 602 return 0;
614} 603}
615 604
@@ -618,25 +607,20 @@ static void setcontrast(struct gspca_dev *gspca_dev)
618 struct sd *sd = (struct sd *) gspca_dev; 607 struct sd *sd = (struct sd *) gspca_dev;
619 struct usb_device *dev = gspca_dev->dev; 608 struct usb_device *dev = gspca_dev->dev;
620 __u8 lowb; 609 __u8 lowb;
621 int expotimes;
622 610
623 switch (sd->chip_revision) { 611 switch (sd->chip_revision) {
624 case Rev072A: 612 case Rev072A:
625 lowb = sd->contrast >> 8; 613 lowb = sd->contrast >> 8;
626 reg_w_val(dev, lowb, 0x8651); 614 reg_w_val(dev, 0x8651, lowb);
627 reg_w_val(dev, lowb, 0x8652); 615 reg_w_val(dev, 0x8652, lowb);
628 reg_w_val(dev, lowb, 0x8653); 616 reg_w_val(dev, 0x8653, lowb);
629 reg_w_val(dev, lowb, 0x8654); 617 reg_w_val(dev, 0x8654, lowb);
630 break; 618 break;
631 case Rev012A: { 619 default: {
632 __u8 Reg8391[] = 620/* case Rev012A: { */
633 { 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00 }; 621 static const __u8 Reg8391[] =
634 622 { 0x92, 0x30, 0x20, 0x00, 0x0c, 0x00, 0x00, 0x00 };
635 /* Write camera sensor settings */ 623
636 expotimes = (sd->contrast >> 5) & 0x07ff;
637 Reg8391[0] = expotimes & 0xff; /* exposure */
638 Reg8391[1] = 0x18 | (expotimes >> 8);
639 Reg8391[2] = sd->brightness; /* gain */
640 reg_w_buf(gspca_dev, 0x8391, Reg8391, 8); 624 reg_w_buf(gspca_dev, 0x8391, Reg8391, 8);
641 reg_w_buf(gspca_dev, 0x8390, Reg8391, 8); 625 reg_w_buf(gspca_dev, 0x8390, Reg8391, 8);
642 break; 626 break;
@@ -644,93 +628,151 @@ static void setcontrast(struct gspca_dev *gspca_dev)
644 } 628 }
645} 629}
646 630
647static void setautogain(struct gspca_dev *gspca_dev) 631/* rev12a only */
632static void setwhite(struct gspca_dev *gspca_dev)
648{ 633{
649 struct sd *sd = (struct sd *) gspca_dev; 634 struct sd *sd = (struct sd *) gspca_dev;
635 __u16 white;
636 __u8 reg8614, reg8616;
637
638 white = sd->white;
639 /* try to emulate MS-win as possible */
640 reg8616 = 0x90 - white * 5 / 8;
641 reg_w_val(gspca_dev->dev, 0x8616, reg8616);
642 reg8614 = 0x20 + white * 3 / 8;
643 reg_w_val(gspca_dev->dev, 0x8614, reg8614);
644}
650 645
651 if (sd->chip_revision == Rev072A) { 646/* rev 12a only */
652 if (sd->autogain) 647static void setexposure(struct gspca_dev *gspca_dev)
653 sd->ag_cnt = AG_CNT_START; 648{
654 else 649 struct sd *sd = (struct sd *) gspca_dev;
655 sd->ag_cnt = -1; 650 int expo;
651 int clock_divider;
652 __u8 data[2];
653
654 /* Register 0x8309 controls exposure for the spca561,
655 the basic exposure setting goes from 1-2047, where 1 is completely
656 dark and 2047 is very bright. It not only influences exposure but
657 also the framerate (to allow for longer exposure) from 1 - 300 it
658 only raises the exposure time then from 300 - 600 it halves the
659 framerate to be able to further raise the exposure time and for every
660 300 more it halves the framerate again. This allows for a maximum
661 exposure time of circa 0.2 - 0.25 seconds (30 / (2000/3000) fps).
662 Sometimes this is not enough, the 1-2047 uses bits 0-10, bits 11-12
663 configure a divider for the base framerate which us used at the
664 exposure setting of 1-300. These bits configure the base framerate
665 according to the following formula: fps = 60 / (value + 2) */
666 if (sd->exposure < 2048) {
667 expo = sd->exposure;
668 clock_divider = 0;
669 } else {
670 /* Add 900 to make the 0 setting of the second part of the
671 exposure equal to the 2047 setting of the first part. */
672 expo = (sd->exposure - 2048) + 900;
673 clock_divider = 3;
656 } 674 }
675 expo |= clock_divider << 11;
676 data[0] = expo;
677 data[1] = expo >> 8;
678 reg_w_buf(gspca_dev, 0x8309, data, 2);
657} 679}
658 680
659static void sd_start(struct gspca_dev *gspca_dev) 681/* rev 12a only */
682static void setgain(struct gspca_dev *gspca_dev)
660{ 683{
661 struct sd *sd = (struct sd *) gspca_dev; 684 struct sd *sd = (struct sd *) gspca_dev;
685 __u8 data[2];
686
687 data[0] = sd->gain;
688 data[1] = 0;
689 reg_w_buf(gspca_dev, 0x8335, data, 2);
690}
691
692static void setautogain(struct gspca_dev *gspca_dev)
693{
694 struct sd *sd = (struct sd *) gspca_dev;
695
696 if (sd->autogain)
697 sd->ag_cnt = AG_CNT_START;
698 else
699 sd->ag_cnt = -1;
700}
701
702static void sd_start_12a(struct gspca_dev *gspca_dev)
703{
662 struct usb_device *dev = gspca_dev->dev; 704 struct usb_device *dev = gspca_dev->dev;
663 int Clck; 705 int Clck = 0x8a; /* lower 0x8X values lead to fps > 30 */
664 __u8 Reg8307[] = { 0xaa, 0x00 }; 706 __u8 Reg8307[] = { 0xaa, 0x00 };
665 int mode; 707 int mode;
666 708
667 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 709 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
668 switch (sd->chip_revision) { 710 if (mode <= 1) {
669 case Rev072A: 711 /* Use compression on 320x240 and above */
670 switch (mode) { 712 reg_w_val(dev, 0x8500, 0x10 | mode);
671 default: 713 } else {
672/* case 0: 714 /* I couldn't get the compression to work below 320x240
673 case 1: */ 715 * Fortunately at these resolutions the bandwidth
674 Clck = 0x25; 716 * is sufficient to push raw frames at ~20fps */
675 break; 717 reg_w_val(dev, 0x8500, mode);
676 case 2: 718 } /* -- qq@kuku.eu.org */
677 Clck = 0x22; 719 reg_w_buf(gspca_dev, 0x8307, Reg8307, 2);
678 break; 720 reg_w_val(gspca_dev->dev, 0x8700, Clck);
679 case 3: 721 /* 0x8f 0x85 0x27 clock */
680 Clck = 0x21; 722 reg_w_val(gspca_dev->dev, 0x8112, 0x1e | 0x20);
681 break; 723 reg_w_val(gspca_dev->dev, 0x850b, 0x03);
682 } 724 setcontrast(gspca_dev);
683 reg_w_val(dev, 0x8500, mode); /* mode */ 725 setwhite(gspca_dev);
684 reg_w_val(dev, 0x8700, Clck); /* 0x27 clock */ 726 setautogain(gspca_dev);
685 reg_w_val(dev, 0x8112, 0x10 | 0x20); 727 setexposure(gspca_dev);
686 setautogain(gspca_dev); 728}
687 break; 729static void sd_start_72a(struct gspca_dev *gspca_dev)
730{
731 struct usb_device *dev = gspca_dev->dev;
732 int Clck;
733 int mode;
734
735 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
736 switch (mode) {
688 default: 737 default:
689/* case Rev012A: */ 738/* case 0:
690 switch (mode) { 739 case 1: */
691 case 0: 740 Clck = 0x25;
692 case 1: 741 break;
693 Clck = 0x8a; 742 case 2:
694 break; 743 Clck = 0x22;
695 case 2: 744 break;
696 Clck = 0x85; 745 case 3:
697 break; 746 Clck = 0x21;
698 default:
699 Clck = 0x83;
700 break;
701 }
702 if (mode <= 1) {
703 /* Use compression on 320x240 and above */
704 reg_w_val(dev, 0x8500, 0x10 | mode);
705 } else {
706 /* I couldn't get the compression to work below 320x240
707 * Fortunately at these resolutions the bandwidth
708 * is sufficient to push raw frames at ~20fps */
709 reg_w_val(dev, 0x8500, mode);
710 } /* -- qq@kuku.eu.org */
711 reg_w_buf(gspca_dev, 0x8307, Reg8307, 2);
712 reg_w_val(gspca_dev->dev, 0x8700, Clck);
713 /* 0x8f 0x85 0x27 clock */
714 reg_w_val(gspca_dev->dev, 0x8112, 0x1e | 0x20);
715 reg_w_val(gspca_dev->dev, 0x850b, 0x03);
716 setcontrast(gspca_dev);
717 break; 747 break;
718 } 748 }
749 reg_w_val(dev, 0x8500, mode); /* mode */
750 reg_w_val(dev, 0x8700, Clck); /* 0x27 clock */
751 reg_w_val(dev, 0x8112, 0x10 | 0x20);
752 setautogain(gspca_dev);
719} 753}
720 754
721static void sd_stopN(struct gspca_dev *gspca_dev) 755static void sd_stopN(struct gspca_dev *gspca_dev)
722{ 756{
723 reg_w_val(gspca_dev->dev, 0x8112, 0x20); 757 struct sd *sd = (struct sd *) gspca_dev;
758
759 if (sd->chip_revision == Rev012A) {
760 reg_w_val(gspca_dev->dev, 0x8112, 0x0e);
761 } else {
762 reg_w_val(gspca_dev->dev, 0x8112, 0x20);
763/* reg_w_val(gspca_dev->dev, 0x8102, 0x00); ?? */
764 }
724} 765}
725 766
726static void sd_stop0(struct gspca_dev *gspca_dev) 767static void sd_stop0(struct gspca_dev *gspca_dev)
727{ 768{
728} 769 struct sd *sd = (struct sd *) gspca_dev;
729 770
730/* this function is called at close time */ 771 if (sd->chip_revision == Rev012A) {
731static void sd_close(struct gspca_dev *gspca_dev) 772 reg_w_val(gspca_dev->dev, 0x8118, 0x29);
732{ 773 reg_w_val(gspca_dev->dev, 0x8114, 0x08);
733 reg_w_val(gspca_dev->dev, 0x8114, 0); 774 }
775/* reg_w_val(gspca_dev->dev, 0x8114, 0); */
734} 776}
735 777
736static void do_autogain(struct gspca_dev *gspca_dev) 778static void do_autogain(struct gspca_dev *gspca_dev)
@@ -744,6 +786,7 @@ static void do_autogain(struct gspca_dev *gspca_dev)
744 __u8 luma_mean = 110; 786 __u8 luma_mean = 110;
745 __u8 luma_delta = 20; 787 __u8 luma_delta = 20;
746 __u8 spring = 4; 788 __u8 spring = 4;
789 __u8 reg8339[2];
747 790
748 if (sd->ag_cnt < 0) 791 if (sd->ag_cnt < 0)
749 return; 792 return;
@@ -798,13 +841,16 @@ static void do_autogain(struct gspca_dev *gspca_dev)
798 } 841 }
799 break; 842 break;
800 case Rev012A: 843 case Rev012A:
801 /* sensor registers is access and memory mapped to 0x8300 */ 844 reg_r(gspca_dev, 0x8330, 2);
802 /* readind all 0x83xx block the sensor */ 845 if (gspca_dev->usb_buf[1] > 0x08) {
803 /* 846 reg8339[0] = ++sd->expo12a;
804 * The data from the header seem wrong where is the luma 847 reg8339[1] = 0;
805 * and chroma mean value 848 reg_w_buf(gspca_dev, 0x8339, reg8339, 2);
806 * at the moment set exposure in contrast set 849 } else if (gspca_dev->usb_buf[1] < 0x02) {
807 */ 850 reg8339[0] = --sd->expo12a;
851 reg8339[1] = 0;
852 reg_w_buf(gspca_dev, 0x8339, reg8339, 2);
853 }
808 break; 854 break;
809 } 855 }
810} 856}
@@ -814,6 +860,8 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
814 __u8 *data, /* isoc packet */ 860 __u8 *data, /* isoc packet */
815 int len) /* iso packet length */ 861 int len) /* iso packet length */
816{ 862{
863 struct sd *sd = (struct sd *) gspca_dev;
864
817 switch (data[0]) { 865 switch (data[0]) {
818 case 0: /* start of frame */ 866 case 0: /* start of frame */
819 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame, 867 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame,
@@ -826,8 +874,13 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
826 frame, data, len); 874 frame, data, len);
827 } else { 875 } else {
828 /* raw bayer (with a header, which we skip) */ 876 /* raw bayer (with a header, which we skip) */
829 data += 20; 877 if (sd->chip_revision == Rev012A) {
830 len -= 20; 878 data += 20;
879 len -= 20;
880 } else {
881 data += 16;
882 len -= 16;
883 }
831 gspca_frame_add(gspca_dev, FIRST_PACKET, 884 gspca_frame_add(gspca_dev, FIRST_PACKET,
832 frame, data, len); 885 frame, data, len);
833 } 886 }
@@ -841,24 +894,17 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
841 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); 894 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
842} 895}
843 896
897/* rev 72a only */
844static void setbrightness(struct gspca_dev *gspca_dev) 898static void setbrightness(struct gspca_dev *gspca_dev)
845{ 899{
846 struct sd *sd = (struct sd *) gspca_dev; 900 struct sd *sd = (struct sd *) gspca_dev;
847 __u8 value; 901 __u8 value;
848 902
849 switch (sd->chip_revision) { 903 value = sd->brightness;
850 case Rev072A: 904 reg_w_val(gspca_dev->dev, 0x8611, value);
851 value = sd->brightness; 905 reg_w_val(gspca_dev->dev, 0x8612, value);
852 reg_w_val(gspca_dev->dev, value, 0x8611); 906 reg_w_val(gspca_dev->dev, 0x8613, value);
853 reg_w_val(gspca_dev->dev, value, 0x8612); 907 reg_w_val(gspca_dev->dev, 0x8614, value);
854 reg_w_val(gspca_dev->dev, value, 0x8613);
855 reg_w_val(gspca_dev->dev, value, 0x8614);
856 break;
857 default:
858/* case Rev012A: */
859 setcontrast(gspca_dev);
860 break;
861 }
862} 908}
863 909
864static void getbrightness(struct gspca_dev *gspca_dev) 910static void getbrightness(struct gspca_dev *gspca_dev)
@@ -866,52 +912,38 @@ static void getbrightness(struct gspca_dev *gspca_dev)
866 struct sd *sd = (struct sd *) gspca_dev; 912 struct sd *sd = (struct sd *) gspca_dev;
867 __u16 tot; 913 __u16 tot;
868 914
869 switch (sd->chip_revision) { 915 tot = 0;
870 case Rev072A: 916 reg_r(gspca_dev, 0x8611, 1);
871 tot = 0; 917 tot += gspca_dev->usb_buf[0];
872 reg_r(gspca_dev, 0x8611, 1); 918 reg_r(gspca_dev, 0x8612, 1);
873 tot += gspca_dev->usb_buf[0]; 919 tot += gspca_dev->usb_buf[0];
874 reg_r(gspca_dev, 0x8612, 1); 920 reg_r(gspca_dev, 0x8613, 1);
875 tot += gspca_dev->usb_buf[0]; 921 tot += gspca_dev->usb_buf[0];
876 reg_r(gspca_dev, 0x8613, 1); 922 reg_r(gspca_dev, 0x8614, 1);
877 tot += gspca_dev->usb_buf[0]; 923 tot += gspca_dev->usb_buf[0];
878 reg_r(gspca_dev, 0x8614, 1); 924 sd->brightness = tot >> 2;
879 tot += gspca_dev->usb_buf[0];
880 sd->brightness = tot >> 2;
881 break;
882 default:
883/* case Rev012A: */
884 /* no way to read sensor settings */
885 break;
886 }
887} 925}
888 926
927/* rev72a only */
889static void getcontrast(struct gspca_dev *gspca_dev) 928static void getcontrast(struct gspca_dev *gspca_dev)
890{ 929{
891 struct sd *sd = (struct sd *) gspca_dev; 930 struct sd *sd = (struct sd *) gspca_dev;
892 __u16 tot; 931 __u16 tot;
893 932
894 switch (sd->chip_revision) { 933 tot = 0;
895 case Rev072A: 934 reg_r(gspca_dev, 0x8651, 1);
896 tot = 0; 935 tot += gspca_dev->usb_buf[0];
897 reg_r(gspca_dev, 0x8651, 1); 936 reg_r(gspca_dev, 0x8652, 1);
898 tot += gspca_dev->usb_buf[0]; 937 tot += gspca_dev->usb_buf[0];
899 reg_r(gspca_dev, 0x8652, 1); 938 reg_r(gspca_dev, 0x8653, 1);
900 tot += gspca_dev->usb_buf[0]; 939 tot += gspca_dev->usb_buf[0];
901 reg_r(gspca_dev, 0x8653, 1); 940 reg_r(gspca_dev, 0x8654, 1);
902 tot += gspca_dev->usb_buf[0]; 941 tot += gspca_dev->usb_buf[0];
903 reg_r(gspca_dev, 0x8654, 1); 942 sd->contrast = tot << 6;
904 tot += gspca_dev->usb_buf[0];
905 sd->contrast = tot << 6;
906 break;
907 default:
908/* case Rev012A: */
909 /* no way to read sensor settings */
910 break;
911 }
912 PDEBUG(D_CONF, "get contrast %d", sd->contrast); 943 PDEBUG(D_CONF, "get contrast %d", sd->contrast);
913} 944}
914 945
946/* rev 72a only */
915static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) 947static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
916{ 948{
917 struct sd *sd = (struct sd *) gspca_dev; 949 struct sd *sd = (struct sd *) gspca_dev;
@@ -931,6 +963,7 @@ static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
931 return 0; 963 return 0;
932} 964}
933 965
966/* rev 72a only */
934static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) 967static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
935{ 968{
936 struct sd *sd = (struct sd *) gspca_dev; 969 struct sd *sd = (struct sd *) gspca_dev;
@@ -968,20 +1001,190 @@ static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
968 return 0; 1001 return 0;
969} 1002}
970 1003
1004/* rev12a only */
1005static int sd_setwhite(struct gspca_dev *gspca_dev, __s32 val)
1006{
1007 struct sd *sd = (struct sd *) gspca_dev;
1008
1009 sd->white = val;
1010 if (gspca_dev->streaming)
1011 setwhite(gspca_dev);
1012 return 0;
1013}
1014
1015static int sd_getwhite(struct gspca_dev *gspca_dev, __s32 *val)
1016{
1017 struct sd *sd = (struct sd *) gspca_dev;
1018
1019 *val = sd->white;
1020 return 0;
1021}
1022
1023/* rev12a only */
1024static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
1025{
1026 struct sd *sd = (struct sd *) gspca_dev;
1027
1028 sd->exposure = val;
1029 if (gspca_dev->streaming)
1030 setexposure(gspca_dev);
1031 return 0;
1032}
1033
1034static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
1035{
1036 struct sd *sd = (struct sd *) gspca_dev;
1037
1038 *val = sd->exposure;
1039 return 0;
1040}
1041
1042/* rev12a only */
1043static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
1044{
1045 struct sd *sd = (struct sd *) gspca_dev;
1046
1047 sd->gain = val;
1048 if (gspca_dev->streaming)
1049 setgain(gspca_dev);
1050 return 0;
1051}
1052
1053static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
1054{
1055 struct sd *sd = (struct sd *) gspca_dev;
1056
1057 *val = sd->gain;
1058 return 0;
1059}
1060
1061/* control tables */
1062static struct ctrl sd_ctrls_12a[] = {
1063 {
1064 {
1065 .id = V4L2_CID_DO_WHITE_BALANCE,
1066 .type = V4L2_CTRL_TYPE_INTEGER,
1067 .name = "While Balance",
1068 .minimum = WHITE_MIN,
1069 .maximum = WHITE_MAX,
1070 .step = 1,
1071 .default_value = WHITE_DEF,
1072 },
1073 .set = sd_setwhite,
1074 .get = sd_getwhite,
1075 },
1076 {
1077 {
1078 .id = V4L2_CID_EXPOSURE,
1079 .type = V4L2_CTRL_TYPE_INTEGER,
1080 .name = "Exposure",
1081 .minimum = EXPOSURE_MIN,
1082 .maximum = EXPOSURE_MAX,
1083 .step = 1,
1084 .default_value = EXPOSURE_DEF,
1085 },
1086 .set = sd_setexposure,
1087 .get = sd_getexposure,
1088 },
1089 {
1090 {
1091 .id = V4L2_CID_AUTOGAIN,
1092 .type = V4L2_CTRL_TYPE_BOOLEAN,
1093 .name = "Auto Gain",
1094 .minimum = AUTOGAIN_MIN,
1095 .maximum = AUTOGAIN_MAX,
1096 .step = 1,
1097 .default_value = AUTOGAIN_DEF,
1098 },
1099 .set = sd_setautogain,
1100 .get = sd_getautogain,
1101 },
1102 {
1103 {
1104 .id = V4L2_CID_GAIN,
1105 .type = V4L2_CTRL_TYPE_INTEGER,
1106 .name = "Gain",
1107 .minimum = GAIN_MIN,
1108 .maximum = GAIN_MAX,
1109 .step = 1,
1110 .default_value = GAIN_DEF,
1111 },
1112 .set = sd_setgain,
1113 .get = sd_getgain,
1114 },
1115};
1116
1117static struct ctrl sd_ctrls_72a[] = {
1118 {
1119 {
1120 .id = V4L2_CID_BRIGHTNESS,
1121 .type = V4L2_CTRL_TYPE_INTEGER,
1122 .name = "Brightness",
1123 .minimum = BRIGHTNESS_MIN,
1124 .maximum = BRIGHTNESS_MAX,
1125 .step = 1,
1126 .default_value = BRIGHTNESS_DEF,
1127 },
1128 .set = sd_setbrightness,
1129 .get = sd_getbrightness,
1130 },
1131 {
1132 {
1133 .id = V4L2_CID_CONTRAST,
1134 .type = V4L2_CTRL_TYPE_INTEGER,
1135 .name = "Contrast",
1136 .minimum = CONTRAST_MIN,
1137 .maximum = CONTRAST_MAX,
1138 .step = 1,
1139 .default_value = CONTRAST_DEF,
1140 },
1141 .set = sd_setcontrast,
1142 .get = sd_getcontrast,
1143 },
1144 {
1145 {
1146 .id = V4L2_CID_AUTOGAIN,
1147 .type = V4L2_CTRL_TYPE_BOOLEAN,
1148 .name = "Auto Gain",
1149 .minimum = AUTOGAIN_MIN,
1150 .maximum = AUTOGAIN_MAX,
1151 .step = 1,
1152 .default_value = AUTOGAIN_DEF,
1153 },
1154 .set = sd_setautogain,
1155 .get = sd_getautogain,
1156 },
1157};
1158
971/* sub-driver description */ 1159/* sub-driver description */
972static const struct sd_desc sd_desc = { 1160static const struct sd_desc sd_desc_12a = {
1161 .name = MODULE_NAME,
1162 .ctrls = sd_ctrls_12a,
1163 .nctrls = ARRAY_SIZE(sd_ctrls_12a),
1164 .config = sd_config,
1165 .init = sd_init_12a,
1166 .start = sd_start_12a,
1167 .stopN = sd_stopN,
1168 .stop0 = sd_stop0,
1169 .pkt_scan = sd_pkt_scan,
1170/* .dq_callback = do_autogain, * fixme */
1171};
1172static const struct sd_desc sd_desc_72a = {
973 .name = MODULE_NAME, 1173 .name = MODULE_NAME,
974 .ctrls = sd_ctrls, 1174 .ctrls = sd_ctrls_72a,
975 .nctrls = ARRAY_SIZE(sd_ctrls), 1175 .nctrls = ARRAY_SIZE(sd_ctrls_72a),
976 .config = sd_config, 1176 .config = sd_config,
977 .open = sd_open, 1177 .init = sd_init_72a,
978 .start = sd_start, 1178 .start = sd_start_72a,
979 .stopN = sd_stopN, 1179 .stopN = sd_stopN,
980 .stop0 = sd_stop0, 1180 .stop0 = sd_stop0,
981 .close = sd_close,
982 .pkt_scan = sd_pkt_scan, 1181 .pkt_scan = sd_pkt_scan,
983 .dq_callback = do_autogain, 1182 .dq_callback = do_autogain,
984}; 1183};
1184static const struct sd_desc *sd_desc[2] = {
1185 &sd_desc_12a,
1186 &sd_desc_72a
1187};
985 1188
986/* -- module initialisation -- */ 1189/* -- module initialisation -- */
987static const __devinitdata struct usb_device_id device_table[] = { 1190static const __devinitdata struct usb_device_id device_table[] = {
@@ -1009,7 +1212,9 @@ MODULE_DEVICE_TABLE(usb, device_table);
1009static int sd_probe(struct usb_interface *intf, 1212static int sd_probe(struct usb_interface *intf,
1010 const struct usb_device_id *id) 1213 const struct usb_device_id *id)
1011{ 1214{
1012 return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), 1215 return gspca_dev_probe(intf, id,
1216 sd_desc[id->driver_info],
1217 sizeof(struct sd),
1013 THIS_MODULE); 1218 THIS_MODULE);
1014} 1219}
1015 1220
@@ -1018,6 +1223,10 @@ static struct usb_driver sd_driver = {
1018 .id_table = device_table, 1223 .id_table = device_table,
1019 .probe = sd_probe, 1224 .probe = sd_probe,
1020 .disconnect = gspca_disconnect, 1225 .disconnect = gspca_disconnect,
1226#ifdef CONFIG_PM
1227 .suspend = gspca_suspend,
1228 .resume = gspca_resume,
1229#endif
1021}; 1230};
1022 1231
1023/* -- module insert / remove -- */ 1232/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 16219cf6a6d5..2f2de429e273 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -306,8 +306,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
306 return 0; 306 return 0;
307} 307}
308 308
309/* this function is called at open time */ 309/* this function is called at probe and resume time */
310static int sd_open(struct gspca_dev *gspca_dev) 310static int sd_init(struct gspca_dev *gspca_dev)
311{ 311{
312 int ret; 312 int ret;
313 313
@@ -398,14 +398,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
398 PDEBUG(D_STREAM, "camera stopped"); 398 PDEBUG(D_STREAM, "camera stopped");
399} 399}
400 400
401static void sd_stop0(struct gspca_dev *gspca_dev)
402{
403}
404
405static void sd_close(struct gspca_dev *gspca_dev)
406{
407}
408
409static void sd_pkt_scan(struct gspca_dev *gspca_dev, 401static void sd_pkt_scan(struct gspca_dev *gspca_dev,
410 struct gspca_frame *frame, /* target */ 402 struct gspca_frame *frame, /* target */
411 __u8 *data, /* isoc packet */ 403 __u8 *data, /* isoc packet */
@@ -535,11 +527,9 @@ static const struct sd_desc sd_desc = {
535 .ctrls = sd_ctrls, 527 .ctrls = sd_ctrls,
536 .nctrls = ARRAY_SIZE(sd_ctrls), 528 .nctrls = ARRAY_SIZE(sd_ctrls),
537 .config = sd_config, 529 .config = sd_config,
538 .open = sd_open, 530 .init = sd_init,
539 .start = sd_start, 531 .start = sd_start,
540 .stopN = sd_stopN, 532 .stopN = sd_stopN,
541 .stop0 = sd_stop0,
542 .close = sd_close,
543 .pkt_scan = sd_pkt_scan, 533 .pkt_scan = sd_pkt_scan,
544 .querymenu = sd_querymenu, 534 .querymenu = sd_querymenu,
545}; 535};
@@ -564,6 +554,10 @@ static struct usb_driver sd_driver = {
564 .id_table = device_table, 554 .id_table = device_table,
565 .probe = sd_probe, 555 .probe = sd_probe,
566 .disconnect = gspca_disconnect, 556 .disconnect = gspca_disconnect,
557#ifdef CONFIG_PM
558 .suspend = gspca_suspend,
559 .resume = gspca_resume,
560#endif
567}; 561};
568 562
569/* -- module insert / remove -- */ 563/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index 54efa48bee01..1cfcc6c49558 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -449,31 +449,47 @@ static const __u8 qtable_spca504_default[2][64] = {
449 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e} 449 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e}
450}; 450};
451 451
452static void reg_r(struct usb_device *dev, 452/* read <len> bytes to gspca_dev->usb_buf */
453 __u16 req, 453static void reg_r(struct gspca_dev *gspca_dev,
454 __u16 index, 454 __u16 req,
455 __u8 *buffer, __u16 length) 455 __u16 index,
456 __u16 len)
456{ 457{
457 usb_control_msg(dev, 458#ifdef GSPCA_DEBUG
458 usb_rcvctrlpipe(dev, 0), 459 if (len > USB_BUF_SZ) {
460 err("reg_r: buffer overflow");
461 return;
462 }
463#endif
464 usb_control_msg(gspca_dev->dev,
465 usb_rcvctrlpipe(gspca_dev->dev, 0),
459 req, 466 req,
460 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 467 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
461 0, /* value */ 468 0, /* value */
462 index, buffer, length, 469 index,
470 len ? gspca_dev->usb_buf : NULL, len,
463 500); 471 500);
464} 472}
465 473
466static void reg_w(struct usb_device *dev, 474/* write <len> bytes from gspca_dev->usb_buf */
467 __u16 req, 475static void reg_w(struct gspca_dev *gspca_dev,
468 __u16 value, 476 __u16 req,
469 __u16 index, 477 __u16 value,
470 __u8 *buffer, __u16 length) 478 __u16 index,
479 __u16 len)
471{ 480{
472 usb_control_msg(dev, 481#ifdef GSPCA_DEBUG
473 usb_sndctrlpipe(dev, 0), 482 if (len > USB_BUF_SZ) {
483 err("reg_w: buffer overflow");
484 return;
485 }
486#endif
487 usb_control_msg(gspca_dev->dev,
488 usb_sndctrlpipe(gspca_dev->dev, 0),
474 req, 489 req,
475 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 490 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
476 value, index, buffer, length, 491 value, index,
492 len ? gspca_dev->usb_buf : NULL, len,
477 500); 493 500);
478} 494}
479 495
@@ -634,7 +650,7 @@ static int spca504B_PollingDataReady(struct gspca_dev *gspca_dev)
634 int count = 10; 650 int count = 10;
635 651
636 while (--count > 0) { 652 while (--count > 0) {
637 reg_r(gspca_dev->dev, 0x21, 0, gspca_dev->usb_buf, 1); 653 reg_r(gspca_dev, 0x21, 0, 1);
638 if ((gspca_dev->usb_buf[0] & 0x01) == 0) 654 if ((gspca_dev->usb_buf[0] & 0x01) == 0)
639 break; 655 break;
640 msleep(10); 656 msleep(10);
@@ -644,15 +660,14 @@ static int spca504B_PollingDataReady(struct gspca_dev *gspca_dev)
644 660
645static void spca504B_WaitCmdStatus(struct gspca_dev *gspca_dev) 661static void spca504B_WaitCmdStatus(struct gspca_dev *gspca_dev)
646{ 662{
647 struct usb_device *dev = gspca_dev->dev;
648 int count = 50; 663 int count = 50;
649 664
650 while (--count > 0) { 665 while (--count > 0) {
651 reg_r(dev, 0x21, 1, gspca_dev->usb_buf, 1); 666 reg_r(gspca_dev, 0x21, 1, 1);
652 if (gspca_dev->usb_buf[0] != 0) { 667 if (gspca_dev->usb_buf[0] != 0) {
653 gspca_dev->usb_buf[0] = 0; 668 gspca_dev->usb_buf[0] = 0;
654 reg_w(dev, 0x21, 0, 1, gspca_dev->usb_buf, 1); 669 reg_w(gspca_dev, 0x21, 0, 1, 1);
655 reg_r(dev, 0x21, 1, gspca_dev->usb_buf, 1); 670 reg_r(gspca_dev, 0x21, 1, 1);
656 spca504B_PollingDataReady(gspca_dev); 671 spca504B_PollingDataReady(gspca_dev);
657 break; 672 break;
658 } 673 }
@@ -662,16 +677,14 @@ static void spca504B_WaitCmdStatus(struct gspca_dev *gspca_dev)
662 677
663static void spca50x_GetFirmware(struct gspca_dev *gspca_dev) 678static void spca50x_GetFirmware(struct gspca_dev *gspca_dev)
664{ 679{
665 struct usb_device *dev = gspca_dev->dev;
666 __u8 *data; 680 __u8 *data;
667 681
668 data = kmalloc(64, GFP_KERNEL); 682 data = gspca_dev->usb_buf;
669 reg_r(dev, 0x20, 0, data, 5); 683 reg_r(gspca_dev, 0x20, 0, 5);
670 PDEBUG(D_STREAM, "FirmWare : %d %d %d %d %d ", 684 PDEBUG(D_STREAM, "FirmWare : %d %d %d %d %d ",
671 data[0], data[1], data[2], data[3], data[4]); 685 data[0], data[1], data[2], data[3], data[4]);
672 reg_r(dev, 0x23, 0, data, 64); 686 reg_r(gspca_dev, 0x23, 0, 64);
673 reg_r(dev, 0x23, 1, data, 64); 687 reg_r(gspca_dev, 0x23, 1, 64);
674 kfree(data);
675} 688}
676 689
677static void spca504B_SetSizeType(struct gspca_dev *gspca_dev) 690static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
@@ -686,21 +699,21 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
686 Type = 0; 699 Type = 0;
687 switch (sd->bridge) { 700 switch (sd->bridge) {
688 case BRIDGE_SPCA533: 701 case BRIDGE_SPCA533:
689 reg_w(dev, 0x31, 0, 0, NULL, 0); 702 reg_w(gspca_dev, 0x31, 0, 0, 0);
690 spca504B_WaitCmdStatus(gspca_dev); 703 spca504B_WaitCmdStatus(gspca_dev);
691 rc = spca504B_PollingDataReady(gspca_dev); 704 rc = spca504B_PollingDataReady(gspca_dev);
692 spca50x_GetFirmware(gspca_dev); 705 spca50x_GetFirmware(gspca_dev);
693 gspca_dev->usb_buf[0] = 2; /* type */ 706 gspca_dev->usb_buf[0] = 2; /* type */
694 reg_w(dev, 0x24, 0, 8, gspca_dev->usb_buf, 1); 707 reg_w(gspca_dev, 0x24, 0, 8, 1);
695 reg_r(dev, 0x24, 8, gspca_dev->usb_buf, 1); 708 reg_r(gspca_dev, 0x24, 8, 1);
696 709
697 gspca_dev->usb_buf[0] = Size; 710 gspca_dev->usb_buf[0] = Size;
698 reg_w(dev, 0x25, 0, 4, gspca_dev->usb_buf, 1); 711 reg_w(gspca_dev, 0x25, 0, 4, 1);
699 reg_r(dev, 0x25, 4, gspca_dev->usb_buf, 1); /* size */ 712 reg_r(gspca_dev, 0x25, 4, 1); /* size */
700 rc = spca504B_PollingDataReady(gspca_dev); 713 rc = spca504B_PollingDataReady(gspca_dev);
701 714
702 /* Init the cam width height with some values get on init ? */ 715 /* Init the cam width height with some values get on init ? */
703 reg_w(dev, 0x31, 0, 4, NULL, 0); 716 reg_w(gspca_dev, 0x31, 0, 4, 0);
704 spca504B_WaitCmdStatus(gspca_dev); 717 spca504B_WaitCmdStatus(gspca_dev);
705 rc = spca504B_PollingDataReady(gspca_dev); 718 rc = spca504B_PollingDataReady(gspca_dev);
706 break; 719 break;
@@ -708,12 +721,12 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
708/* case BRIDGE_SPCA504B: */ 721/* case BRIDGE_SPCA504B: */
709/* case BRIDGE_SPCA536: */ 722/* case BRIDGE_SPCA536: */
710 gspca_dev->usb_buf[0] = Size; 723 gspca_dev->usb_buf[0] = Size;
711 reg_w(dev, 0x25, 0, 4, gspca_dev->usb_buf, 1); 724 reg_w(gspca_dev, 0x25, 0, 4, 1);
712 reg_r(dev, 0x25, 4, gspca_dev->usb_buf, 1); /* size */ 725 reg_r(gspca_dev, 0x25, 4, 1); /* size */
713 Type = 6; 726 Type = 6;
714 gspca_dev->usb_buf[0] = Type; 727 gspca_dev->usb_buf[0] = Type;
715 reg_w(dev, 0x27, 0, 0, gspca_dev->usb_buf, 1); 728 reg_w(gspca_dev, 0x27, 0, 0, 1);
716 reg_r(dev, 0x27, 0, gspca_dev->usb_buf, 1); /* type */ 729 reg_r(gspca_dev, 0x27, 0, 1); /* type */
717 rc = spca504B_PollingDataReady(gspca_dev); 730 rc = spca504B_PollingDataReady(gspca_dev);
718 break; 731 break;
719 case BRIDGE_SPCA504: 732 case BRIDGE_SPCA504:
@@ -752,18 +765,15 @@ static void spca504_wait_status(struct gspca_dev *gspca_dev)
752 765
753static void spca504B_setQtable(struct gspca_dev *gspca_dev) 766static void spca504B_setQtable(struct gspca_dev *gspca_dev)
754{ 767{
755 struct usb_device *dev = gspca_dev->dev;
756
757 gspca_dev->usb_buf[0] = 3; 768 gspca_dev->usb_buf[0] = 3;
758 reg_w(dev, 0x26, 0, 0, gspca_dev->usb_buf, 1); 769 reg_w(gspca_dev, 0x26, 0, 0, 1);
759 reg_r(dev, 0x26, 0, gspca_dev->usb_buf, 1); 770 reg_r(gspca_dev, 0x26, 0, 1);
760 spca504B_PollingDataReady(gspca_dev); 771 spca504B_PollingDataReady(gspca_dev);
761} 772}
762 773
763static void sp5xx_initContBrigHueRegisters(struct gspca_dev *gspca_dev) 774static void sp5xx_initContBrigHueRegisters(struct gspca_dev *gspca_dev)
764{ 775{
765 struct sd *sd = (struct sd *) gspca_dev; 776 struct sd *sd = (struct sd *) gspca_dev;
766 struct usb_device *dev = gspca_dev->dev;
767 int pollreg = 1; 777 int pollreg = 1;
768 778
769 switch (sd->bridge) { 779 switch (sd->bridge) {
@@ -774,20 +784,20 @@ static void sp5xx_initContBrigHueRegisters(struct gspca_dev *gspca_dev)
774 default: 784 default:
775/* case BRIDGE_SPCA533: */ 785/* case BRIDGE_SPCA533: */
776/* case BRIDGE_SPCA504B: */ 786/* case BRIDGE_SPCA504B: */
777 reg_w(dev, 0, 0, 0x21a7, NULL, 0); /* brightness */ 787 reg_w(gspca_dev, 0, 0, 0x21a7, 0); /* brightness */
778 reg_w(dev, 0, 0x20, 0x21a8, NULL, 0); /* contrast */ 788 reg_w(gspca_dev, 0, 0x20, 0x21a8, 0); /* contrast */
779 reg_w(dev, 0, 0, 0x21ad, NULL, 0); /* hue */ 789 reg_w(gspca_dev, 0, 0, 0x21ad, 0); /* hue */
780 reg_w(dev, 0, 1, 0x21ac, NULL, 0); /* sat/hue */ 790 reg_w(gspca_dev, 0, 1, 0x21ac, 0); /* sat/hue */
781 reg_w(dev, 0, 0x20, 0x21ae, NULL, 0); /* saturation */ 791 reg_w(gspca_dev, 0, 0x20, 0x21ae, 0); /* saturation */
782 reg_w(dev, 0, 0, 0x21a3, NULL, 0); /* gamma */ 792 reg_w(gspca_dev, 0, 0, 0x21a3, 0); /* gamma */
783 break; 793 break;
784 case BRIDGE_SPCA536: 794 case BRIDGE_SPCA536:
785 reg_w(dev, 0, 0, 0x20f0, NULL, 0); 795 reg_w(gspca_dev, 0, 0, 0x20f0, 0);
786 reg_w(dev, 0, 0x21, 0x20f1, NULL, 0); 796 reg_w(gspca_dev, 0, 0x21, 0x20f1, 0);
787 reg_w(dev, 0, 0x40, 0x20f5, NULL, 0); 797 reg_w(gspca_dev, 0, 0x40, 0x20f5, 0);
788 reg_w(dev, 0, 1, 0x20f4, NULL, 0); 798 reg_w(gspca_dev, 0, 1, 0x20f4, 0);
789 reg_w(dev, 0, 0x40, 0x20f6, NULL, 0); 799 reg_w(gspca_dev, 0, 0x40, 0x20f6, 0);
790 reg_w(dev, 0, 0, 0x2089, NULL, 0); 800 reg_w(gspca_dev, 0, 0, 0x2089, 0);
791 break; 801 break;
792 } 802 }
793 if (pollreg) 803 if (pollreg)
@@ -799,7 +809,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
799 const struct usb_device_id *id) 809 const struct usb_device_id *id)
800{ 810{
801 struct sd *sd = (struct sd *) gspca_dev; 811 struct sd *sd = (struct sd *) gspca_dev;
802 struct usb_device *dev = gspca_dev->dev;
803 struct cam *cam; 812 struct cam *cam;
804 813
805 cam = &gspca_dev->cam; 814 cam = &gspca_dev->cam;
@@ -811,7 +820,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
811 if (sd->subtype == AiptekMiniPenCam13) { 820 if (sd->subtype == AiptekMiniPenCam13) {
812/* try to get the firmware as some cam answer 2.0.1.2.2 821/* try to get the firmware as some cam answer 2.0.1.2.2
813 * and should be a spca504b then overwrite that setting */ 822 * and should be a spca504b then overwrite that setting */
814 reg_r(dev, 0x20, 0, gspca_dev->usb_buf, 1); 823 reg_r(gspca_dev, 0x20, 0, 1);
815 switch (gspca_dev->usb_buf[0]) { 824 switch (gspca_dev->usb_buf[0]) {
816 case 1: 825 case 1:
817 break; /* (right bridge/subtype) */ 826 break; /* (right bridge/subtype) */
@@ -848,8 +857,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
848 return 0; 857 return 0;
849} 858}
850 859
851/* this function is called at open time */ 860/* this function is called at probe and resume time */
852static int sd_open(struct gspca_dev *gspca_dev) 861static int sd_init(struct gspca_dev *gspca_dev)
853{ 862{
854 struct sd *sd = (struct sd *) gspca_dev; 863 struct sd *sd = (struct sd *) gspca_dev;
855 struct usb_device *dev = gspca_dev->dev; 864 struct usb_device *dev = gspca_dev->dev;
@@ -860,12 +869,12 @@ static int sd_open(struct gspca_dev *gspca_dev)
860 869
861 switch (sd->bridge) { 870 switch (sd->bridge) {
862 case BRIDGE_SPCA504B: 871 case BRIDGE_SPCA504B:
863 reg_w(dev, 0x1d, 0, 0, NULL, 0); 872 reg_w(gspca_dev, 0x1d, 0, 0, 0);
864 reg_w(dev, 0, 1, 0x2306, NULL, 0); 873 reg_w(gspca_dev, 0, 1, 0x2306, 0);
865 reg_w(dev, 0, 0, 0x0d04, NULL, 0); 874 reg_w(gspca_dev, 0, 0, 0x0d04, 0);
866 reg_w(dev, 0, 0, 0x2000, NULL, 0); 875 reg_w(gspca_dev, 0, 0, 0x2000, 0);
867 reg_w(dev, 0, 0x13, 0x2301, NULL, 0); 876 reg_w(gspca_dev, 0, 0x13, 0x2301, 0);
868 reg_w(dev, 0, 0, 0x2306, NULL, 0); 877 reg_w(gspca_dev, 0, 0, 0x2306, 0);
869 /* fall thru */ 878 /* fall thru */
870 case BRIDGE_SPCA533: 879 case BRIDGE_SPCA533:
871 rc = spca504B_PollingDataReady(gspca_dev); 880 rc = spca504B_PollingDataReady(gspca_dev);
@@ -873,12 +882,12 @@ static int sd_open(struct gspca_dev *gspca_dev)
873 break; 882 break;
874 case BRIDGE_SPCA536: 883 case BRIDGE_SPCA536:
875 spca50x_GetFirmware(gspca_dev); 884 spca50x_GetFirmware(gspca_dev);
876 reg_r(dev, 0x00, 0x5002, gspca_dev->usb_buf, 1); 885 reg_r(gspca_dev, 0x00, 0x5002, 1);
877 gspca_dev->usb_buf[0] = 0; 886 gspca_dev->usb_buf[0] = 0;
878 reg_w(dev, 0x24, 0, 0, gspca_dev->usb_buf, 1); 887 reg_w(gspca_dev, 0x24, 0, 0, 1);
879 reg_r(dev, 0x24, 0, gspca_dev->usb_buf, 1); 888 reg_r(gspca_dev, 0x24, 0, 1);
880 rc = spca504B_PollingDataReady(gspca_dev); 889 rc = spca504B_PollingDataReady(gspca_dev);
881 reg_w(dev, 0x34, 0, 0, NULL, 0); 890 reg_w(gspca_dev, 0x34, 0, 0, 0);
882 spca504B_WaitCmdStatus(gspca_dev); 891 spca504B_WaitCmdStatus(gspca_dev);
883 break; 892 break;
884 case BRIDGE_SPCA504C: /* pccam600 */ 893 case BRIDGE_SPCA504C: /* pccam600 */
@@ -971,12 +980,12 @@ static void sd_start(struct gspca_dev *gspca_dev)
971/* case BRIDGE_SPCA536: */ 980/* case BRIDGE_SPCA536: */
972 if (sd->subtype == MegapixV4 || 981 if (sd->subtype == MegapixV4 ||
973 sd->subtype == LogitechClickSmart820) { 982 sd->subtype == LogitechClickSmart820) {
974 reg_w(dev, 0xf0, 0, 0, NULL, 0); 983 reg_w(gspca_dev, 0xf0, 0, 0, 0);
975 spca504B_WaitCmdStatus(gspca_dev); 984 spca504B_WaitCmdStatus(gspca_dev);
976 reg_r(dev, 0xf0, 4, NULL, 0); 985 reg_r(gspca_dev, 0xf0, 4, 0);
977 spca504B_WaitCmdStatus(gspca_dev); 986 spca504B_WaitCmdStatus(gspca_dev);
978 } else { 987 } else {
979 reg_w(dev, 0x31, 0, 4, NULL, 0); 988 reg_w(gspca_dev, 0x31, 0, 4, 0);
980 spca504B_WaitCmdStatus(gspca_dev); 989 spca504B_WaitCmdStatus(gspca_dev);
981 rc = spca504B_PollingDataReady(gspca_dev); 990 rc = spca504B_PollingDataReady(gspca_dev);
982 } 991 }
@@ -1045,7 +1054,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1045/* case BRIDGE_SPCA533: */ 1054/* case BRIDGE_SPCA533: */
1046/* case BRIDGE_SPCA536: */ 1055/* case BRIDGE_SPCA536: */
1047/* case BRIDGE_SPCA504B: */ 1056/* case BRIDGE_SPCA504B: */
1048 reg_w(dev, 0x31, 0, 0, NULL, 0); 1057 reg_w(gspca_dev, 0x31, 0, 0, 0);
1049 spca504B_WaitCmdStatus(gspca_dev); 1058 spca504B_WaitCmdStatus(gspca_dev);
1050 spca504B_PollingDataReady(gspca_dev); 1059 spca504B_PollingDataReady(gspca_dev);
1051 break; 1060 break;
@@ -1069,14 +1078,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1069 } 1078 }
1070} 1079}
1071 1080
1072static void sd_stop0(struct gspca_dev *gspca_dev)
1073{
1074}
1075
1076static void sd_close(struct gspca_dev *gspca_dev)
1077{
1078}
1079
1080static void sd_pkt_scan(struct gspca_dev *gspca_dev, 1081static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1081 struct gspca_frame *frame, /* target */ 1082 struct gspca_frame *frame, /* target */
1082 __u8 *data, /* isoc packet */ 1083 __u8 *data, /* isoc packet */
@@ -1369,11 +1370,9 @@ static const struct sd_desc sd_desc = {
1369 .ctrls = sd_ctrls, 1370 .ctrls = sd_ctrls,
1370 .nctrls = ARRAY_SIZE(sd_ctrls), 1371 .nctrls = ARRAY_SIZE(sd_ctrls),
1371 .config = sd_config, 1372 .config = sd_config,
1372 .open = sd_open, 1373 .init = sd_init,
1373 .start = sd_start, 1374 .start = sd_start,
1374 .stopN = sd_stopN, 1375 .stopN = sd_stopN,
1375 .stop0 = sd_stop0,
1376 .close = sd_close,
1377 .pkt_scan = sd_pkt_scan, 1376 .pkt_scan = sd_pkt_scan,
1378}; 1377};
1379 1378
@@ -1456,6 +1455,10 @@ static struct usb_driver sd_driver = {
1456 .id_table = device_table, 1455 .id_table = device_table,
1457 .probe = sd_probe, 1456 .probe = sd_probe,
1458 .disconnect = gspca_disconnect, 1457 .disconnect = gspca_disconnect,
1458#ifdef CONFIG_PM
1459 .suspend = gspca_suspend,
1460 .resume = gspca_resume,
1461#endif
1459}; 1462};
1460 1463
1461/* -- module insert / remove -- */ 1464/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 91b555c34c68..f034c748fc7e 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -30,7 +30,7 @@
30 30
31#define MAX_GAMMA 0x10 /* 0 to 15 */ 31#define MAX_GAMMA 0x10 /* 0 to 15 */
32 32
33#define V4L2_CID_EFFECTS (V4L2_CID_PRIVATE_BASE + 3) 33#define V4L2_CID_EFFECTS (V4L2_CID_PRIVATE_BASE + 0)
34 34
35MODULE_AUTHOR("Leandro Costantino <le_costantino@pixartargentina.com.ar>"); 35MODULE_AUTHOR("Leandro Costantino <le_costantino@pixartargentina.com.ar>");
36MODULE_DESCRIPTION("GSPCA/T613 (JPEG Compliance) USB Camera Driver"); 36MODULE_DESCRIPTION("GSPCA/T613 (JPEG Compliance) USB Camera Driver");
@@ -233,7 +233,7 @@ static char *effects_control[] = {
233static struct v4l2_pix_format vga_mode_t16[] = { 233static struct v4l2_pix_format vga_mode_t16[] = {
234 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 234 {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
235 .bytesperline = 160, 235 .bytesperline = 160,
236 .sizeimage = 160 * 120 * 3 / 8 + 590, 236 .sizeimage = 160 * 120 * 4 / 8 + 590,
237 .colorspace = V4L2_COLORSPACE_JPEG, 237 .colorspace = V4L2_COLORSPACE_JPEG,
238 .priv = 4}, 238 .priv = 4},
239 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 239 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
@@ -391,7 +391,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
391 NULL, 0, 500); 391 NULL, 0, 500);
392 return; 392 return;
393 } 393 }
394 if (len <= sizeof gspca_dev->usb_buf) { 394 if (len <= USB_BUF_SZ) {
395 memcpy(gspca_dev->usb_buf, buffer, len); 395 memcpy(gspca_dev->usb_buf, buffer, len);
396 usb_control_msg(gspca_dev->dev, 396 usb_control_msg(gspca_dev->dev,
397 usb_sndctrlpipe(gspca_dev->dev, 0), 397 usb_sndctrlpipe(gspca_dev->dev, 0),
@@ -552,6 +552,13 @@ static int init_default_parameters(struct gspca_dev *gspca_dev)
552 return 0; 552 return 0;
553} 553}
554 554
555/* this function is called at probe and resume time */
556static int sd_init(struct gspca_dev *gspca_dev)
557{
558 init_default_parameters(gspca_dev);
559 return 0;
560}
561
555static void setbrightness(struct gspca_dev *gspca_dev) 562static void setbrightness(struct gspca_dev *gspca_dev)
556{ 563{
557 struct sd *sd = (struct sd *) gspca_dev; 564 struct sd *sd = (struct sd *) gspca_dev;
@@ -893,18 +900,6 @@ static void sd_start(struct gspca_dev *gspca_dev)
893 setcolors(gspca_dev); 900 setcolors(gspca_dev);
894} 901}
895 902
896static void sd_stopN(struct gspca_dev *gspca_dev)
897{
898}
899
900static void sd_stop0(struct gspca_dev *gspca_dev)
901{
902}
903
904static void sd_close(struct gspca_dev *gspca_dev)
905{
906}
907
908static void sd_pkt_scan(struct gspca_dev *gspca_dev, 903static void sd_pkt_scan(struct gspca_dev *gspca_dev,
909 struct gspca_frame *frame, /* target */ 904 struct gspca_frame *frame, /* target */
910 __u8 *data, /* isoc packet */ 905 __u8 *data, /* isoc packet */
@@ -972,24 +967,14 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
972 return -EINVAL; 967 return -EINVAL;
973} 968}
974 969
975/* this function is called at open time */
976static int sd_open(struct gspca_dev *gspca_dev)
977{
978 init_default_parameters(gspca_dev);
979 return 0;
980}
981
982/* sub-driver description */ 970/* sub-driver description */
983static const struct sd_desc sd_desc = { 971static const struct sd_desc sd_desc = {
984 .name = MODULE_NAME, 972 .name = MODULE_NAME,
985 .ctrls = sd_ctrls, 973 .ctrls = sd_ctrls,
986 .nctrls = ARRAY_SIZE(sd_ctrls), 974 .nctrls = ARRAY_SIZE(sd_ctrls),
987 .config = sd_config, 975 .config = sd_config,
988 .open = sd_open, 976 .init = sd_init,
989 .start = sd_start, 977 .start = sd_start,
990 .stopN = sd_stopN,
991 .stop0 = sd_stop0,
992 .close = sd_close,
993 .pkt_scan = sd_pkt_scan, 978 .pkt_scan = sd_pkt_scan,
994 .querymenu = sd_querymenu, 979 .querymenu = sd_querymenu,
995}; 980};
@@ -1014,6 +999,10 @@ static struct usb_driver sd_driver = {
1014 .id_table = device_table, 999 .id_table = device_table,
1015 .probe = sd_probe, 1000 .probe = sd_probe,
1016 .disconnect = gspca_disconnect, 1001 .disconnect = gspca_disconnect,
1002#ifdef CONFIG_PM
1003 .suspend = gspca_suspend,
1004 .resume = gspca_resume,
1005#endif
1017}; 1006};
1018 1007
1019/* -- module insert / remove -- */ 1008/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index 1ff8ba2f7fe5..084af05302a0 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -331,8 +331,8 @@ static void tv_8532_PollReg(struct gspca_dev *gspca_dev)
331 } 331 }
332} 332}
333 333
334/* this function is called at open time */ 334/* this function is called at probe and resume time */
335static int sd_open(struct gspca_dev *gspca_dev) 335static int sd_init(struct gspca_dev *gspca_dev)
336{ 336{
337 reg_w_1(gspca_dev, TV8532_AD_SLOPE, 0x32); 337 reg_w_1(gspca_dev, TV8532_AD_SLOPE, 0x32);
338 reg_w_1(gspca_dev, TV8532_AD_BITCTRL, 0x00); 338 reg_w_1(gspca_dev, TV8532_AD_BITCTRL, 0x00);
@@ -450,14 +450,6 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
450 reg_w_1(gspca_dev, TV8532_GPIO_OE, 0x0b); 450 reg_w_1(gspca_dev, TV8532_GPIO_OE, 0x0b);
451} 451}
452 452
453static void sd_stop0(struct gspca_dev *gspca_dev)
454{
455}
456
457static void sd_close(struct gspca_dev *gspca_dev)
458{
459}
460
461static void tv8532_preprocess(struct gspca_dev *gspca_dev) 453static void tv8532_preprocess(struct gspca_dev *gspca_dev)
462{ 454{
463 struct sd *sd = (struct sd *) gspca_dev; 455 struct sd *sd = (struct sd *) gspca_dev;
@@ -611,11 +603,9 @@ static const struct sd_desc sd_desc = {
611 .ctrls = sd_ctrls, 603 .ctrls = sd_ctrls,
612 .nctrls = ARRAY_SIZE(sd_ctrls), 604 .nctrls = ARRAY_SIZE(sd_ctrls),
613 .config = sd_config, 605 .config = sd_config,
614 .open = sd_open, 606 .init = sd_init,
615 .start = sd_start, 607 .start = sd_start,
616 .stopN = sd_stopN, 608 .stopN = sd_stopN,
617 .stop0 = sd_stop0,
618 .close = sd_close,
619 .pkt_scan = sd_pkt_scan, 609 .pkt_scan = sd_pkt_scan,
620}; 610};
621 611
@@ -644,6 +634,10 @@ static struct usb_driver sd_driver = {
644 .id_table = device_table, 634 .id_table = device_table,
645 .probe = sd_probe, 635 .probe = sd_probe,
646 .disconnect = gspca_disconnect, 636 .disconnect = gspca_disconnect,
637#ifdef CONFIG_PM
638 .suspend = gspca_suspend,
639 .resume = gspca_resume,
640#endif
647}; 641};
648 642
649/* -- module insert / remove -- */ 643/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index f4a52956e0d9..bd4c226c9a07 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -69,6 +69,7 @@ static struct ctrl sd_ctrls[] = {
69 .set = sd_setautogain, 69 .set = sd_setautogain,
70 .get = sd_getautogain, 70 .get = sd_getautogain,
71 }, 71 },
72#define LIGHTFREQ_IDX 1
72 { 73 {
73 { 74 {
74 .id = V4L2_CID_POWER_LINE_FREQUENCY, 75 .id = V4L2_CID_POWER_LINE_FREQUENCY,
@@ -87,12 +88,12 @@ static struct ctrl sd_ctrls[] = {
87}; 88};
88 89
89static struct v4l2_pix_format vc0321_mode[] = { 90static struct v4l2_pix_format vc0321_mode[] = {
90 {320, 240, V4L2_PIX_FMT_YUV420, V4L2_FIELD_NONE, 91 {320, 240, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE,
91 .bytesperline = 320, 92 .bytesperline = 320,
92 .sizeimage = 320 * 240 * 2, 93 .sizeimage = 320 * 240 * 2,
93 .colorspace = V4L2_COLORSPACE_SRGB, 94 .colorspace = V4L2_COLORSPACE_SRGB,
94 .priv = 1}, 95 .priv = 1},
95 {640, 480, V4L2_PIX_FMT_YUV420, V4L2_FIELD_NONE, 96 {640, 480, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE,
96 .bytesperline = 640, 97 .bytesperline = 640,
97 .sizeimage = 640 * 480 * 2, 98 .sizeimage = 640 * 480 * 2,
98 .colorspace = V4L2_COLORSPACE_SRGB, 99 .colorspace = V4L2_COLORSPACE_SRGB,
@@ -1463,6 +1464,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
1463 sd->qindex = 7; 1464 sd->qindex = 7;
1464 sd->autogain = AUTOGAIN_DEF; 1465 sd->autogain = AUTOGAIN_DEF;
1465 sd->lightfreq = FREQ_DEF; 1466 sd->lightfreq = FREQ_DEF;
1467 if (sd->sensor != SENSOR_OV7670)
1468 gspca_dev->ctrl_dis = (1 << LIGHTFREQ_IDX);
1466 1469
1467 if (sd->bridge == BRIDGE_VC0321) { 1470 if (sd->bridge == BRIDGE_VC0321) {
1468 reg_r(gspca_dev, 0x8a, 0, 3); 1471 reg_r(gspca_dev, 0x8a, 0, 3);
@@ -1474,8 +1477,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
1474 return 0; 1477 return 0;
1475} 1478}
1476 1479
1477/* this function is called at open time */ 1480/* this function is called at probe and time */
1478static int sd_open(struct gspca_dev *gspca_dev) 1481static int sd_init(struct gspca_dev *gspca_dev)
1479{ 1482{
1480 return 0; 1483 return 0;
1481} 1484}
@@ -1637,19 +1640,6 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
1637 reg_w(dev, 0x89, 0xffff, 0xffff); 1640 reg_w(dev, 0x89, 0xffff, 0xffff);
1638} 1641}
1639 1642
1640/* this function is called at close time */
1641static void sd_close(struct gspca_dev *gspca_dev)
1642{
1643/* struct usb_device *dev = gspca_dev->dev;
1644 __u8 buffread;
1645
1646 reg_w(dev, 0x89, 0xffff, 0xffff);
1647 reg_w(dev, 0xa0, 0x01, 0xb301);
1648 reg_w(dev, 0xa0, 0x09, 0xb303);
1649 reg_w(dev, 0x89, 0xffff, 0xffff);
1650*/
1651}
1652
1653static void sd_pkt_scan(struct gspca_dev *gspca_dev, 1643static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1654 struct gspca_frame *frame, /* target */ 1644 struct gspca_frame *frame, /* target */
1655 __u8 *data, /* isoc packet */ 1645 __u8 *data, /* isoc packet */
@@ -1738,11 +1728,10 @@ static const struct sd_desc sd_desc = {
1738 .ctrls = sd_ctrls, 1728 .ctrls = sd_ctrls,
1739 .nctrls = ARRAY_SIZE(sd_ctrls), 1729 .nctrls = ARRAY_SIZE(sd_ctrls),
1740 .config = sd_config, 1730 .config = sd_config,
1741 .open = sd_open, 1731 .init = sd_init,
1742 .start = sd_start, 1732 .start = sd_start,
1743 .stopN = sd_stopN, 1733 .stopN = sd_stopN,
1744 .stop0 = sd_stop0, 1734 .stop0 = sd_stop0,
1745 .close = sd_close,
1746 .pkt_scan = sd_pkt_scan, 1735 .pkt_scan = sd_pkt_scan,
1747 .querymenu = sd_querymenu, 1736 .querymenu = sd_querymenu,
1748}; 1737};
@@ -1774,6 +1763,10 @@ static struct usb_driver sd_driver = {
1774 .id_table = device_table, 1763 .id_table = device_table,
1775 .probe = sd_probe, 1764 .probe = sd_probe,
1776 .disconnect = gspca_disconnect, 1765 .disconnect = gspca_disconnect,
1766#ifdef CONFIG_PM
1767 .suspend = gspca_suspend,
1768 .resume = gspca_resume,
1769#endif
1777}; 1770};
1778 1771
1779/* -- module insert / remove -- */ 1772/* -- module insert / remove -- */
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index bc7d0eedcd81..8d7c27e6ac77 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -85,6 +85,7 @@ static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
85static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val); 85static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
86 86
87static struct ctrl sd_ctrls[] = { 87static struct ctrl sd_ctrls[] = {
88#define BRIGHTNESS_IDX 0
88#define SD_BRIGHTNESS 0 89#define SD_BRIGHTNESS 0
89 { 90 {
90 { 91 {
@@ -141,6 +142,7 @@ static struct ctrl sd_ctrls[] = {
141 .set = sd_setautogain, 142 .set = sd_setautogain,
142 .get = sd_getautogain, 143 .get = sd_getautogain,
143 }, 144 },
145#define LIGHTFREQ_IDX 4
144#define SD_FREQ 4 146#define SD_FREQ 4
145 { 147 {
146 { 148 {
@@ -6964,8 +6966,13 @@ static int zcxx_probeSensor(struct gspca_dev *gspca_dev)
6964 case SENSOR_MC501CB: 6966 case SENSOR_MC501CB:
6965 return -1; /* don't probe */ 6967 return -1; /* don't probe */
6966 case SENSOR_TAS5130C_VF0250: 6968 case SENSOR_TAS5130C_VF0250:
6967 /* may probe but with write in reg 0x0010 */ 6969 /* may probe but with no write in reg 0x0010 */
6968 return -1; /* don't probe */ 6970 return -1; /* don't probe */
6971 case SENSOR_PAS106:
6972 sensor = sif_probe(gspca_dev);
6973 if (sensor >= 0)
6974 return sensor;
6975 break;
6969 } 6976 }
6970 sensor = vga_2wr_probe(gspca_dev); 6977 sensor = vga_2wr_probe(gspca_dev);
6971 if (sensor >= 0) { 6978 if (sensor >= 0) {
@@ -6974,12 +6981,10 @@ static int zcxx_probeSensor(struct gspca_dev *gspca_dev)
6974 /* next probe is needed for OmniVision ? */ 6981 /* next probe is needed for OmniVision ? */
6975 } 6982 }
6976 sensor2 = vga_3wr_probe(gspca_dev); 6983 sensor2 = vga_3wr_probe(gspca_dev);
6977 if (sensor2 >= 0) { 6984 if (sensor2 >= 0
6978 if (sensor >= 0) 6985 && sensor >= 0)
6979 return sensor; 6986 return sensor;
6980 return sensor2; 6987 return sensor2;
6981 }
6982 return sif_probe(gspca_dev);
6983} 6988}
6984 6989
6985/* this function is called at probe time */ 6990/* this function is called at probe time */
@@ -7147,13 +7152,27 @@ static int sd_config(struct gspca_dev *gspca_dev,
7147 sd->lightfreq = sd_ctrls[SD_FREQ].qctrl.default_value; 7152 sd->lightfreq = sd_ctrls[SD_FREQ].qctrl.default_value;
7148 sd->sharpness = sd_ctrls[SD_SHARPNESS].qctrl.default_value; 7153 sd->sharpness = sd_ctrls[SD_SHARPNESS].qctrl.default_value;
7149 7154
7155 switch (sd->sensor) {
7156 case SENSOR_GC0305:
7157 case SENSOR_OV7620:
7158 case SENSOR_PO2030:
7159 gspca_dev->ctrl_dis = (1 << BRIGHTNESS_IDX);
7160 break;
7161 case SENSOR_HDCS2020:
7162 case SENSOR_HV7131B:
7163 case SENSOR_HV7131C:
7164 case SENSOR_OV7630C:
7165 gspca_dev->ctrl_dis = (1 << LIGHTFREQ_IDX);
7166 break;
7167 }
7168
7150 /* switch the led off */ 7169 /* switch the led off */
7151 reg_w(gspca_dev->dev, 0x01, 0x0000); 7170 reg_w(gspca_dev->dev, 0x01, 0x0000);
7152 return 0; 7171 return 0;
7153} 7172}
7154 7173
7155/* this function is called at open time */ 7174/* this function is called at probe and resume time */
7156static int sd_open(struct gspca_dev *gspca_dev) 7175static int sd_init(struct gspca_dev *gspca_dev)
7157{ 7176{
7158 reg_w(gspca_dev->dev, 0x01, 0x0000); 7177 reg_w(gspca_dev->dev, 0x01, 0x0000);
7159 return 0; 7178 return 0;
@@ -7314,10 +7333,6 @@ static void sd_start(struct gspca_dev *gspca_dev)
7314 } 7333 }
7315} 7334}
7316 7335
7317static void sd_stopN(struct gspca_dev *gspca_dev)
7318{
7319}
7320
7321static void sd_stop0(struct gspca_dev *gspca_dev) 7336static void sd_stop0(struct gspca_dev *gspca_dev)
7322{ 7337{
7323 struct sd *sd = (struct sd *) gspca_dev; 7338 struct sd *sd = (struct sd *) gspca_dev;
@@ -7325,11 +7340,6 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
7325 send_unknown(gspca_dev->dev, sd->sensor); 7340 send_unknown(gspca_dev->dev, sd->sensor);
7326} 7341}
7327 7342
7328/* this function is called at close time */
7329static void sd_close(struct gspca_dev *gspca_dev)
7330{
7331}
7332
7333static void sd_pkt_scan(struct gspca_dev *gspca_dev, 7343static void sd_pkt_scan(struct gspca_dev *gspca_dev,
7334 struct gspca_frame *frame, 7344 struct gspca_frame *frame,
7335 __u8 *data, 7345 __u8 *data,
@@ -7489,37 +7499,30 @@ static const struct sd_desc sd_desc = {
7489 .ctrls = sd_ctrls, 7499 .ctrls = sd_ctrls,
7490 .nctrls = sizeof sd_ctrls / sizeof sd_ctrls[0], 7500 .nctrls = sizeof sd_ctrls / sizeof sd_ctrls[0],
7491 .config = sd_config, 7501 .config = sd_config,
7492 .open = sd_open, 7502 .init = sd_init,
7493 .start = sd_start, 7503 .start = sd_start,
7494 .stopN = sd_stopN,
7495 .stop0 = sd_stop0, 7504 .stop0 = sd_stop0,
7496 .close = sd_close,
7497 .pkt_scan = sd_pkt_scan, 7505 .pkt_scan = sd_pkt_scan,
7498 .querymenu = sd_querymenu, 7506 .querymenu = sd_querymenu,
7499}; 7507};
7500 7508
7501static const __devinitdata struct usb_device_id device_table[] = { 7509static const __devinitdata struct usb_device_id device_table[] = {
7502 {USB_DEVICE(0x041e, 0x041e)}, 7510 {USB_DEVICE(0x041e, 0x041e)},
7503#ifndef CONFIG_USB_ZC0301
7504 {USB_DEVICE(0x041e, 0x4017)}, 7511 {USB_DEVICE(0x041e, 0x4017)},
7505 {USB_DEVICE(0x041e, 0x401c)}, 7512 {USB_DEVICE(0x041e, 0x401c), .driver_info = SENSOR_PAS106},
7506 {USB_DEVICE(0x041e, 0x401e)}, 7513 {USB_DEVICE(0x041e, 0x401e)},
7507 {USB_DEVICE(0x041e, 0x401f)}, 7514 {USB_DEVICE(0x041e, 0x401f)},
7508#endif 7515 {USB_DEVICE(0x041e, 0x4022)},
7509 {USB_DEVICE(0x041e, 0x4029)}, 7516 {USB_DEVICE(0x041e, 0x4029)},
7510#ifndef CONFIG_USB_ZC0301 7517 {USB_DEVICE(0x041e, 0x4034), .driver_info = SENSOR_PAS106},
7511 {USB_DEVICE(0x041e, 0x4034)}, 7518 {USB_DEVICE(0x041e, 0x4035), .driver_info = SENSOR_PAS106},
7512 {USB_DEVICE(0x041e, 0x4035)},
7513 {USB_DEVICE(0x041e, 0x4036)}, 7519 {USB_DEVICE(0x041e, 0x4036)},
7514 {USB_DEVICE(0x041e, 0x403a)}, 7520 {USB_DEVICE(0x041e, 0x403a)},
7515#endif
7516 {USB_DEVICE(0x041e, 0x4051), .driver_info = SENSOR_TAS5130C_VF0250}, 7521 {USB_DEVICE(0x041e, 0x4051), .driver_info = SENSOR_TAS5130C_VF0250},
7517 {USB_DEVICE(0x041e, 0x4053), .driver_info = SENSOR_TAS5130C_VF0250}, 7522 {USB_DEVICE(0x041e, 0x4053), .driver_info = SENSOR_TAS5130C_VF0250},
7518#ifndef CONFIG_USB_ZC0301
7519 {USB_DEVICE(0x0458, 0x7007)}, 7523 {USB_DEVICE(0x0458, 0x7007)},
7520 {USB_DEVICE(0x0458, 0x700c)}, 7524 {USB_DEVICE(0x0458, 0x700c)},
7521 {USB_DEVICE(0x0458, 0x700f)}, 7525 {USB_DEVICE(0x0458, 0x700f)},
7522#endif
7523 {USB_DEVICE(0x0461, 0x0a00)}, 7526 {USB_DEVICE(0x0461, 0x0a00)},
7524 {USB_DEVICE(0x046d, 0x08a0)}, 7527 {USB_DEVICE(0x046d, 0x08a0)},
7525 {USB_DEVICE(0x046d, 0x08a1)}, 7528 {USB_DEVICE(0x046d, 0x08a1)},
@@ -7531,7 +7534,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
7531 {USB_DEVICE(0x046d, 0x08aa)}, 7534 {USB_DEVICE(0x046d, 0x08aa)},
7532 {USB_DEVICE(0x046d, 0x08ac)}, 7535 {USB_DEVICE(0x046d, 0x08ac)},
7533 {USB_DEVICE(0x046d, 0x08ad)}, 7536 {USB_DEVICE(0x046d, 0x08ad)},
7534#ifndef CONFIG_USB_ZC0301 7537#if !defined CONFIG_USB_ZC0301 && !defined CONFIG_USB_ZC0301_MODULE
7535 {USB_DEVICE(0x046d, 0x08ae)}, 7538 {USB_DEVICE(0x046d, 0x08ae)},
7536#endif 7539#endif
7537 {USB_DEVICE(0x046d, 0x08af)}, 7540 {USB_DEVICE(0x046d, 0x08af)},
@@ -7541,27 +7544,25 @@ static const __devinitdata struct usb_device_id device_table[] = {
7541 {USB_DEVICE(0x046d, 0x08d8)}, 7544 {USB_DEVICE(0x046d, 0x08d8)},
7542 {USB_DEVICE(0x046d, 0x08da)}, 7545 {USB_DEVICE(0x046d, 0x08da)},
7543 {USB_DEVICE(0x046d, 0x08dd), .driver_info = SENSOR_MC501CB}, 7546 {USB_DEVICE(0x046d, 0x08dd), .driver_info = SENSOR_MC501CB},
7544 {USB_DEVICE(0x0471, 0x0325)}, 7547 {USB_DEVICE(0x0471, 0x0325), .driver_info = SENSOR_PAS106},
7545 {USB_DEVICE(0x0471, 0x0326)}, 7548 {USB_DEVICE(0x0471, 0x0326), .driver_info = SENSOR_PAS106},
7546 {USB_DEVICE(0x0471, 0x032d)}, 7549 {USB_DEVICE(0x0471, 0x032d), .driver_info = SENSOR_PAS106},
7547 {USB_DEVICE(0x0471, 0x032e)}, 7550 {USB_DEVICE(0x0471, 0x032e), .driver_info = SENSOR_PAS106},
7548 {USB_DEVICE(0x055f, 0xc005)}, 7551 {USB_DEVICE(0x055f, 0xc005)},
7549#ifndef CONFIG_USB_ZC0301
7550 {USB_DEVICE(0x055f, 0xd003)}, 7552 {USB_DEVICE(0x055f, 0xd003)},
7551 {USB_DEVICE(0x055f, 0xd004)}, 7553 {USB_DEVICE(0x055f, 0xd004)},
7552#endif
7553 {USB_DEVICE(0x0698, 0x2003)}, 7554 {USB_DEVICE(0x0698, 0x2003)},
7555 {USB_DEVICE(0x0ac8, 0x0301), .driver_info = SENSOR_PAS106},
7554 {USB_DEVICE(0x0ac8, 0x0302)}, 7556 {USB_DEVICE(0x0ac8, 0x0302)},
7555#ifndef CONFIG_USB_ZC0301
7556 {USB_DEVICE(0x0ac8, 0x301b)}, 7557 {USB_DEVICE(0x0ac8, 0x301b)},
7558#if !defined CONFIG_USB_ZC0301 && !defined CONFIG_USB_ZC0301_MODULE
7557 {USB_DEVICE(0x0ac8, 0x303b)}, 7559 {USB_DEVICE(0x0ac8, 0x303b)},
7558#endif 7560#endif
7559 {USB_DEVICE(0x0ac8, 0x305b), .driver_info = SENSOR_TAS5130C_VF0250}, 7561 {USB_DEVICE(0x0ac8, 0x305b), .driver_info = SENSOR_TAS5130C_VF0250},
7560#ifndef CONFIG_USB_ZC0301
7561 {USB_DEVICE(0x0ac8, 0x307b)}, 7562 {USB_DEVICE(0x0ac8, 0x307b)},
7562 {USB_DEVICE(0x10fd, 0x0128)}, 7563 {USB_DEVICE(0x10fd, 0x0128)},
7564 {USB_DEVICE(0x10fd, 0x804d)},
7563 {USB_DEVICE(0x10fd, 0x8050)}, 7565 {USB_DEVICE(0x10fd, 0x8050)},
7564#endif
7565 {} /* end of entry */ 7566 {} /* end of entry */
7566}; 7567};
7567#undef DVNAME 7568#undef DVNAME
@@ -7581,6 +7582,10 @@ static struct usb_driver sd_driver = {
7581 .id_table = device_table, 7582 .id_table = device_table,
7582 .probe = sd_probe, 7583 .probe = sd_probe,
7583 .disconnect = gspca_disconnect, 7584 .disconnect = gspca_disconnect,
7585#ifdef CONFIG_PM
7586 .suspend = gspca_suspend,
7587 .resume = gspca_resume,
7588#endif
7584}; 7589};
7585 7590
7586static int __init sd_mod_init(void) 7591static int __init sd_mod_init(void)
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index aea1664948ce..4afc7ea07e86 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -688,7 +688,7 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
688 spin_lock_init(&itv->lock); 688 spin_lock_init(&itv->lock);
689 spin_lock_init(&itv->dma_reg_lock); 689 spin_lock_init(&itv->dma_reg_lock);
690 690
691 itv->irq_work_queues = create_workqueue(itv->name); 691 itv->irq_work_queues = create_singlethread_workqueue(itv->name);
692 if (itv->irq_work_queues == NULL) { 692 if (itv->irq_work_queues == NULL) {
693 IVTV_ERR("Could not create ivtv workqueue\n"); 693 IVTV_ERR("Could not create ivtv workqueue\n");
694 return -1; 694 return -1;
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index ab287b48fc2b..2ceb5227637c 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -251,6 +251,7 @@ struct ivtv_mailbox_data {
251#define IVTV_F_I_DEC_PAUSED 20 /* the decoder is paused */ 251#define IVTV_F_I_DEC_PAUSED 20 /* the decoder is paused */
252#define IVTV_F_I_INITED 21 /* set after first open */ 252#define IVTV_F_I_INITED 21 /* set after first open */
253#define IVTV_F_I_FAILED 22 /* set if first open failed */ 253#define IVTV_F_I_FAILED 22 /* set if first open failed */
254#define IVTV_F_I_WORK_INITED 23 /* worker thread was initialized */
254 255
255/* Event notifications */ 256/* Event notifications */
256#define IVTV_F_I_EV_DEC_STOPPED 28 /* decoder stopped event */ 257#define IVTV_F_I_EV_DEC_STOPPED 28 /* decoder stopped event */
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index fba150a6cd23..34f3ab827858 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -76,6 +76,13 @@ void ivtv_irq_work_handler(struct work_struct *work)
76 76
77 DEFINE_WAIT(wait); 77 DEFINE_WAIT(wait);
78 78
79 if (test_and_clear_bit(IVTV_F_I_WORK_INITED, &itv->i_flags)) {
80 struct sched_param param = { .sched_priority = 99 };
81
82 /* This thread must use the FIFO scheduler as it
83 is realtime sensitive. */
84 sched_setscheduler(current, SCHED_FIFO, &param);
85 }
79 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags)) 86 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
80 ivtv_pio_work_handler(itv); 87 ivtv_pio_work_handler(itv);
81 88
@@ -678,34 +685,14 @@ static void ivtv_irq_enc_start_cap(struct ivtv *itv)
678 685
679static void ivtv_irq_enc_vbi_cap(struct ivtv *itv) 686static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
680{ 687{
681 struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
682 u32 data[CX2341X_MBOX_MAX_DATA]; 688 u32 data[CX2341X_MBOX_MAX_DATA];
683 struct ivtv_stream *s; 689 struct ivtv_stream *s;
684 690
685 IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n"); 691 IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
686 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI]; 692 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
687 693
688 /* If more than two VBI buffers are pending, then 694 if (!stream_enc_dma_append(s, data))
689 clear the old ones and start with this new one.
690 This can happen during transition stages when MPEG capturing is
691 started, but the first interrupts haven't arrived yet. During
692 that period VBI requests can accumulate without being able to
693 DMA the data. Since at most four VBI DMA buffers are available,
694 we just drop the old requests when there are already three
695 requests queued. */
696 if (s->sg_pending_size > 2) {
697 struct ivtv_buffer *buf;
698 list_for_each_entry(buf, &s->q_predma.list, list)
699 ivtv_buf_sync_for_cpu(s, buf);
700 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
701 s->sg_pending_size = 0;
702 }
703 /* if we can append the data, and the MPEG stream isn't capturing,
704 then start a DMA request for just the VBI data. */
705 if (!stream_enc_dma_append(s, data) &&
706 !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
707 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags); 695 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
708 }
709} 696}
710 697
711static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv) 698static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
diff --git a/drivers/media/video/ivtv/ivtv-queue.h b/drivers/media/video/ivtv/ivtv-queue.h
index 7cfc0c9ab050..476556afd39a 100644
--- a/drivers/media/video/ivtv/ivtv-queue.h
+++ b/drivers/media/video/ivtv/ivtv-queue.h
@@ -23,7 +23,7 @@
23#define IVTV_QUEUE_H 23#define IVTV_QUEUE_H
24 24
25#define IVTV_DMA_UNMAPPED ((u32) -1) 25#define IVTV_DMA_UNMAPPED ((u32) -1)
26#define SLICED_VBI_PIO 1 26#define SLICED_VBI_PIO 0
27 27
28/* ivtv_buffer utility functions */ 28/* ivtv_buffer utility functions */
29 29
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 54d2023b26c4..730e85d86fc8 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -363,7 +363,7 @@ static void ivtv_vbi_setup(struct ivtv *itv)
363 /* Every X number of frames a VBI interrupt arrives (frames as in 25 or 30 fps) */ 363 /* Every X number of frames a VBI interrupt arrives (frames as in 25 or 30 fps) */
364 data[1] = 1; 364 data[1] = 1;
365 /* The VBI frames are stored in a ringbuffer with this size (with a VBI frame as unit) */ 365 /* The VBI frames are stored in a ringbuffer with this size (with a VBI frame as unit) */
366 data[2] = raw ? 4 : 8; 366 data[2] = raw ? 4 : 4 * (itv->vbi.raw_size / itv->vbi.enc_size);
367 /* The start/stop codes determine which VBI lines end up in the raw VBI data area. 367 /* The start/stop codes determine which VBI lines end up in the raw VBI data area.
368 The codes are from table 24 in the saa7115 datasheet. Each raw/sliced/video line 368 The codes are from table 24 in the saa7115 datasheet. Each raw/sliced/video line
369 is framed with codes FF0000XX where XX is the SAV/EAV (Start/End of Active Video) 369 is framed with codes FF0000XX where XX is the SAV/EAV (Start/End of Active Video)
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index 71798f0da27f..1ce9deb1104f 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -293,6 +293,7 @@ static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8
293 u32 line_size = itv->vbi.sliced_decoder_line_size; 293 u32 line_size = itv->vbi.sliced_decoder_line_size;
294 struct v4l2_decode_vbi_line vbi; 294 struct v4l2_decode_vbi_line vbi;
295 int i; 295 int i;
296 unsigned lines = 0;
296 297
297 /* find the first valid line */ 298 /* find the first valid line */
298 for (i = 0; i < size; i++, buf++) { 299 for (i = 0; i < size; i++, buf++) {
@@ -313,7 +314,8 @@ static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8
313 } 314 }
314 vbi.p = p + 4; 315 vbi.p = p + 4;
315 itv->video_dec_func(itv, VIDIOC_INT_DECODE_VBI_LINE, &vbi); 316 itv->video_dec_func(itv, VIDIOC_INT_DECODE_VBI_LINE, &vbi);
316 if (vbi.type) { 317 if (vbi.type && !(lines & (1 << vbi.line))) {
318 lines |= 1 << vbi.line;
317 itv->vbi.sliced_data[line].id = vbi.type; 319 itv->vbi.sliced_data[line].id = vbi.type;
318 itv->vbi.sliced_data[line].field = vbi.is_second_field; 320 itv->vbi.sliced_data[line].field = vbi.is_second_field;
319 itv->vbi.sliced_data[line].line = vbi.line; 321 itv->vbi.sliced_data[line].line = vbi.line;
diff --git a/drivers/media/video/ivtv/ivtv-version.h b/drivers/media/video/ivtv/ivtv-version.h
index 442f43f11b73..8cd753d30bf7 100644
--- a/drivers/media/video/ivtv/ivtv-version.h
+++ b/drivers/media/video/ivtv/ivtv-version.h
@@ -22,7 +22,7 @@
22 22
23#define IVTV_DRIVER_NAME "ivtv" 23#define IVTV_DRIVER_NAME "ivtv"
24#define IVTV_DRIVER_VERSION_MAJOR 1 24#define IVTV_DRIVER_VERSION_MAJOR 1
25#define IVTV_DRIVER_VERSION_MINOR 3 25#define IVTV_DRIVER_VERSION_MINOR 4
26#define IVTV_DRIVER_VERSION_PATCHLEVEL 0 26#define IVTV_DRIVER_VERSION_PATCHLEVEL 0
27 27
28#define IVTV_VERSION __stringify(IVTV_DRIVER_VERSION_MAJOR) "." __stringify(IVTV_DRIVER_VERSION_MINOR) "." __stringify(IVTV_DRIVER_VERSION_PATCHLEVEL) 28#define IVTV_VERSION __stringify(IVTV_DRIVER_VERSION_MAJOR) "." __stringify(IVTV_DRIVER_VERSION_MINOR) "." __stringify(IVTV_DRIVER_VERSION_PATCHLEVEL)
diff --git a/drivers/media/video/ks0127.c b/drivers/media/video/ks0127.c
index 4895540be195..2fd4b4a44aa9 100644
--- a/drivers/media/video/ks0127.c
+++ b/drivers/media/video/ks0127.c
@@ -679,26 +679,27 @@ static int ks0127_command(struct i2c_client *client,
679 679
680 case DECODER_ENABLE_OUTPUT: 680 case DECODER_ENABLE_OUTPUT:
681 { 681 {
682 int enable;
682 683
683 int *iarg = arg; 684 iarg = arg;
684 int enable = (*iarg != 0); 685 enable = (*iarg != 0);
685 if (enable) { 686 if (enable) {
686 dprintk("ks0127: command " 687 dprintk("ks0127: command "
687 "DECODER_ENABLE_OUTPUT on " 688 "DECODER_ENABLE_OUTPUT on "
688 "(%d)\n", enable); 689 "(%d)\n", enable);
689 /* All output pins on */ 690 /* All output pins on */
690 ks0127_and_or(ks, KS_OFMTA, 0xcf, 0x30); 691 ks0127_and_or(ks, KS_OFMTA, 0xcf, 0x30);
691 /* Obey the OEN pin */ 692 /* Obey the OEN pin */
692 ks0127_and_or(ks, KS_CDEM, 0x7f, 0x00); 693 ks0127_and_or(ks, KS_CDEM, 0x7f, 0x00);
693 } else { 694 } else {
694 dprintk("ks0127: command " 695 dprintk("ks0127: command "
695 "DECODER_ENABLE_OUTPUT off " 696 "DECODER_ENABLE_OUTPUT off "
696 "(%d)\n", enable); 697 "(%d)\n", enable);
697 /* Video output pins off */ 698 /* Video output pins off */
698 ks0127_and_or(ks, KS_OFMTA, 0xcf, 0x00); 699 ks0127_and_or(ks, KS_OFMTA, 0xcf, 0x00);
699 /* Ignore the OEN pin */ 700 /* Ignore the OEN pin */
700 ks0127_and_or(ks, KS_CDEM, 0x7f, 0x80); 701 ks0127_and_or(ks, KS_CDEM, 0x7f, 0x80);
701 } 702 }
702 } 703 }
703 break; 704 break;
704 705
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 7c8ef6ac6c39..a9ef7802eb5f 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1806,6 +1806,7 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
1806 memcpy(meye.video_dev, &meye_template, sizeof(meye_template)); 1806 memcpy(meye.video_dev, &meye_template, sizeof(meye_template));
1807 meye.video_dev->parent = &meye.mchip_dev->dev; 1807 meye.video_dev->parent = &meye.mchip_dev->dev;
1808 1808
1809 ret = -EIO;
1809 if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) { 1810 if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
1810 printk(KERN_ERR "meye: unable to power on the camera\n"); 1811 printk(KERN_ERR "meye: unable to power on the camera\n");
1811 printk(KERN_ERR "meye: did you enable the camera in " 1812 printk(KERN_ERR "meye: did you enable the camera in "
@@ -1813,7 +1814,6 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
1813 goto outsonypienable; 1814 goto outsonypienable;
1814 } 1815 }
1815 1816
1816 ret = -EIO;
1817 if ((ret = pci_enable_device(meye.mchip_dev))) { 1817 if ((ret = pci_enable_device(meye.mchip_dev))) {
1818 printk(KERN_ERR "meye: pci_enable_device failed\n"); 1818 printk(KERN_ERR "meye: pci_enable_device failed\n");
1819 goto outenabledev; 1819 goto outenabledev;
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index f68e91fbe7fb..8ef578caba3b 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -931,27 +931,29 @@ static int mxb_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
931 return 0; 931 return 0;
932} 932}
933 933
934static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std) 934static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *standard)
935{ 935{
936 struct mxb* mxb = (struct mxb*)dev->ext_priv; 936 struct mxb *mxb = (struct mxb *)dev->ext_priv;
937 int zero = 0; 937 int zero = 0;
938 int one = 1; 938 int one = 1;
939 939
940 if(V4L2_STD_PAL_I == std->id ) { 940 if (V4L2_STD_PAL_I == standard->id) {
941 v4l2_std_id std = V4L2_STD_PAL_I; 941 v4l2_std_id std = V4L2_STD_PAL_I;
942
942 DEB_D(("VIDIOC_S_STD: setting mxb for PAL_I.\n")); 943 DEB_D(("VIDIOC_S_STD: setting mxb for PAL_I.\n"));
943 /* set the 7146 gpio register -- I don't know what this does exactly */ 944 /* set the 7146 gpio register -- I don't know what this does exactly */
944 saa7146_write(dev, GPIO_CTRL, 0x00404050); 945 saa7146_write(dev, GPIO_CTRL, 0x00404050);
945 /* unset the 7111 gpio register -- I don't know what this does exactly */ 946 /* unset the 7111 gpio register -- I don't know what this does exactly */
946 mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &zero); 947 mxb->saa7111a->driver->command(mxb->saa7111a, DECODER_SET_GPIO, &zero);
947 mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std); 948 mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std);
948 } else { 949 } else {
949 v4l2_std_id std = V4L2_STD_PAL_BG; 950 v4l2_std_id std = V4L2_STD_PAL_BG;
951
950 DEB_D(("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM.\n")); 952 DEB_D(("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM.\n"));
951 /* set the 7146 gpio register -- I don't know what this does exactly */ 953 /* set the 7146 gpio register -- I don't know what this does exactly */
952 saa7146_write(dev, GPIO_CTRL, 0x00404050); 954 saa7146_write(dev, GPIO_CTRL, 0x00404050);
953 /* set the 7111 gpio register -- I don't know what this does exactly */ 955 /* set the 7111 gpio register -- I don't know what this does exactly */
954 mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &one); 956 mxb->saa7111a->driver->command(mxb->saa7111a, DECODER_SET_GPIO, &one);
955 mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std); 957 mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std);
956 } 958 }
957 return 0; 959 return 0;
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 9edaca4371d7..3d3c48db45d9 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -626,9 +626,9 @@ ov511_i2c_write_internal(struct usb_ov511 *ov,
626 break; 626 break;
627 627
628 /* Retry until idle */ 628 /* Retry until idle */
629 do 629 do {
630 rc = reg_r(ov, R511_I2C_CTL); 630 rc = reg_r(ov, R511_I2C_CTL);
631 while (rc > 0 && ((rc&1) == 0)); 631 } while (rc > 0 && ((rc&1) == 0));
632 if (rc < 0) 632 if (rc < 0)
633 break; 633 break;
634 634
@@ -703,9 +703,9 @@ ov511_i2c_read_internal(struct usb_ov511 *ov, unsigned char reg)
703 return rc; 703 return rc;
704 704
705 /* Retry until idle */ 705 /* Retry until idle */
706 do 706 do {
707 rc = reg_r(ov, R511_I2C_CTL); 707 rc = reg_r(ov, R511_I2C_CTL);
708 while (rc > 0 && ((rc&1) == 0)); 708 } while (rc > 0 && ((rc & 1) == 0));
709 if (rc < 0) 709 if (rc < 0)
710 return rc; 710 return rc;
711 711
@@ -729,9 +729,9 @@ ov511_i2c_read_internal(struct usb_ov511 *ov, unsigned char reg)
729 return rc; 729 return rc;
730 730
731 /* Retry until idle */ 731 /* Retry until idle */
732 do 732 do {
733 rc = reg_r(ov, R511_I2C_CTL); 733 rc = reg_r(ov, R511_I2C_CTL);
734 while (rc > 0 && ((rc&1) == 0)); 734 } while (rc > 0 && ((rc&1) == 0));
735 if (rc < 0) 735 if (rc < 0)
736 return rc; 736 return rc;
737 737
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 00425d743656..7c84f795db54 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -1019,10 +1019,23 @@ static int init_mediavision(void)
1019 * Initialization and module stuff 1019 * Initialization and module stuff
1020 */ 1020 */
1021 1021
1022#ifndef MODULE
1023static int enable;
1024module_param(enable, int, 0);
1025#endif
1026
1022static int __init init_pms_cards(void) 1027static int __init init_pms_cards(void)
1023{ 1028{
1024 printk(KERN_INFO "Mediavision Pro Movie Studio driver 0.02\n"); 1029 printk(KERN_INFO "Mediavision Pro Movie Studio driver 0.02\n");
1025 1030
1031#ifndef MODULE
1032 if (!enable) {
1033 printk(KERN_INFO "PMS: not enabled, use pms.enable=1 to "
1034 "probe\n");
1035 return -ENODEV;
1036 }
1037#endif
1038
1026 data_port = io_port +1; 1039 data_port = io_port +1;
1027 1040
1028 if(init_mediavision()) 1041 if(init_mediavision())
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 1cccd5c77048..dbc560742553 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -1635,15 +1635,15 @@ int pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
1635 1635
1636 case VIDIOCPWCGVIDCMD: 1636 case VIDIOCPWCGVIDCMD:
1637 { 1637 {
1638 ARG_DEF(struct pwc_video_command, cmd); 1638 ARG_DEF(struct pwc_video_command, vcmd);
1639 1639
1640 ARGR(cmd).type = pdev->type; 1640 ARGR(vcmd).type = pdev->type;
1641 ARGR(cmd).release = pdev->release; 1641 ARGR(vcmd).release = pdev->release;
1642 ARGR(cmd).command_len = pdev->cmd_len; 1642 ARGR(vcmd).command_len = pdev->cmd_len;
1643 memcpy(&ARGR(cmd).command_buf, pdev->cmd_buf, pdev->cmd_len); 1643 memcpy(&ARGR(vcmd).command_buf, pdev->cmd_buf, pdev->cmd_len);
1644 ARGR(cmd).bandlength = pdev->vbandlength; 1644 ARGR(vcmd).bandlength = pdev->vbandlength;
1645 ARGR(cmd).frame_size = pdev->frame_size; 1645 ARGR(vcmd).frame_size = pdev->frame_size;
1646 ARG_OUT(cmd) 1646 ARG_OUT(vcmd)
1647 break; 1647 break;
1648 } 1648 }
1649 /* 1649 /*
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index bcd1c8f6cf6b..ad733caec720 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1489,10 +1489,9 @@ static int saa7115_probe(struct i2c_client *client,
1489 client->addr << 1, client->adapter->name); 1489 client->addr << 1, client->adapter->name);
1490 1490
1491 state = kzalloc(sizeof(struct saa711x_state), GFP_KERNEL); 1491 state = kzalloc(sizeof(struct saa711x_state), GFP_KERNEL);
1492 i2c_set_clientdata(client, state); 1492 if (state == NULL)
1493 if (state == NULL) {
1494 return -ENOMEM; 1493 return -ENOMEM;
1495 } 1494 i2c_set_clientdata(client, state);
1496 state->input = -1; 1495 state->input = -1;
1497 state->output = SAA7115_IPORT_ON; 1496 state->output = SAA7115_IPORT_ON;
1498 state->enable = 1; 1497 state->enable = 1;
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index f481277892da..acceed5d04ae 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -1397,7 +1397,7 @@ static int se401_probe(struct usb_interface *intf,
1397 mutex_init(&se401->lock); 1397 mutex_init(&se401->lock);
1398 wmb(); 1398 wmb();
1399 1399
1400 if (video_register_device(&se401->vdev, VFL_TYPE_GRABBER, video_nr) == -1) { 1400 if (video_register_device(&se401->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
1401 kfree(se401); 1401 kfree(se401);
1402 err("video_register_device failed"); 1402 err("video_register_device failed");
1403 return -EIO; 1403 return -EIO;
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 23408764d0ef..2da6938718f2 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -3312,6 +3312,7 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3312 cam->v4ldev->fops = &sn9c102_fops; 3312 cam->v4ldev->fops = &sn9c102_fops;
3313 cam->v4ldev->minor = video_nr[dev_nr]; 3313 cam->v4ldev->minor = video_nr[dev_nr];
3314 cam->v4ldev->release = video_device_release; 3314 cam->v4ldev->release = video_device_release;
3315 cam->v4ldev->parent = &udev->dev;
3315 3316
3316 init_completion(&cam->probe); 3317 init_completion(&cam->probe);
3317 3318
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index 6ff489baacf3..90a401dc3884 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -40,11 +40,14 @@ struct sn9c102_device;
40 40
41static const struct usb_device_id sn9c102_id_table[] = { 41static const struct usb_device_id sn9c102_id_table[] = {
42 /* SN9C101 and SN9C102 */ 42 /* SN9C101 and SN9C102 */
43#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
43 { SN9C102_USB_DEVICE(0x0c45, 0x6001, BRIDGE_SN9C102), }, 44 { SN9C102_USB_DEVICE(0x0c45, 0x6001, BRIDGE_SN9C102), },
44 { SN9C102_USB_DEVICE(0x0c45, 0x6005, BRIDGE_SN9C102), }, 45 { SN9C102_USB_DEVICE(0x0c45, 0x6005, BRIDGE_SN9C102), },
46#endif
45 { SN9C102_USB_DEVICE(0x0c45, 0x6007, BRIDGE_SN9C102), }, 47 { SN9C102_USB_DEVICE(0x0c45, 0x6007, BRIDGE_SN9C102), },
46 { SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), }, 48 { SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), },
47 { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), }, 49 { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
50/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
48 { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), }, 51 { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
49 { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), }, 52 { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
50 { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), }, 53 { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
@@ -53,29 +56,33 @@ static const struct usb_device_id sn9c102_id_table[] = {
53 { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), }, 56 { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
54 { SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), }, 57 { SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), },
55 { SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), }, 58 { SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), },
56 { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, 59/* { SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */
57 { SN9C102_USB_DEVICE(0x0c45, 0x602e, BRIDGE_SN9C102), }, 60 { SN9C102_USB_DEVICE(0x0c45, 0x602e, BRIDGE_SN9C102), },
58 { SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), }, 61 { SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), },
59 /* SN9C103 */ 62 /* SN9C103 */
60 { SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), }, 63 { SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), },
61 { SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), }, 64 { SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), },
62 { SN9C102_USB_DEVICE(0x0c45, 0x6083, BRIDGE_SN9C103), }, 65/* { SN9C102_USB_DEVICE(0x0c45, 0x6083, BRIDGE_SN9C103), }, HY7131D/E */
63 { SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), }, 66 { SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), },
64 { SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), }, 67 { SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), },
65 { SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), }, 68 { SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), },
66 { SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), }, 69 { SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), },
67 { SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, 70/* { SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, CISVF10 */
71#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
68 { SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), }, 72 { SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), },
73#endif
69 { SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), }, 74 { SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), },
70 { SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), }, 75 { SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), },
71 { SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), }, 76 { SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), },
72 { SN9C102_USB_DEVICE(0x0c45, 0x60a8, BRIDGE_SN9C103), }, 77/* { SN9C102_USB_DEVICE(0x0c45, 0x60a8, BRIDGE_SN9C103), }, PAS106 */
73 { SN9C102_USB_DEVICE(0x0c45, 0x60aa, BRIDGE_SN9C103), }, 78/* { SN9C102_USB_DEVICE(0x0c45, 0x60aa, BRIDGE_SN9C103), }, TAS5130 */
74 { SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, 79/* { SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5130 */
75 { SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), }, 80 { SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), },
76 { SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), }, 81 { SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), },
77 { SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), }, 82 { SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), },
83#if !defined CONFIG_USB_GSPCA && !defined CONFIG_USB_GSPCA_MODULE
78 { SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), }, 84 { SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), },
85#endif
79 { SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), }, 86 { SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), },
80 { SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), }, 87 { SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), },
81 { SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), }, 88 { SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), },
@@ -105,7 +112,7 @@ static const struct usb_device_id sn9c102_id_table[] = {
105 { SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), }, 112 { SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), },
106 { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), }, 113 { SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
107 { SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), }, 114 { SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
108 { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, 115/* { SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
109 { SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), }, 116 { SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
110 { SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), }, 117 { SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
111 { SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), }, 118 { SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 56dc3d6b5b29..dce947439459 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -1462,7 +1462,7 @@ static int stv680_probe (struct usb_interface *intf, const struct usb_device_id
1462 mutex_init (&stv680->lock); 1462 mutex_init (&stv680->lock);
1463 wmb (); 1463 wmb ();
1464 1464
1465 if (video_register_device (stv680->vdev, VFL_TYPE_GRABBER, video_nr) == -1) { 1465 if (video_register_device(stv680->vdev, VFL_TYPE_GRABBER, video_nr) < 0) {
1466 PDEBUG (0, "STV(e): video_register_device failed"); 1466 PDEBUG (0, "STV(e): video_register_device failed");
1467 retval = -EIO; 1467 retval = -EIO;
1468 goto error_vdev; 1468 goto error_vdev;
diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
index 59166b760104..cc27efe121dd 100644
--- a/drivers/media/video/usbvideo/ibmcam.c
+++ b/drivers/media/video/usbvideo/ibmcam.c
@@ -736,12 +736,12 @@ static enum ParseState ibmcam_model2_320x240_parse_lines(
736 * make black color and quit the horizontal scanning loop. 736 * make black color and quit the horizontal scanning loop.
737 */ 737 */
738 if (((frame->curline + 2) >= scanHeight) || (i >= scanLength)) { 738 if (((frame->curline + 2) >= scanHeight) || (i >= scanLength)) {
739 const int j = i * V4L_BYTES_PER_PIXEL; 739 const int offset = i * V4L_BYTES_PER_PIXEL;
740#if USES_IBMCAM_PUTPIXEL 740#if USES_IBMCAM_PUTPIXEL
741 /* Refresh 'f' because we don't use it much with PUTPIXEL */ 741 /* Refresh 'f' because we don't use it much with PUTPIXEL */
742 f = frame->data + (v4l_linesize * frame->curline) + j; 742 f = frame->data + (v4l_linesize * frame->curline) + offset;
743#endif 743#endif
744 memset(f, 0, v4l_linesize - j); 744 memset(f, 0, v4l_linesize - offset);
745 break; 745 break;
746 } 746 }
747 747
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index b7792451a299..2eb45829791c 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -866,7 +866,7 @@ vicam_probe( struct usb_interface *intf, const struct usb_device_id *id)
866 cam->udev = dev; 866 cam->udev = dev;
867 cam->bulkEndpoint = bulkEndpoint; 867 cam->bulkEndpoint = bulkEndpoint;
868 868
869 if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1) == -1) { 869 if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1) < 0) {
870 kfree(cam); 870 kfree(cam);
871 printk(KERN_WARNING "video_register_device failed\n"); 871 printk(KERN_WARNING "video_register_device failed\n");
872 return -EIO; 872 return -EIO;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 6f36006aecda..155fdec9ac7d 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -257,6 +257,9 @@ int video_register_device_index(struct video_device *vfd, int type, int nr,
257 int ret; 257 int ret;
258 char *name_base; 258 char *name_base;
259 259
260 if (vfd == NULL)
261 return -EINVAL;
262
260 switch (type) { 263 switch (type) {
261 case VFL_TYPE_GRABBER: 264 case VFL_TYPE_GRABBER:
262 base = MINOR_VFL_TYPE_GRABBER_MIN; 265 base = MINOR_VFL_TYPE_GRABBER_MIN;
@@ -281,7 +284,7 @@ int video_register_device_index(struct video_device *vfd, int type, int nr,
281 default: 284 default:
282 printk(KERN_ERR "%s called with unknown type: %d\n", 285 printk(KERN_ERR "%s called with unknown type: %d\n",
283 __func__, type); 286 __func__, type);
284 return -1; 287 return -EINVAL;
285 } 288 }
286 289
287 /* pick a minor number */ 290 /* pick a minor number */
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index fdfe7739c96e..140ef92c19c1 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -499,7 +499,7 @@ static void dbgbuf(unsigned int cmd, struct video_device *vfd,
499 p->timestamp.tv_sec / 3600, 499 p->timestamp.tv_sec / 3600,
500 (int)(p->timestamp.tv_sec / 60) % 60, 500 (int)(p->timestamp.tv_sec / 60) % 60,
501 (int)(p->timestamp.tv_sec % 60), 501 (int)(p->timestamp.tv_sec % 60),
502 p->timestamp.tv_usec, 502 (long)p->timestamp.tv_usec,
503 p->index, 503 p->index,
504 prt_names(p->type, v4l2_type_names), 504 prt_names(p->type, v4l2_type_names),
505 p->bytesused, p->flags, 505 p->bytesused, p->flags,
@@ -674,7 +674,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
674 __video_do_ioctl will be called again, with one or more 674 __video_do_ioctl will be called again, with one or more
675 V4L2 ioctls. 675 V4L2 ioctls.
676 ********************************************************/ 676 ********************************************************/
677 if (_IOC_TYPE(cmd) == 'v') 677 if (_IOC_TYPE(cmd) == 'v' && _IOC_NR(cmd) < BASE_VIDIOCPRIVATE)
678 return v4l_compat_translate_ioctl(inode, file, cmd, arg, 678 return v4l_compat_translate_ioctl(inode, file, cmd, arg,
679 __video_do_ioctl); 679 __video_do_ioctl);
680#endif 680#endif
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 3518af071a2e..8ba8daafd7ea 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1021,13 +1021,13 @@ static int vivi_release(void)
1021 dev = list_entry(list, struct vivi_dev, vivi_devlist); 1021 dev = list_entry(list, struct vivi_dev, vivi_devlist);
1022 1022
1023 if (-1 != dev->vfd->minor) { 1023 if (-1 != dev->vfd->minor) {
1024 video_unregister_device(dev->vfd); 1024 printk(KERN_INFO "%s: unregistering /dev/video%d\n",
1025 printk(KERN_INFO "%s: /dev/video%d unregistered.\n",
1026 VIVI_MODULE_NAME, dev->vfd->minor); 1025 VIVI_MODULE_NAME, dev->vfd->minor);
1026 video_unregister_device(dev->vfd);
1027 } else { 1027 } else {
1028 video_device_release(dev->vfd); 1028 printk(KERN_INFO "%s: releasing /dev/video%d\n",
1029 printk(KERN_INFO "%s: /dev/video%d released.\n",
1030 VIVI_MODULE_NAME, dev->vfd->minor); 1029 VIVI_MODULE_NAME, dev->vfd->minor);
1030 video_device_release(dev->vfd);
1031 } 1031 }
1032 1032
1033 kfree(dev); 1033 kfree(dev);
@@ -1104,19 +1104,29 @@ static struct video_device vivi_template = {
1104 Initialization and module stuff 1104 Initialization and module stuff
1105 ------------------------------------------------------------------*/ 1105 ------------------------------------------------------------------*/
1106 1106
1107/* This routine allocates from 1 to n_devs virtual drivers.
1108
1109 The real maximum number of virtual drivers will depend on how many drivers
1110 will succeed. This is limited to the maximum number of devices that
1111 videodev supports. Since there are 64 minors for video grabbers, this is
1112 currently the theoretical maximum limit. However, a further limit does
1113 exist at videodev that forbids any driver to register more than 32 video
1114 grabbers.
1115 */
1107static int __init vivi_init(void) 1116static int __init vivi_init(void)
1108{ 1117{
1109 int ret = -ENOMEM, i; 1118 int ret = -ENOMEM, i;
1110 struct vivi_dev *dev; 1119 struct vivi_dev *dev;
1111 struct video_device *vfd; 1120 struct video_device *vfd;
1112 1121
1122 if (n_devs <= 0)
1123 n_devs = 1;
1124
1113 for (i = 0; i < n_devs; i++) { 1125 for (i = 0; i < n_devs; i++) {
1114 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1126 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1115 if (NULL == dev) 1127 if (!dev)
1116 break; 1128 break;
1117 1129
1118 list_add_tail(&dev->vivi_devlist, &vivi_devlist);
1119
1120 /* init video dma queues */ 1130 /* init video dma queues */
1121 INIT_LIST_HEAD(&dev->vidq.active); 1131 INIT_LIST_HEAD(&dev->vidq.active);
1122 init_waitqueue_head(&dev->vidq.wq); 1132 init_waitqueue_head(&dev->vidq.wq);
@@ -1126,14 +1136,27 @@ static int __init vivi_init(void)
1126 mutex_init(&dev->mutex); 1136 mutex_init(&dev->mutex);
1127 1137
1128 vfd = video_device_alloc(); 1138 vfd = video_device_alloc();
1129 if (NULL == vfd) 1139 if (!vfd) {
1140 kfree(dev);
1130 break; 1141 break;
1142 }
1131 1143
1132 *vfd = vivi_template; 1144 *vfd = vivi_template;
1133 1145
1134 ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr); 1146 ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
1135 if (ret < 0) 1147 if (ret < 0) {
1148 video_device_release(vfd);
1149 kfree(dev);
1150
1151 /* If some registers succeeded, keep driver */
1152 if (i)
1153 ret = 0;
1154
1136 break; 1155 break;
1156 }
1157
1158 /* Now that everything is fine, let's add it to device list */
1159 list_add_tail(&dev->vivi_devlist, &vivi_devlist);
1137 1160
1138 snprintf(vfd->name, sizeof(vfd->name), "%s (%i)", 1161 snprintf(vfd->name, sizeof(vfd->name), "%s (%i)",
1139 vivi_template.name, vfd->minor); 1162 vivi_template.name, vfd->minor);
@@ -1149,11 +1172,16 @@ static int __init vivi_init(void)
1149 if (ret < 0) { 1172 if (ret < 0) {
1150 vivi_release(); 1173 vivi_release();
1151 printk(KERN_INFO "Error %d while loading vivi driver\n", ret); 1174 printk(KERN_INFO "Error %d while loading vivi driver\n", ret);
1152 } else 1175 } else {
1153 printk(KERN_INFO "Video Technology Magazine Virtual Video " 1176 printk(KERN_INFO "Video Technology Magazine Virtual Video "
1154 "Capture Board ver %u.%u.%u successfully loaded.\n", 1177 "Capture Board ver %u.%u.%u successfully loaded.\n",
1155 (VIVI_VERSION >> 16) & 0xFF, (VIVI_VERSION >> 8) & 0xFF, 1178 (VIVI_VERSION >> 16) & 0xFF, (VIVI_VERSION >> 8) & 0xFF,
1156 VIVI_VERSION & 0xFF); 1179 VIVI_VERSION & 0xFF);
1180
1181 /* n_devs will reflect the actual number of allocated devices */
1182 n_devs = i;
1183 }
1184
1157 return ret; 1185 return ret;
1158} 1186}
1159 1187
@@ -1169,10 +1197,10 @@ MODULE_DESCRIPTION("Video Technology Magazine Virtual Video Capture Board");
1169MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol"); 1197MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
1170MODULE_LICENSE("Dual BSD/GPL"); 1198MODULE_LICENSE("Dual BSD/GPL");
1171 1199
1172module_param(video_nr, int, 0); 1200module_param(video_nr, uint, 0444);
1173MODULE_PARM_DESC(video_nr, "video iminor start number"); 1201MODULE_PARM_DESC(video_nr, "video iminor start number");
1174 1202
1175module_param(n_devs, int, 0); 1203module_param(n_devs, uint, 0444);
1176MODULE_PARM_DESC(n_devs, "number of video devices to create"); 1204MODULE_PARM_DESC(n_devs, "number of video devices to create");
1177 1205
1178module_param_named(debug, vivi_template.debug, int, 0444); 1206module_param_named(debug, vivi_template.debug, int, 0444);
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index 9402f40095b4..2ff00bc5ad64 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -334,7 +334,7 @@ static int w9966_init(struct w9966_dev* cam, struct parport* port)
334 memcpy(&cam->vdev, &w9966_template, sizeof(struct video_device)); 334 memcpy(&cam->vdev, &w9966_template, sizeof(struct video_device));
335 cam->vdev.priv = cam; 335 cam->vdev.priv = cam;
336 336
337 if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) == -1) 337 if (video_register_device(&cam->vdev, VFL_TYPE_GRABBER, video_nr) < 0)
338 return -1; 338 return -1;
339 339
340 w9966_setState(cam, W9966_STATE_VDEV, W9966_STATE_VDEV); 340 w9966_setState(cam, W9966_STATE_VDEV, W9966_STATE_VDEV);
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 550ce7bd5c87..0c3287734c93 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -1988,6 +1988,7 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
1988 cam->v4ldev->fops = &zc0301_fops; 1988 cam->v4ldev->fops = &zc0301_fops;
1989 cam->v4ldev->minor = video_nr[dev_nr]; 1989 cam->v4ldev->minor = video_nr[dev_nr];
1990 cam->v4ldev->release = video_device_release; 1990 cam->v4ldev->release = video_device_release;
1991 cam->v4ldev->parent = &udev->dev;
1991 video_set_drvdata(cam->v4ldev, cam); 1992 video_set_drvdata(cam->v4ldev, cam);
1992 1993
1993 init_completion(&cam->probe); 1994 init_completion(&cam->probe);
diff --git a/drivers/media/video/zc0301/zc0301_sensor.h b/drivers/media/video/zc0301/zc0301_sensor.h
index 70fe6fc6cdd5..b0cd49c438a3 100644
--- a/drivers/media/video/zc0301/zc0301_sensor.h
+++ b/drivers/media/video/zc0301/zc0301_sensor.h
@@ -60,27 +60,8 @@ zc0301_attach_sensor(struct zc0301_device* cam, struct zc0301_sensor* sensor);
60 60
61#define ZC0301_ID_TABLE \ 61#define ZC0301_ID_TABLE \
62static const struct usb_device_id zc0301_id_table[] = { \ 62static const struct usb_device_id zc0301_id_table[] = { \
63 { ZC0301_USB_DEVICE(0x041e, 0x4017, 0xff), }, /* ICM105 */ \
64 { ZC0301_USB_DEVICE(0x041e, 0x401c, 0xff), }, /* PAS106 */ \
65 { ZC0301_USB_DEVICE(0x041e, 0x401e, 0xff), }, /* HV7131 */ \
66 { ZC0301_USB_DEVICE(0x041e, 0x401f, 0xff), }, /* TAS5130 */ \
67 { ZC0301_USB_DEVICE(0x041e, 0x4022, 0xff), }, \
68 { ZC0301_USB_DEVICE(0x041e, 0x4034, 0xff), }, /* PAS106 */ \
69 { ZC0301_USB_DEVICE(0x041e, 0x4035, 0xff), }, /* PAS106 */ \
70 { ZC0301_USB_DEVICE(0x041e, 0x4036, 0xff), }, /* HV7131 */ \
71 { ZC0301_USB_DEVICE(0x041e, 0x403a, 0xff), }, /* HV7131 */ \
72 { ZC0301_USB_DEVICE(0x0458, 0x7007, 0xff), }, /* TAS5130 */ \
73 { ZC0301_USB_DEVICE(0x0458, 0x700c, 0xff), }, /* TAS5130 */ \
74 { ZC0301_USB_DEVICE(0x0458, 0x700f, 0xff), }, /* TAS5130 */ \
75 { ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \ 63 { ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \
76 { ZC0301_USB_DEVICE(0x055f, 0xd003, 0xff), }, /* TAS5130 */ \
77 { ZC0301_USB_DEVICE(0x055f, 0xd004, 0xff), }, /* TAS5130 */ \
78 { ZC0301_USB_DEVICE(0x0ac8, 0x0301, 0xff), }, \
79 { ZC0301_USB_DEVICE(0x0ac8, 0x301b, 0xff), }, /* PB-0330/HV7131 */ \
80 { ZC0301_USB_DEVICE(0x0ac8, 0x303b, 0xff), }, /* PB-0330 */ \ 64 { ZC0301_USB_DEVICE(0x0ac8, 0x303b, 0xff), }, /* PB-0330 */ \
81 { ZC0301_USB_DEVICE(0x10fd, 0x0128, 0xff), }, /* TAS5130 */ \
82 { ZC0301_USB_DEVICE(0x10fd, 0x8050, 0xff), }, /* TAS5130 */ \
83 { ZC0301_USB_DEVICE(0x10fd, 0x804e, 0xff), }, /* TAS5130 */ \
84 { } \ 65 { } \
85}; 66};
86 67
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
index c6c77a505ec1..d8b0d326e452 100644
--- a/drivers/misc/acer-wmi.c
+++ b/drivers/misc/acer-wmi.c
@@ -1189,7 +1189,7 @@ static int create_debugfs(void)
1189 return 0; 1189 return 0;
1190 1190
1191error_debugfs: 1191error_debugfs:
1192 remove_debugfs(); 1192 remove_debugfs();
1193 return -ENOMEM; 1193 return -ENOMEM;
1194} 1194}
1195 1195
@@ -1272,6 +1272,7 @@ error_platform_register:
1272static void __exit acer_wmi_exit(void) 1272static void __exit acer_wmi_exit(void)
1273{ 1273{
1274 remove_sysfs(acer_platform_device); 1274 remove_sysfs(acer_platform_device);
1275 remove_debugfs();
1275 platform_device_del(acer_platform_device); 1276 platform_device_del(acer_platform_device);
1276 platform_driver_unregister(&acer_platform_driver); 1277 platform_driver_unregister(&acer_platform_driver);
1277 1278
diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/misc/fujitsu-laptop.c
index 7a1ef6c262de..3e56203e4947 100644
--- a/drivers/misc/fujitsu-laptop.c
+++ b/drivers/misc/fujitsu-laptop.c
@@ -463,6 +463,13 @@ static struct dmi_system_id __initdata fujitsu_dmi_table[] = {
463 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"), 463 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S6410"),
464 }, 464 },
465 .callback = dmi_check_cb_s6410}, 465 .callback = dmi_check_cb_s6410},
466 {
467 .ident = "FUJITSU LifeBook P8010",
468 .matches = {
469 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
470 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P8010"),
471 },
472 .callback = dmi_check_cb_s6410},
466 {} 473 {}
467}; 474};
468 475
diff --git a/drivers/misc/hp-wmi.c b/drivers/misc/hp-wmi.c
index 1dbcbcb323a2..6d407c2a4f91 100644
--- a/drivers/misc/hp-wmi.c
+++ b/drivers/misc/hp-wmi.c
@@ -49,6 +49,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
49#define HPWMI_ALS_QUERY 0x3 49#define HPWMI_ALS_QUERY 0x3
50#define HPWMI_DOCK_QUERY 0x4 50#define HPWMI_DOCK_QUERY 0x4
51#define HPWMI_WIRELESS_QUERY 0x5 51#define HPWMI_WIRELESS_QUERY 0x5
52#define HPWMI_HOTKEY_QUERY 0xc
52 53
53static int __init hp_wmi_bios_setup(struct platform_device *device); 54static int __init hp_wmi_bios_setup(struct platform_device *device);
54static int __exit hp_wmi_bios_remove(struct platform_device *device); 55static int __exit hp_wmi_bios_remove(struct platform_device *device);
@@ -69,7 +70,7 @@ struct bios_return {
69 70
70struct key_entry { 71struct key_entry {
71 char type; /* See KE_* below */ 72 char type; /* See KE_* below */
72 u8 code; 73 u16 code;
73 u16 keycode; 74 u16 keycode;
74}; 75};
75 76
@@ -79,7 +80,9 @@ static struct key_entry hp_wmi_keymap[] = {
79 {KE_SW, 0x01, SW_DOCK}, 80 {KE_SW, 0x01, SW_DOCK},
80 {KE_KEY, 0x02, KEY_BRIGHTNESSUP}, 81 {KE_KEY, 0x02, KEY_BRIGHTNESSUP},
81 {KE_KEY, 0x03, KEY_BRIGHTNESSDOWN}, 82 {KE_KEY, 0x03, KEY_BRIGHTNESSDOWN},
82 {KE_KEY, 0x04, KEY_HELP}, 83 {KE_KEY, 0x20e6, KEY_PROG1},
84 {KE_KEY, 0x2142, KEY_MEDIA},
85 {KE_KEY, 0x231b, KEY_HELP},
83 {KE_END, 0} 86 {KE_END, 0}
84}; 87};
85 88
@@ -177,9 +180,9 @@ static int hp_wmi_wifi_state(void)
177 int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0); 180 int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
178 181
179 if (wireless & 0x100) 182 if (wireless & 0x100)
180 return 1; 183 return RFKILL_STATE_UNBLOCKED;
181 else 184 else
182 return 0; 185 return RFKILL_STATE_SOFT_BLOCKED;
183} 186}
184 187
185static int hp_wmi_bluetooth_state(void) 188static int hp_wmi_bluetooth_state(void)
@@ -187,9 +190,9 @@ static int hp_wmi_bluetooth_state(void)
187 int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0); 190 int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
188 191
189 if (wireless & 0x10000) 192 if (wireless & 0x10000)
190 return 1; 193 return RFKILL_STATE_UNBLOCKED;
191 else 194 else
192 return 0; 195 return RFKILL_STATE_SOFT_BLOCKED;
193} 196}
194 197
195static int hp_wmi_wwan_state(void) 198static int hp_wmi_wwan_state(void)
@@ -197,9 +200,9 @@ static int hp_wmi_wwan_state(void)
197 int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0); 200 int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
198 201
199 if (wireless & 0x1000000) 202 if (wireless & 0x1000000)
200 return 1; 203 return RFKILL_STATE_UNBLOCKED;
201 else 204 else
202 return 0; 205 return RFKILL_STATE_SOFT_BLOCKED;
203} 206}
204 207
205static ssize_t show_display(struct device *dev, struct device_attribute *attr, 208static ssize_t show_display(struct device *dev, struct device_attribute *attr,
@@ -318,6 +321,9 @@ void hp_wmi_notify(u32 value, void *context)
318 321
319 if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == 8) { 322 if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == 8) {
320 int eventcode = *((u8 *) obj->buffer.pointer); 323 int eventcode = *((u8 *) obj->buffer.pointer);
324 if (eventcode == 0x4)
325 eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
326 0);
321 key = hp_wmi_get_entry_by_scancode(eventcode); 327 key = hp_wmi_get_entry_by_scancode(eventcode);
322 if (key) { 328 if (key) {
323 switch (key->type) { 329 switch (key->type) {
@@ -338,12 +344,14 @@ void hp_wmi_notify(u32 value, void *context)
338 } 344 }
339 } else if (eventcode == 0x5) { 345 } else if (eventcode == 0x5) {
340 if (wifi_rfkill) 346 if (wifi_rfkill)
341 wifi_rfkill->state = hp_wmi_wifi_state(); 347 rfkill_force_state(wifi_rfkill,
348 hp_wmi_wifi_state());
342 if (bluetooth_rfkill) 349 if (bluetooth_rfkill)
343 bluetooth_rfkill->state = 350 rfkill_force_state(bluetooth_rfkill,
344 hp_wmi_bluetooth_state(); 351 hp_wmi_bluetooth_state());
345 if (wwan_rfkill) 352 if (wwan_rfkill)
346 wwan_rfkill->state = hp_wmi_wwan_state(); 353 rfkill_force_state(wwan_rfkill,
354 hp_wmi_wwan_state());
347 } else 355 } else
348 printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n", 356 printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
349 eventcode); 357 eventcode);
@@ -398,6 +406,7 @@ static void cleanup_sysfs(struct platform_device *device)
398static int __init hp_wmi_bios_setup(struct platform_device *device) 406static int __init hp_wmi_bios_setup(struct platform_device *device)
399{ 407{
400 int err; 408 int err;
409 int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
401 410
402 err = device_create_file(&device->dev, &dev_attr_display); 411 err = device_create_file(&device->dev, &dev_attr_display);
403 if (err) 412 if (err)
@@ -412,28 +421,33 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
412 if (err) 421 if (err)
413 goto add_sysfs_error; 422 goto add_sysfs_error;
414 423
415 wifi_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WLAN); 424 if (wireless & 0x1) {
416 wifi_rfkill->name = "hp-wifi"; 425 wifi_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WLAN);
417 wifi_rfkill->state = hp_wmi_wifi_state(); 426 wifi_rfkill->name = "hp-wifi";
418 wifi_rfkill->toggle_radio = hp_wmi_wifi_set; 427 wifi_rfkill->state = hp_wmi_wifi_state();
419 wifi_rfkill->user_claim_unsupported = 1; 428 wifi_rfkill->toggle_radio = hp_wmi_wifi_set;
420 429 wifi_rfkill->user_claim_unsupported = 1;
421 bluetooth_rfkill = rfkill_allocate(&device->dev, 430 rfkill_register(wifi_rfkill);
422 RFKILL_TYPE_BLUETOOTH); 431 }
423 bluetooth_rfkill->name = "hp-bluetooth"; 432
424 bluetooth_rfkill->state = hp_wmi_bluetooth_state(); 433 if (wireless & 0x2) {
425 bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set; 434 bluetooth_rfkill = rfkill_allocate(&device->dev,
426 bluetooth_rfkill->user_claim_unsupported = 1; 435 RFKILL_TYPE_BLUETOOTH);
427 436 bluetooth_rfkill->name = "hp-bluetooth";
428 wwan_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WIMAX); 437 bluetooth_rfkill->state = hp_wmi_bluetooth_state();
429 wwan_rfkill->name = "hp-wwan"; 438 bluetooth_rfkill->toggle_radio = hp_wmi_bluetooth_set;
430 wwan_rfkill->state = hp_wmi_wwan_state(); 439 bluetooth_rfkill->user_claim_unsupported = 1;
431 wwan_rfkill->toggle_radio = hp_wmi_wwan_set; 440 rfkill_register(bluetooth_rfkill);
432 wwan_rfkill->user_claim_unsupported = 1; 441 }
433 442
434 rfkill_register(wifi_rfkill); 443 if (wireless & 0x4) {
435 rfkill_register(bluetooth_rfkill); 444 wwan_rfkill = rfkill_allocate(&device->dev, RFKILL_TYPE_WWAN);
436 rfkill_register(wwan_rfkill); 445 wwan_rfkill->name = "hp-wwan";
446 wwan_rfkill->state = hp_wmi_wwan_state();
447 wwan_rfkill->toggle_radio = hp_wmi_wwan_set;
448 wwan_rfkill->user_claim_unsupported = 1;
449 rfkill_register(wwan_rfkill);
450 }
437 451
438 return 0; 452 return 0;
439add_sysfs_error: 453add_sysfs_error:
@@ -445,9 +459,12 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
445{ 459{
446 cleanup_sysfs(device); 460 cleanup_sysfs(device);
447 461
448 rfkill_unregister(wifi_rfkill); 462 if (wifi_rfkill)
449 rfkill_unregister(bluetooth_rfkill); 463 rfkill_unregister(wifi_rfkill);
450 rfkill_unregister(wwan_rfkill); 464 if (bluetooth_rfkill)
465 rfkill_unregister(bluetooth_rfkill);
466 if (wwan_rfkill)
467 rfkill_unregister(wwan_rfkill);
451 468
452 return 0; 469 return 0;
453} 470}
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index d3eb7903c346..6b9300779a43 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -3086,7 +3086,6 @@ static struct ibm_struct wan_driver_data = {
3086 .read = wan_read, 3086 .read = wan_read,
3087 .write = wan_write, 3087 .write = wan_write,
3088 .exit = wan_exit, 3088 .exit = wan_exit,
3089 .flags.experimental = 1,
3090}; 3089};
3091 3090
3092/************************************************************************* 3091/*************************************************************************
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 86dbb366415a..6986f3926244 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -103,8 +103,10 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
103 check_disk_change(inode->i_bdev); 103 check_disk_change(inode->i_bdev);
104 ret = 0; 104 ret = 0;
105 105
106 if ((filp->f_mode & FMODE_WRITE) && md->read_only) 106 if ((filp->f_mode & FMODE_WRITE) && md->read_only) {
107 mmc_blk_put(md);
107 ret = -EROFS; 108 ret = -EROFS;
109 }
108 } 110 }
109 111
110 return ret; 112 return ret;
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 6915f40ac8ab..1f8b5b36222c 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -621,12 +621,21 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
621 if (cpu_is_at91sam9260 () || cpu_is_at91sam9263()) 621 if (cpu_is_at91sam9260 () || cpu_is_at91sam9263())
622 if (host->total_length < 12) 622 if (host->total_length < 12)
623 host->total_length = 12; 623 host->total_length = 12;
624 host->buffer = dma_alloc_coherent(NULL, 624
625 host->total_length, 625 host->buffer = kmalloc(host->total_length, GFP_KERNEL);
626 &host->physical_address, GFP_KERNEL); 626 if (!host->buffer) {
627 pr_debug("Can't alloc tx buffer\n");
628 cmd->error = -ENOMEM;
629 mmc_request_done(host->mmc, host->request);
630 return;
631 }
627 632
628 at91_mci_sg_to_dma(host, data); 633 at91_mci_sg_to_dma(host, data);
629 634
635 host->physical_address = dma_map_single(NULL,
636 host->buffer, host->total_length,
637 DMA_TO_DEVICE);
638
630 pr_debug("Transmitting %d bytes\n", host->total_length); 639 pr_debug("Transmitting %d bytes\n", host->total_length);
631 640
632 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); 641 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
@@ -694,7 +703,10 @@ static void at91_mci_completed_command(struct at91mci_host *host, unsigned int s
694 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3)); 703 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
695 704
696 if (host->buffer) { 705 if (host->buffer) {
697 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address); 706 dma_unmap_single(NULL,
707 host->physical_address, host->total_length,
708 DMA_TO_DEVICE);
709 kfree(host->buffer);
698 host->buffer = NULL; 710 host->buffer = NULL;
699 } 711 }
700 712
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index d2f331876e4c..e00d424e6575 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -410,16 +410,20 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
410 410
411 case MEMGETREGIONINFO: 411 case MEMGETREGIONINFO:
412 { 412 {
413 struct region_info_user ur; 413 uint32_t ur_idx;
414 struct mtd_erase_region_info *kr;
415 struct region_info_user *ur = (struct region_info_user *) argp;
414 416
415 if (copy_from_user(&ur, argp, sizeof(struct region_info_user))) 417 if (get_user(ur_idx, &(ur->regionindex)))
416 return -EFAULT; 418 return -EFAULT;
417 419
418 if (ur.regionindex >= mtd->numeraseregions) 420 kr = &(mtd->eraseregions[ur_idx]);
419 return -EINVAL; 421
420 if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]), 422 if (put_user(kr->offset, &(ur->offset))
421 sizeof(struct mtd_erase_region_info))) 423 || put_user(kr->erasesize, &(ur->erasesize))
424 || put_user(kr->numblocks, &(ur->numblocks)))
422 return -EFAULT; 425 return -EFAULT;
426
423 break; 427 break;
424 } 428 }
425 429
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index cbab654b03c8..edb1e322113d 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -109,7 +109,7 @@ struct tmio_nand {
109 109
110 void __iomem *ccr; 110 void __iomem *ccr;
111 void __iomem *fcr; 111 void __iomem *fcr;
112 unsigned long fcr_phys; 112 unsigned long fcr_base;
113 113
114 unsigned int irq; 114 unsigned int irq;
115 115
@@ -316,8 +316,8 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
316 tmio_iowrite8(0x81, tmio->ccr + CCR_ICC); 316 tmio_iowrite8(0x81, tmio->ccr + CCR_ICC);
317 317
318 /* (10h)BaseAddress 0x1000 spba.spba2 */ 318 /* (10h)BaseAddress 0x1000 spba.spba2 */
319 tmio_iowrite16(tmio->fcr_phys, tmio->ccr + CCR_BASE); 319 tmio_iowrite16(tmio->fcr_base, tmio->ccr + CCR_BASE);
320 tmio_iowrite16(tmio->fcr_phys >> 16, tmio->ccr + CCR_BASE + 16); 320 tmio_iowrite16(tmio->fcr_base >> 16, tmio->ccr + CCR_BASE + 2);
321 321
322 /* (04h)Command Register I/O spcmd */ 322 /* (04h)Command Register I/O spcmd */
323 tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND); 323 tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND);
@@ -395,7 +395,7 @@ static int tmio_probe(struct platform_device *dev)
395 goto err_iomap_ccr; 395 goto err_iomap_ccr;
396 } 396 }
397 397
398 tmio->fcr_phys = (unsigned long)fcr->start; 398 tmio->fcr_base = fcr->start & 0xfffff;
399 tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1); 399 tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1);
400 if (!tmio->fcr) { 400 if (!tmio->fcr) {
401 retval = -EIO; 401 retval = -EIO;
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index fdfb2b2cb734..a424869707a5 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -130,12 +130,12 @@ static const char filename[] = __FILE__;
130 130
131static const char timeout_msg[] = "*** timeout at %s:%s (line %d) ***\n"; 131static const char timeout_msg[] = "*** timeout at %s:%s (line %d) ***\n";
132#define TIMEOUT_MSG(lineno) \ 132#define TIMEOUT_MSG(lineno) \
133 printk(timeout_msg, filename,__FUNCTION__,(lineno)) 133 printk(timeout_msg, filename,__func__,(lineno))
134 134
135static const char invalid_pcb_msg[] = 135static const char invalid_pcb_msg[] =
136"*** invalid pcb length %d at %s:%s (line %d) ***\n"; 136"*** invalid pcb length %d at %s:%s (line %d) ***\n";
137#define INVALID_PCB_MSG(len) \ 137#define INVALID_PCB_MSG(len) \
138 printk(invalid_pcb_msg, (len),filename,__FUNCTION__,__LINE__) 138 printk(invalid_pcb_msg, (len),filename,__func__,__LINE__)
139 139
140static char search_msg[] __initdata = KERN_INFO "%s: Looking for 3c505 adapter at address %#x..."; 140static char search_msg[] __initdata = KERN_INFO "%s: Looking for 3c505 adapter at address %#x...";
141 141
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 6011d6fabef0..85fa40a0a667 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -127,7 +127,6 @@ MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered mu
127 (CP)->tx_tail - (CP)->tx_head - 1) 127 (CP)->tx_tail - (CP)->tx_head - 1)
128 128
129#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ 129#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
130#define RX_OFFSET 2
131#define CP_INTERNAL_PHY 32 130#define CP_INTERNAL_PHY 32
132 131
133/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ 132/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
@@ -552,14 +551,14 @@ rx_status_loop:
552 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n", 551 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
553 dev->name, rx_tail, status, len); 552 dev->name, rx_tail, status, len);
554 553
555 buflen = cp->rx_buf_sz + RX_OFFSET; 554 buflen = cp->rx_buf_sz + NET_IP_ALIGN;
556 new_skb = dev_alloc_skb (buflen); 555 new_skb = netdev_alloc_skb(dev, buflen);
557 if (!new_skb) { 556 if (!new_skb) {
558 dev->stats.rx_dropped++; 557 dev->stats.rx_dropped++;
559 goto rx_next; 558 goto rx_next;
560 } 559 }
561 560
562 skb_reserve(new_skb, RX_OFFSET); 561 skb_reserve(new_skb, NET_IP_ALIGN);
563 562
564 dma_unmap_single(&cp->pdev->dev, mapping, 563 dma_unmap_single(&cp->pdev->dev, mapping,
565 buflen, PCI_DMA_FROMDEVICE); 564 buflen, PCI_DMA_FROMDEVICE);
@@ -1051,19 +1050,20 @@ static void cp_init_hw (struct cp_private *cp)
1051 cpw8_f(Cfg9346, Cfg9346_Lock); 1050 cpw8_f(Cfg9346, Cfg9346_Lock);
1052} 1051}
1053 1052
1054static int cp_refill_rx (struct cp_private *cp) 1053static int cp_refill_rx(struct cp_private *cp)
1055{ 1054{
1055 struct net_device *dev = cp->dev;
1056 unsigned i; 1056 unsigned i;
1057 1057
1058 for (i = 0; i < CP_RX_RING_SIZE; i++) { 1058 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1059 struct sk_buff *skb; 1059 struct sk_buff *skb;
1060 dma_addr_t mapping; 1060 dma_addr_t mapping;
1061 1061
1062 skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET); 1062 skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
1063 if (!skb) 1063 if (!skb)
1064 goto err_out; 1064 goto err_out;
1065 1065
1066 skb_reserve(skb, RX_OFFSET); 1066 skb_reserve(skb, NET_IP_ALIGN);
1067 1067
1068 mapping = dma_map_single(&cp->pdev->dev, skb->data, 1068 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1069 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1069 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 8a5b0d293f75..32e66f0d4344 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -309,7 +309,7 @@ enum RTL8139_registers {
309 Cfg9346 = 0x50, 309 Cfg9346 = 0x50,
310 Config0 = 0x51, 310 Config0 = 0x51,
311 Config1 = 0x52, 311 Config1 = 0x52,
312 FlashReg = 0x54, 312 TimerInt = 0x54,
313 MediaStatus = 0x58, 313 MediaStatus = 0x58,
314 Config3 = 0x59, 314 Config3 = 0x59,
315 Config4 = 0x5A, /* absent on RTL-8139A */ 315 Config4 = 0x5A, /* absent on RTL-8139A */
@@ -325,6 +325,7 @@ enum RTL8139_registers {
325 FIFOTMS = 0x70, /* FIFO Control and test. */ 325 FIFOTMS = 0x70, /* FIFO Control and test. */
326 CSCR = 0x74, /* Chip Status and Configuration Register. */ 326 CSCR = 0x74, /* Chip Status and Configuration Register. */
327 PARA78 = 0x78, 327 PARA78 = 0x78,
328 FlashReg = 0xD4, /* Communication with Flash ROM, four bytes. */
328 PARA7c = 0x7c, /* Magic transceiver parameter register. */ 329 PARA7c = 0x7c, /* Magic transceiver parameter register. */
329 Config5 = 0xD8, /* absent on RTL-8139A */ 330 Config5 = 0xD8, /* absent on RTL-8139A */
330}; 331};
@@ -2009,9 +2010,9 @@ no_early_rx:
2009 /* Malloc up new buffer, compatible with net-2e. */ 2010 /* Malloc up new buffer, compatible with net-2e. */
2010 /* Omit the four octet CRC from the length. */ 2011 /* Omit the four octet CRC from the length. */
2011 2012
2012 skb = dev_alloc_skb (pkt_size + 2); 2013 skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
2013 if (likely(skb)) { 2014 if (likely(skb)) {
2014 skb_reserve (skb, 2); /* 16 byte align the IP fields. */ 2015 skb_reserve (skb, NET_IP_ALIGN); /* 16 byte align the IP fields. */
2015#if RX_BUF_IDX == 3 2016#if RX_BUF_IDX == 3
2016 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); 2017 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
2017#else 2018#else
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d85d76019afd..42b4eb703c2d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1840,6 +1840,17 @@ config NE_H8300
1840 Say Y here if you want to use the NE2000 compatible 1840 Say Y here if you want to use the NE2000 compatible
1841 controller on the Renesas H8/300 processor. 1841 controller on the Renesas H8/300 processor.
1842 1842
1843config ATL2
1844 tristate "Atheros L2 Fast Ethernet support"
1845 depends on PCI
1846 select CRC32
1847 select MII
1848 help
1849 This driver supports the Atheros L2 fast ethernet adapter.
1850
1851 To compile this driver as a module, choose M here. The module
1852 will be called atl2.
1853
1843source "drivers/net/fs_enet/Kconfig" 1854source "drivers/net/fs_enet/Kconfig"
1844 1855
1845endif # NET_ETHERNET 1856endif # NET_ETHERNET
@@ -2046,6 +2057,7 @@ config R8169
2046 tristate "Realtek 8169 gigabit ethernet support" 2057 tristate "Realtek 8169 gigabit ethernet support"
2047 depends on PCI 2058 depends on PCI
2048 select CRC32 2059 select CRC32
2060 select MII
2049 ---help--- 2061 ---help---
2050 Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. 2062 Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
2051 2063
@@ -2302,6 +2314,18 @@ config ATL1E
2302 To compile this driver as a module, choose M here. The module 2314 To compile this driver as a module, choose M here. The module
2303 will be called atl1e. 2315 will be called atl1e.
2304 2316
2317config JME
2318 tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
2319 depends on PCI
2320 select CRC32
2321 select MII
2322 ---help---
2323 This driver supports the PCI-Express gigabit ethernet adapters
2324 based on JMicron JMC250 chipset.
2325
2326 To compile this driver as a module, choose M here. The module
2327 will be called jme.
2328
2305endif # NETDEV_1000 2329endif # NETDEV_1000
2306 2330
2307# 2331#
@@ -2377,10 +2401,18 @@ config EHEA
2377 To compile the driver as a module, choose M here. The module 2401 To compile the driver as a module, choose M here. The module
2378 will be called ehea. 2402 will be called ehea.
2379 2403
2404config ENIC
2405 tristate "E, the Cisco 10G Ethernet NIC"
2406 depends on PCI && INET
2407 select INET_LRO
2408 help
2409 This enables the support for the Cisco 10G Ethernet card.
2410
2380config IXGBE 2411config IXGBE
2381 tristate "Intel(R) 10GbE PCI Express adapters support" 2412 tristate "Intel(R) 10GbE PCI Express adapters support"
2382 depends on PCI && INET 2413 depends on PCI && INET
2383 select INET_LRO 2414 select INET_LRO
2415 select INTEL_IOATDMA
2384 ---help--- 2416 ---help---
2385 This driver supports Intel(R) 10GbE PCI Express family of 2417 This driver supports Intel(R) 10GbE PCI Express family of
2386 adapters. For more information on how to identify your adapter, go 2418 adapters. For more information on how to identify your adapter, go
@@ -2432,6 +2464,7 @@ config MYRI10GE
2432 select FW_LOADER 2464 select FW_LOADER
2433 select CRC32 2465 select CRC32
2434 select INET_LRO 2466 select INET_LRO
2467 select INTEL_IOATDMA
2435 ---help--- 2468 ---help---
2436 This driver supports Myricom Myri-10G Dual Protocol interface in 2469 This driver supports Myricom Myri-10G Dual Protocol interface in
2437 Ethernet mode. If the eeprom on your board is not recent enough, 2470 Ethernet mode. If the eeprom on your board is not recent enough,
@@ -2496,6 +2529,15 @@ config BNX2X
2496 To compile this driver as a module, choose M here: the module 2529 To compile this driver as a module, choose M here: the module
2497 will be called bnx2x. This is recommended. 2530 will be called bnx2x. This is recommended.
2498 2531
2532config QLGE
2533 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
2534 depends on PCI
2535 help
2536 This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
2537
2538 To compile this driver as a module, choose M here: the module
2539 will be called qlge.
2540
2499source "drivers/net/sfc/Kconfig" 2541source "drivers/net/sfc/Kconfig"
2500 2542
2501endif # NETDEV_10000 2543endif # NETDEV_10000
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7629c9017215..fa2510b2e609 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -15,9 +15,12 @@ obj-$(CONFIG_EHEA) += ehea/
15obj-$(CONFIG_CAN) += can/ 15obj-$(CONFIG_CAN) += can/
16obj-$(CONFIG_BONDING) += bonding/ 16obj-$(CONFIG_BONDING) += bonding/
17obj-$(CONFIG_ATL1) += atlx/ 17obj-$(CONFIG_ATL1) += atlx/
18obj-$(CONFIG_ATL2) += atlx/
18obj-$(CONFIG_ATL1E) += atl1e/ 19obj-$(CONFIG_ATL1E) += atl1e/
19obj-$(CONFIG_GIANFAR) += gianfar_driver.o 20obj-$(CONFIG_GIANFAR) += gianfar_driver.o
20obj-$(CONFIG_TEHUTI) += tehuti.o 21obj-$(CONFIG_TEHUTI) += tehuti.o
22obj-$(CONFIG_ENIC) += enic/
23obj-$(CONFIG_JME) += jme.o
21 24
22gianfar_driver-objs := gianfar.o \ 25gianfar_driver-objs := gianfar.o \
23 gianfar_ethtool.o \ 26 gianfar_ethtool.o \
@@ -111,7 +114,7 @@ obj-$(CONFIG_EL2) += 3c503.o 8390p.o
111obj-$(CONFIG_NE2000) += ne.o 8390p.o 114obj-$(CONFIG_NE2000) += ne.o 8390p.o
112obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o 115obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
113obj-$(CONFIG_HPLAN) += hp.o 8390p.o 116obj-$(CONFIG_HPLAN) += hp.o 8390p.o
114obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o 117obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390.o
115obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o 118obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
116obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o 119obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
117obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o 120obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
@@ -128,6 +131,7 @@ obj-$(CONFIG_AX88796) += ax88796.o
128obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o 131obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
129obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o 132obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
130obj-$(CONFIG_QLA3XXX) += qla3xxx.o 133obj-$(CONFIG_QLA3XXX) += qla3xxx.o
134obj-$(CONFIG_QLGE) += qlge/
131 135
132obj-$(CONFIG_PPP) += ppp_generic.o 136obj-$(CONFIG_PPP) += ppp_generic.o
133obj-$(CONFIG_PPP_ASYNC) += ppp_async.o 137obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index bdc4c0bb56d9..a5b07691e466 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -442,24 +442,24 @@ static int arcnet_open(struct net_device *dev)
442 BUGMSG(D_NORMAL, "WARNING! Station address FF may confuse " 442 BUGMSG(D_NORMAL, "WARNING! Station address FF may confuse "
443 "DOS networking programs!\n"); 443 "DOS networking programs!\n");
444 444
445 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 445 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
446 if (ASTATUS() & RESETflag) { 446 if (ASTATUS() & RESETflag) {
447 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 447 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
448 ACOMMAND(CFLAGScmd | RESETclear); 448 ACOMMAND(CFLAGScmd | RESETclear);
449 } 449 }
450 450
451 451
452 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 452 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
453 /* make sure we're ready to receive IRQ's. */ 453 /* make sure we're ready to receive IRQ's. */
454 AINTMASK(0); 454 AINTMASK(0);
455 udelay(1); /* give it time to set the mask before 455 udelay(1); /* give it time to set the mask before
456 * we reset it again. (may not even be 456 * we reset it again. (may not even be
457 * necessary) 457 * necessary)
458 */ 458 */
459 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 459 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
460 lp->intmask = NORXflag | RECONflag; 460 lp->intmask = NORXflag | RECONflag;
461 AINTMASK(lp->intmask); 461 AINTMASK(lp->intmask);
462 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 462 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
463 463
464 netif_start_queue(dev); 464 netif_start_queue(dev);
465 465
@@ -670,14 +670,14 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
670 freeskb = 0; 670 freeskb = 0;
671 } 671 }
672 672
673 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS()); 673 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
674 /* make sure we didn't ignore a TX IRQ while we were in here */ 674 /* make sure we didn't ignore a TX IRQ while we were in here */
675 AINTMASK(0); 675 AINTMASK(0);
676 676
677 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 677 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
678 lp->intmask |= TXFREEflag|EXCNAKflag; 678 lp->intmask |= TXFREEflag|EXCNAKflag;
679 AINTMASK(lp->intmask); 679 AINTMASK(lp->intmask);
680 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS()); 680 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
681 681
682 spin_unlock_irqrestore(&lp->lock, flags); 682 spin_unlock_irqrestore(&lp->lock, flags);
683 if (freeskb) { 683 if (freeskb) {
@@ -798,7 +798,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
798 diagstatus = (status >> 8) & 0xFF; 798 diagstatus = (status >> 8) & 0xFF;
799 799
800 BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n", 800 BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n",
801 __FILE__,__LINE__,__FUNCTION__,status); 801 __FILE__,__LINE__,__func__,status);
802 didsomething = 0; 802 didsomething = 0;
803 803
804 /* 804 /*
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 8b51313b1300..70124a944e7d 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -238,15 +238,15 @@ static int com20020_reset(struct net_device *dev, int really_reset)
238 u_char inbyte; 238 u_char inbyte;
239 239
240 BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n", 240 BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n",
241 __FILE__,__LINE__,__FUNCTION__,dev,lp,dev->name); 241 __FILE__,__LINE__,__func__,dev,lp,dev->name);
242 BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", 242 BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n",
243 dev->name, ASTATUS()); 243 dev->name, ASTATUS());
244 244
245 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 245 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
246 lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2); 246 lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
247 /* power-up defaults */ 247 /* power-up defaults */
248 SETCONF; 248 SETCONF;
249 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 249 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
250 250
251 if (really_reset) { 251 if (really_reset) {
252 /* reset the card */ 252 /* reset the card */
@@ -254,22 +254,22 @@ static int com20020_reset(struct net_device *dev, int really_reset)
254 mdelay(RESETtime * 2); /* COM20020 seems to be slower sometimes */ 254 mdelay(RESETtime * 2); /* COM20020 seems to be slower sometimes */
255 } 255 }
256 /* clear flags & end reset */ 256 /* clear flags & end reset */
257 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 257 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
258 ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear); 258 ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
259 259
260 /* verify that the ARCnet signature byte is present */ 260 /* verify that the ARCnet signature byte is present */
261 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 261 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
262 262
263 com20020_copy_from_card(dev, 0, 0, &inbyte, 1); 263 com20020_copy_from_card(dev, 0, 0, &inbyte, 1);
264 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 264 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
265 if (inbyte != TESTvalue) { 265 if (inbyte != TESTvalue) {
266 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 266 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
267 BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n"); 267 BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
268 return 1; 268 return 1;
269 } 269 }
270 /* enable extended (512-byte) packets */ 270 /* enable extended (512-byte) packets */
271 ACOMMAND(CONFIGcmd | EXTconf); 271 ACOMMAND(CONFIGcmd | EXTconf);
272 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 272 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
273 273
274 /* done! return success. */ 274 /* done! return success. */
275 return 0; 275 return 0;
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 7685b995ff9b..9b603528143d 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -2390,9 +2390,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2390 } 2390 }
2391 2391
2392 /* Init GPHY as early as possible due to power saving issue */ 2392 /* Init GPHY as early as possible due to power saving issue */
2393 spin_lock(&adapter->mdio_lock);
2394 atl1e_phy_init(&adapter->hw); 2393 atl1e_phy_init(&adapter->hw);
2395 spin_unlock(&adapter->mdio_lock);
2396 /* reset the controller to 2394 /* reset the controller to
2397 * put the device in a known good starting state */ 2395 * put the device in a known good starting state */
2398 err = atl1e_reset_hw(&adapter->hw); 2396 err = atl1e_reset_hw(&adapter->hw);
diff --git a/drivers/net/atlx/Makefile b/drivers/net/atlx/Makefile
index ca45553a040d..e4f6022ca552 100644
--- a/drivers/net/atlx/Makefile
+++ b/drivers/net/atlx/Makefile
@@ -1 +1,3 @@
1obj-$(CONFIG_ATL1) += atl1.o 1obj-$(CONFIG_ATL1) += atl1.o
2obj-$(CONFIG_ATL2) += atl2.o
3
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
new file mode 100644
index 000000000000..d548a67da1e8
--- /dev/null
+++ b/drivers/net/atlx/atl2.c
@@ -0,0 +1,3127 @@
1/*
2 * Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved.
3 * Copyright(c) 2007 - 2008 Chris Snook <csnook@redhat.com>
4 *
5 * Derived from Intel e1000 driver
6 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <asm/atomic.h>
24#include <linux/crc32.h>
25#include <linux/dma-mapping.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/hardirq.h>
29#include <linux/if_vlan.h>
30#include <linux/in.h>
31#include <linux/interrupt.h>
32#include <linux/ip.h>
33#include <linux/irqflags.h>
34#include <linux/irqreturn.h>
35#include <linux/mii.h>
36#include <linux/net.h>
37#include <linux/netdevice.h>
38#include <linux/pci.h>
39#include <linux/pci_ids.h>
40#include <linux/pm.h>
41#include <linux/skbuff.h>
42#include <linux/spinlock.h>
43#include <linux/string.h>
44#include <linux/tcp.h>
45#include <linux/timer.h>
46#include <linux/types.h>
47#include <linux/workqueue.h>
48
49#include "atl2.h"
50
51#define ATL2_DRV_VERSION "2.2.3"
52
53static char atl2_driver_name[] = "atl2";
54static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
55static char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
56static char atl2_driver_version[] = ATL2_DRV_VERSION;
57
58MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
59MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
60MODULE_LICENSE("GPL");
61MODULE_VERSION(ATL2_DRV_VERSION);
62
63/*
64 * atl2_pci_tbl - PCI Device ID Table
65 */
66static struct pci_device_id atl2_pci_tbl[] = {
67 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
68 /* required last entry */
69 {0,}
70};
71MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
72
73static void atl2_set_ethtool_ops(struct net_device *netdev);
74
75static void atl2_check_options(struct atl2_adapter *adapter);
76
77/*
78 * atl2_sw_init - Initialize general software structures (struct atl2_adapter)
79 * @adapter: board private structure to initialize
80 *
81 * atl2_sw_init initializes the Adapter private data structure.
82 * Fields are initialized based on PCI device information and
83 * OS network device settings (MTU size).
84 */
 85static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
 86{
 87	struct atl2_hw *hw = &adapter->hw;
 88	struct pci_dev *pdev = adapter->pdev;
 89
 90	/* PCI config space info */
 91	hw->vendor_id = pdev->vendor;
 92	hw->device_id = pdev->device;
 93	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 94	hw->subsystem_id = pdev->subsystem_device;
 95
 96	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
 97	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 98
	/* software defaults; link fields are updated once the PHY reports */
 99	adapter->wol = 0;
 100	adapter->ict = 50000;  /* ~100ms */
 101	adapter->link_speed = SPEED_0;   /* hardware init */
 102	adapter->link_duplex = FULL_DUPLEX;
 103
	/* MAC inter-frame/collision timing defaults — presumably the
	 * recommended values from the L2 datasheet; TODO confirm */
 104	hw->phy_configured = false;
 105	hw->preamble_len = 7;
 106	hw->ipgt = 0x60;
 107	hw->min_ifg = 0x50;
 108	hw->ipgr1 = 0x40;
 109	hw->ipgr2 = 0x60;
 110	hw->retry_buf = 2;
 111	hw->max_retry = 0xf;
 112	hw->lcol = 0x37;
 113	hw->jam_ipg = 7;
 114	hw->fc_rxd_hi = 0;
 115	hw->fc_rxd_lo = 0;
 116	hw->max_frame_size = adapter->netdev->mtu;
 117
 118	spin_lock_init(&adapter->stats_lock);
 119	spin_lock_init(&adapter->tx_lock);
 120
	/* interface starts administratively down until atl2_open/atl2_up */
 121	set_bit(__ATL2_DOWN, &adapter->flags);
 122
 123	return 0;
 124}
125
126/*
127 * atl2_set_multi - Multicast and Promiscuous mode set
128 * @netdev: network interface device structure
129 *
130 * The set_multi entry point is called whenever the multicast address
131 * list or the network interface flags are updated. This routine is
132 * responsible for configuring the hardware for proper multicast,
133 * promiscuous mode, and all-multi behavior.
134 */
 135static void atl2_set_multi(struct net_device *netdev)
 136{
 137	struct atl2_adapter *adapter = netdev_priv(netdev);
 138	struct atl2_hw *hw = &adapter->hw;
 139	struct dev_mc_list *mc_ptr;
 140	u32 rctl;
 141	u32 hash_value;
 142
 143	/* Check for Promiscuous and All Multicast modes */
 144	rctl = ATL2_READ_REG(hw, REG_MAC_CTRL);
 145
 146	if (netdev->flags & IFF_PROMISC) {
 147		rctl |= MAC_CTRL_PROMIS_EN;
 148	} else if (netdev->flags & IFF_ALLMULTI) {
 149		rctl |= MAC_CTRL_MC_ALL_EN;
 150		rctl &= ~MAC_CTRL_PROMIS_EN;
 151	} else
 152		rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
 153
 154	ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl);
 155
	/* the hash table is two 32-bit registers (index 0 and 1) */
 156	/* clear the old settings from the multicast hash table */
 157	ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
 158	ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
 159
 160	/* compute mc addresses' hash value, and put it into hash table */
 161	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
 162		hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
 163		atl2_hash_set(hw, hash_value);
 164	}
 165}
166
/* Reset all software-side ring read/write cursors to the ring start. */
 167static void init_ring_ptrs(struct atl2_adapter *adapter)
 168{
 169	/* Read / Write Ptr Initialize: */
 170	adapter->txd_write_ptr = 0;
 171	atomic_set(&adapter->txd_read_ptr, 0);
 172
 173	adapter->rxd_read_ptr = 0;
 174	adapter->rxd_write_ptr = 0;
 175
 176	atomic_set(&adapter->txs_write_ptr, 0);
 177	adapter->txs_next_clear = 0;
 178}
179
180/*
181 * atl2_configure - Configure Transmit&Receive Unit after Reset
182 * @adapter: board private structure
183 *
184 * Configure the Tx /Rx unit of the MAC after a reset.
185 */
 186static int atl2_configure(struct atl2_adapter *adapter)
 187{
 188	struct atl2_hw *hw = &adapter->hw;
 189	u32 value;
 190
 191	/* clear interrupt status */
 192	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff);
 193
	/* set MAC Address: bytes 2..5 in the first register, 0..1 in the
	 * second (hardware expects this split big-endian-style layout) */
 194	/* set MAC Address */
 195	value = (((u32)hw->mac_addr[2]) << 24) |
 196	    (((u32)hw->mac_addr[3]) << 16) |
 197	    (((u32)hw->mac_addr[4]) << 8) |
 198	    (((u32)hw->mac_addr[5]));
 199	ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value);
 200	value = (((u32)hw->mac_addr[0]) << 8) |
 201	    (((u32)hw->mac_addr[1]));
 202	ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value);
 203
	/* one shared high dword for all three rings; they live in one
	 * contiguous DMA allocation (see atl2_setup_ring_resources) */
 204	/* HI base address */
 205	ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
 206	    (u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32));
 207
 208	/* LO base address */
 209	ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO,
 210	    (u32)(adapter->txd_dma & 0x00000000ffffffffULL));
 211	ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO,
 212	    (u32)(adapter->txs_dma & 0x00000000ffffffffULL));
 213	ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO,
 214	    (u32)(adapter->rxd_dma & 0x00000000ffffffffULL));
 215
	/* element count; TXD size is in dwords, hence the /4 */
 216	/* element count */
 217	ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4));
 218	ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size);
 219	ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM,  (u16)adapter->rxd_ring_size);
 220
 221	/* config Internal SRAM */
 222/*
 223	ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end);
 224	ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end);
 225*/
 226
 227	/* config IPG/IFG */
 228	value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) <<
 229		MAC_IPG_IFG_IPGT_SHIFT) |
 230		(((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) <<
 231		MAC_IPG_IFG_MIFG_SHIFT) |
 232		(((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) <<
 233		MAC_IPG_IFG_IPGR1_SHIFT)|
 234		(((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) <<
 235		MAC_IPG_IFG_IPGR2_SHIFT);
 236	ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value);
 237
 238	/* config  Half-Duplex Control */
 239	value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
 240		(((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) <<
 241		MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
 242		MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
 243		(0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
 244		(((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) <<
 245		MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
 246	ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value);
 247
 248	/* set Interrupt Moderator Timer */
 249	ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt);
 250	ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN);
 251
 252	/* set Interrupt Clear Timer */
 253	ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict);
 254
	/* hardware MTU includes L2 header, VLAN tag and FCS */
 255	/* set MTU */
 256	ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
 257		ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE);
 258
 259	/* 1590 */
 260	ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);
 261
 262	/* flow control */
 263	ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi);
 264	ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo);
 265
 266	/* Init mailbox */
 267	ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr);
 268	ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr);
 269
 270	/* enable DMA read/write */
 271	ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN);
 272	ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN);
 273
	/* a PCIe PHY link-down latched in ISR means configuration failed;
	 * return 1 in that case, 0 on success */
 274	value = ATL2_READ_REG(&adapter->hw, REG_ISR);
 275	if ((value & ISR_PHY_LINKDOWN) != 0)
 276		value = 1; /* config failed */
 277	else
 278		value = 0;
 279
 280	/* clear all interrupt status */
 281	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff);
 282	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
 283	return value;
 284}
285
286/*
287 * atl2_setup_ring_resources - allocate Tx / RX descriptor resources
288 * @adapter: board private structure
289 *
290 * Return 0 on success, negative on failure
291 */
 292static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
 293{
 294	struct pci_dev *pdev = adapter->pdev;
 295	int size;
 296	u8 offset = 0;
 297
	/* one contiguous DMA buffer holds TXD, TXS and RXD rings; the
	 * slack bytes (+7, +7, +127) leave room for the alignment fixups
	 * applied below */
 298	/* real ring DMA buffer */
 299	adapter->ring_size = size =
 300		adapter->txd_ring_size * 1 + 7 +	/* dword align */
 301		adapter->txs_ring_size * 4 + 7 +	/* dword align */
 302		adapter->rxd_ring_size * 1536 + 127;	/* 128bytes align */
 303
 304	adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
 305		&adapter->ring_dma);
 306	if (!adapter->ring_vir_addr)
 307		return -ENOMEM;
 308	memset(adapter->ring_vir_addr, 0, adapter->ring_size);
 309
	/* Init TXD Ring: round the DMA address up to an 8-byte boundary,
	 * applying the same offset to the virtual address */
 310	/* Init TXD Ring */
 311	adapter->txd_dma = adapter->ring_dma ;
 312	offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
 313	adapter->txd_dma += offset;
 314	adapter->txd_ring = (struct tx_pkt_header *) (adapter->ring_vir_addr +
 315		offset);
 316
	/* Init TXS Ring: follows TXD, again 8-byte aligned */
 317	/* Init TXS Ring */
 318	adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
 319	offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0;
 320	adapter->txs_dma += offset;
 321	adapter->txs_ring = (struct tx_pkt_status *)
 322		(((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset));
 323
	/* Init RXD Ring: 128-byte aligned, then shifted so that the packet
	 * payload (after the descriptor header) lands on the boundary —
	 * presumably why 8 is subtracted; TODO confirm against rx_desc
	 * layout in atl2.h */
 324	/* Init RXD Ring */
 325	adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4;
 326	offset = (adapter->rxd_dma & 127) ?
 327		(128 - (adapter->rxd_dma & 127)) : 0;
 328	if (offset > 7)
 329		offset -= 8;
 330	else
 331		offset += (128 - 8);
 332
 333	adapter->rxd_dma += offset;
 334	adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) +
 335		(adapter->txs_ring_size * 4 + offset));
 336
 337/*
 338 * Read / Write Ptr Initialize:
 339 *      init_ring_ptrs(adapter);
 340 */
 341	return 0;
 342}
343
344/*
345 * atl2_irq_enable - Enable default interrupt generation settings
346 * @adapter: board private structure
347 */
 348static inline void atl2_irq_enable(struct atl2_adapter *adapter)
 349{
	/* unmask the normal interrupt set and flush the posted write */
 350	ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
 351	ATL2_WRITE_FLUSH(&adapter->hw);
 352}
353
354/*
355 * atl2_irq_disable - Mask off interrupt generation on the NIC
356 * @adapter: board private structure
357 */
 358static inline void atl2_irq_disable(struct atl2_adapter *adapter)
 359{
	/* mask everything, flush, then wait for in-flight handlers */
 360	ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0);
 361	ATL2_WRITE_FLUSH(&adapter->hw);
 362	synchronize_irq(adapter->pdev->irq);
 363}
364
365#ifdef NETIF_F_HW_VLAN_TX
/* Enable/disable hardware VLAN tag stripping according to whether a
 * vlan_group is registered; interrupts are held off around the change. */
 366static void atl2_vlan_rx_register(struct net_device *netdev,
 367	struct vlan_group *grp)
 368{
 369	struct atl2_adapter *adapter = netdev_priv(netdev);
 370	u32 ctrl;
 371
 372	atl2_irq_disable(adapter);
 373	adapter->vlgrp = grp;
 374
 375	if (grp) {
 376		/* enable VLAN tag insert/strip */
 377		ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
 378		ctrl |= MAC_CTRL_RMV_VLAN;
 379		ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
 380	} else {
 381		/* disable VLAN tag insert/strip */
 382		ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
 383		ctrl &= ~MAC_CTRL_RMV_VLAN;
 384		ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
 385	}
 386
 387	atl2_irq_enable(adapter);
 388}
389
/* Re-apply the cached vlan_group after a hardware reset. */
 390static void atl2_restore_vlan(struct atl2_adapter *adapter)
 391{
 392	atl2_vlan_rx_register(adapter->netdev, adapter->vlgrp);
 393}
394#endif
395
/* Drain received frames from the RXD ring: copy each good frame into a
 * freshly allocated skb and hand it to the stack; account errors.
 * Runs from the interrupt handler. */
 396static void atl2_intr_rx(struct atl2_adapter *adapter)
 397{
 398	struct net_device *netdev = adapter->netdev;
 399	struct rx_desc *rxd;
 400	struct sk_buff *skb;
 401
 402	do {
 403		rxd = adapter->rxd_ring+adapter->rxd_write_ptr;
 404		if (!rxd->status.update)
 405			break; /* end of tx */
 406
 407		/* clear this flag at once */
 408		rxd->status.update = 0;
 409
		/* minimum Ethernet frame is 60 bytes (without FCS); the
		 * -4 strips the trailing FCS from the reported size */
 410		if (rxd->status.ok && rxd->status.pkt_size >= 60) {
 411			int rx_size = (int)(rxd->status.pkt_size - 4);
 412			/* alloc new buffer */
 413			skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN);
 414			if (NULL == skb) {
 415				printk(KERN_WARNING
 416					"%s: Mem squeeze, deferring packet.\n",
 417					netdev->name);
 418				/*
 419				 * Check that some rx space is free. If not,
 420				 * free one and mark stats->rx_dropped++.
 421				 */
 422				adapter->net_stats.rx_dropped++;
 423				break;
 424			}
 425			skb_reserve(skb, NET_IP_ALIGN);
 426			skb->dev = netdev;
 427			memcpy(skb->data, rxd->packet, rx_size);
 428			skb_put(skb, rx_size);
 429			skb->protocol = eth_type_trans(skb, netdev);
 430#ifdef NETIF_F_HW_VLAN_TX
			/* un-scramble the hardware's VLAN tag bit layout
			 * before passing it up */
 431			if (adapter->vlgrp && (rxd->status.vlan)) {
 432				u16 vlan_tag = (rxd->status.vtag>>4) |
 433					((rxd->status.vtag&7) << 13) |
 434					((rxd->status.vtag&8) << 9);
 435				vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
 436			} else
 437#endif
 438			netif_rx(skb);
 439			adapter->net_stats.rx_bytes += rx_size;
 440			adapter->net_stats.rx_packets++;
 441			netdev->last_rx = jiffies;
 442		} else {
 443			adapter->net_stats.rx_errors++;
 444
 445			if (rxd->status.ok && rxd->status.pkt_size <= 60)
 446				adapter->net_stats.rx_length_errors++;
 447			if (rxd->status.mcast)
 448				adapter->net_stats.multicast++;
 449			if (rxd->status.crc)
 450				adapter->net_stats.rx_crc_errors++;
 451			if (rxd->status.align)
 452				adapter->net_stats.rx_frame_errors++;
 453		}
 454
 455		/* advance write ptr */
 456		if (++adapter->rxd_write_ptr == adapter->rxd_ring_size)
 457			adapter->rxd_write_ptr = 0;
 458	} while (1);
 459
	/* tell hardware how far we have consumed via the mailbox */
 460	/* update mailbox? */
 461	adapter->rxd_read_ptr = adapter->rxd_write_ptr;
 462	ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr);
 463}
464
/* Reap completed transmissions: walk the TXS status ring, advance the
 * TXD read pointer past each completed packet, update statistics, and
 * wake the queue if space was freed. Runs from the interrupt handler. */
 465static void atl2_intr_tx(struct atl2_adapter *adapter)
 466{
 467	u32 txd_read_ptr;
 468	u32 txs_write_ptr;
 469	struct tx_pkt_status *txs;
 470	struct tx_pkt_header *txph;
 471	int free_hole = 0;
 472
 473	do {
 474		txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
 475		txs = adapter->txs_ring + txs_write_ptr;
 476		if (!txs->update)
 477			break; /* tx stop here */
 478
 479		free_hole = 1;
 480		txs->update = 0;
 481
 482		if (++txs_write_ptr == adapter->txs_ring_size)
 483			txs_write_ptr = 0;
 484		atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr);
 485
 486		txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr);
 487		txph = (struct tx_pkt_header *)
 488			(((u8 *)adapter->txd_ring) + txd_read_ptr);
 489
		/* TXD header and TXS status must agree on the packet size;
		 * if not, dump diagnostic state (current, previous and
		 * next-but-one status entries) and carry on with the
		 * header's size */
 490		if (txph->pkt_size != txs->pkt_size) {
 491			struct tx_pkt_status *old_txs = txs;
 492			printk(KERN_WARNING
 493				"%s: txs packet size not consistent with txd"
 494				" txd_:0x%08x, txs_:0x%08x!\n",
 495				adapter->netdev->name,
 496				*(u32 *)txph, *(u32 *)txs);
 497			printk(KERN_WARNING
 498				"txd read ptr: 0x%x\n",
 499				txd_read_ptr);
 500			txs = adapter->txs_ring + txs_write_ptr;
 501			printk(KERN_WARNING
 502				"txs-behind:0x%08x\n",
 503				*(u32 *)txs);
 504			if (txs_write_ptr < 2) {
 505				txs = adapter->txs_ring +
 506					(adapter->txs_ring_size +
 507					txs_write_ptr - 2);
 508			} else {
 509				txs = adapter->txs_ring + (txs_write_ptr - 2);
 510			}
 511			printk(KERN_WARNING
 512				"txs-before:0x%08x\n",
 513				*(u32 *)txs);
 514			txs = old_txs;
 515		}
 516
		/* advance past payload plus the 4-byte tx_pkt_header,
		 * keeping the pointer dword-aligned */
 517		/* 4 bytes for the TPH */
 518		txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3);
 519		if (txd_read_ptr >= adapter->txd_ring_size)
 520			txd_read_ptr -= adapter->txd_ring_size;
 521
 522		atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr);
 523
 524		/* tx statistics: */
 525		if (txs->ok)
 526			adapter->net_stats.tx_packets++;
 527		else
 528			adapter->net_stats.tx_errors++;
 529
 530		if (txs->defer)
 531			adapter->net_stats.collisions++;
 532		if (txs->abort_col)
 533			adapter->net_stats.tx_aborted_errors++;
 534		if (txs->late_col)
 535			adapter->net_stats.tx_window_errors++;
 536		if (txs->underun)
 537			adapter->net_stats.tx_fifo_errors++;
 538	} while (1);
 539
 540	if (free_hole) {
 541		if (netif_queue_stopped(adapter->netdev) &&
 542			netif_carrier_ok(adapter->netdev))
 543			netif_wake_queue(adapter->netdev);
 544	}
 545}
546
/* Read PHY link status and, on loss of link, stop the queue and mark
 * carrier off immediately; full link handling is deferred to the
 * link_chg_task workqueue. */
 547static void atl2_check_for_link(struct atl2_adapter *adapter)
 548{
 549	struct net_device *netdev = adapter->netdev;
 550	u16 phy_data = 0;
 551
 552	spin_lock(&adapter->stats_lock);
	/* BMSR link status is latched-low; read twice to get the
	 * current state */
 553	atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
 554	atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
 555	spin_unlock(&adapter->stats_lock);
 556
 557	/* notify upper layer link down ASAP */
 558	if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
 559		if (netif_carrier_ok(netdev)) { /* old link state: Up */
 560			printk(KERN_INFO "%s: %s NIC Link is Down\n",
 561				atl2_driver_name, netdev->name);
 562			adapter->link_speed = SPEED_0;
 563			netif_carrier_off(netdev);
 564			netif_stop_queue(netdev);
 565		}
 566	}
 567	schedule_work(&adapter->link_chg_task);
 568}
569
/* Acknowledge a PHY interrupt by reading the PHY interrupt status
 * register (register 19); the value itself is discarded. */
 570static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
 571{
 572	u16 phy_data;
 573	spin_lock(&adapter->stats_lock);
 574	atl2_read_phy_reg(&adapter->hw, 19, &phy_data);
 575	spin_unlock(&adapter->stats_lock);
 576}
577
578/*
579 * atl2_intr - Interrupt Handler
580 * @irq: interrupt number
581 * @data: pointer to a network interface device structure
582 * @pt_regs: CPU registers structure
583 */
 584static irqreturn_t atl2_intr(int irq, void *data)
 585{
 586	struct atl2_adapter *adapter = netdev_priv(data);
 587	struct atl2_hw *hw = &adapter->hw;
 588	u32 status;
 589
	/* ISR reads as 0 when the interrupt was not ours (shared line) */
 590	status = ATL2_READ_REG(hw, REG_ISR);
 591	if (0 == status)
 592		return IRQ_NONE;
 593
 594	/* link event */
 595	if (status & ISR_PHY)
 596		atl2_clear_phy_int(adapter);
 597
 598	/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
 599	ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
 600
	/* fatal conditions: mask everything and hand recovery to the
	 * reset_task workqueue */
 601	/* check if PCIE PHY Link down */
 602	if (status & ISR_PHY_LINKDOWN) {
 603		if (netif_running(adapter->netdev)) { /* reset MAC */
 604			ATL2_WRITE_REG(hw, REG_ISR, 0);
 605			ATL2_WRITE_REG(hw, REG_IMR, 0);
 606			ATL2_WRITE_FLUSH(hw);
 607			schedule_work(&adapter->reset_task);
 608			return IRQ_HANDLED;
 609		}
 610	}
 611
 612	/* check if DMA read/write error? */
 613	if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
 614		ATL2_WRITE_REG(hw, REG_ISR, 0);
 615		ATL2_WRITE_REG(hw, REG_IMR, 0);
 616		ATL2_WRITE_FLUSH(hw);
 617		schedule_work(&adapter->reset_task);
 618		return IRQ_HANDLED;
 619	}
 620
 621	/* link event */
 622	if (status & (ISR_PHY | ISR_MANUAL)) {
 623		adapter->net_stats.tx_carrier_errors++;
 624		atl2_check_for_link(adapter);
 625	}
 626
 627	/* transmit event */
 628	if (status & ISR_TX_EVENT)
 629		atl2_intr_tx(adapter);
 630
 631	/* rx exception */
 632	if (status & ISR_RX_EVENT)
 633		atl2_intr_rx(adapter);
 634
	/* writing 0 re-enables interrupts (clears ISR_DIS_INT) */
 635	/* re-enable Interrupt */
 636	ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
 637	return IRQ_HANDLED;
 638}
639
/* Request the device interrupt line, preferring MSI when available;
 * an MSI vector is exclusive, so IRQF_SHARED is dropped in that case.
 * Returns 0 on success or the request_irq() error. */
 640static int atl2_request_irq(struct atl2_adapter *adapter)
 641{
 642	struct net_device *netdev = adapter->netdev;
 643	int flags, err = 0;
 644
 645	flags = IRQF_SHARED;
 646#ifdef CONFIG_PCI_MSI
 647	adapter->have_msi = true;
 648	err = pci_enable_msi(adapter->pdev);
 649	if (err)
 650		adapter->have_msi = false;
 651
 652	if (adapter->have_msi)
 653		flags &= ~IRQF_SHARED;
 654#endif
 655
 656	return request_irq(adapter->pdev->irq, &atl2_intr, flags, netdev->name,
 657		netdev);
 658}
659
660/*
661 * atl2_free_ring_resources - Free Tx / RX descriptor Resources
662 * @adapter: board private structure
663 *
664 * Free all transmit software resources
665 */
 666static void atl2_free_ring_resources(struct atl2_adapter *adapter)
 667{
 668	struct pci_dev *pdev = adapter->pdev;
	/* one consistent allocation covers all three rings */
 669	pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
 670		adapter->ring_dma);
 671}
672
673/*
674 * atl2_open - Called when a network interface is made active
675 * @netdev: network interface device structure
676 *
677 * Returns 0 on success, negative value on failure
678 *
679 * The open entry point is called when a network interface is made
680 * active by the system (IFF_UP). At this point all resources needed
681 * for transmit and receive operations are allocated, the interrupt
682 * handler is registered with the OS, the watchdog timer is started,
683 * and the stack is notified that the interface is ready.
684 */
 685static int atl2_open(struct net_device *netdev)
 686{
 687	struct atl2_adapter *adapter = netdev_priv(netdev);
 688	int err;
 689	u32 val;
 690
 691	/* disallow open during test */
 692	if (test_bit(__ATL2_TESTING, &adapter->flags))
 693		return -EBUSY;
 694
 695	/* allocate transmit descriptors */
 696	err = atl2_setup_ring_resources(adapter);
 697	if (err)
 698		return err;
 699
 700	err = atl2_init_hw(&adapter->hw);
 701	if (err) {
 702		err = -EIO;
 703		goto err_init_hw;
 704	}
 705
 706	/* hardware has been reset, we need to reload some things */
 707	atl2_set_multi(netdev);
 708	init_ring_ptrs(adapter);
 709
 710#ifdef NETIF_F_HW_VLAN_TX
 711	atl2_restore_vlan(adapter);
 712#endif
 713
 714	if (atl2_configure(adapter)) {
 715		err = -EIO;
 716		goto err_config;
 717	}
 718
 719	err = atl2_request_irq(adapter);
 720	if (err)
 721		goto err_req_irq;
 722
 723	clear_bit(__ATL2_DOWN, &adapter->flags);
 724
	/* first watchdog tick in ~4s */
 725	mod_timer(&adapter->watchdog_timer, jiffies + 4*HZ);
 726
	/* force a manual interrupt to kick off link detection */
 727	val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
 728	ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
 729		val | MASTER_CTRL_MANUAL_INT);
 730
 731	atl2_irq_enable(adapter);
 732
 733	return 0;
 734
	/* unwind: all failure paths free the rings and reset the MAC */
 735err_init_hw:
 736err_req_irq:
 737err_config:
 738	atl2_free_ring_resources(adapter);
 739	atl2_reset_hw(&adapter->hw);
 740
 741	return err;
 742}
743
/* Take the interface down: stop TX, reset the MAC, quiesce interrupts
 * and timers, and report carrier off. Counterpart of atl2_up(). */
 744static void atl2_down(struct atl2_adapter *adapter)
 745{
 746	struct net_device *netdev = adapter->netdev;
 747
 748	/* signal that we're down so the interrupt handler does not
 749	 * reschedule our watchdog timer */
 750	set_bit(__ATL2_DOWN, &adapter->flags);
 751
 752#ifdef NETIF_F_LLTX
 753	netif_stop_queue(netdev);
 754#else
 755	netif_tx_disable(netdev);
 756#endif
 757
 758	/* reset MAC to disable all RX/TX */
 759	atl2_reset_hw(&adapter->hw);
 760	msleep(1);
 761
 762	atl2_irq_disable(adapter);
 763
 764	del_timer_sync(&adapter->watchdog_timer);
 765	del_timer_sync(&adapter->phy_config_timer);
	/* cancel any pending PHY reconfiguration request */
 766	clear_bit(0, &adapter->cfg_phy);
 767
 768	netif_carrier_off(netdev);
 769	adapter->link_speed = SPEED_0;
 770	adapter->link_duplex = -1;
 771}
772
/* Release the interrupt line and tear down MSI if it was enabled. */
 773static void atl2_free_irq(struct atl2_adapter *adapter)
 774{
 775	struct net_device *netdev = adapter->netdev;
 776
 777	free_irq(adapter->pdev->irq, netdev);
 778
 779#ifdef CONFIG_PCI_MSI
 780	if (adapter->have_msi)
 781		pci_disable_msi(adapter->pdev);
 782#endif
 783}
784
785/*
786 * atl2_close - Disables a network interface
787 * @netdev: network interface device structure
788 *
789 * Returns 0, this is not allowed to fail
790 *
791 * The close entry point is called when an interface is de-activated
792 * by the OS. The hardware is still under the drivers control, but
793 * needs to be disabled. A global MAC reset is issued to stop the
794 * hardware, and all transmit and receive resources are freed.
795 */
 796static int atl2_close(struct net_device *netdev)
 797{
 798	struct atl2_adapter *adapter = netdev_priv(netdev);
 799
	/* closing while a reset is in flight indicates a locking bug */
 800	WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
 801
 802	atl2_down(adapter);
 803	atl2_free_irq(adapter);
 804	atl2_free_ring_resources(adapter);
 805
 806	return 0;
 807}
808
/* Number of free entries in the TXS status ring (one entry is always
 * kept unused to distinguish full from empty). */
 809static inline int TxsFreeUnit(struct atl2_adapter *adapter)
 810{
 811	u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
 812
 813	return (adapter->txs_next_clear >= txs_write_ptr) ?
 814		(int) (adapter->txs_ring_size - adapter->txs_next_clear +
 815		txs_write_ptr - 1) :
 816		(int) (txs_write_ptr - adapter->txs_next_clear - 1);
 817}
818
/* Number of free bytes in the TXD data ring (one byte is always kept
 * unused to distinguish full from empty). */
 819static inline int TxdFreeBytes(struct atl2_adapter *adapter)
 820{
 821	u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr);
 822
 823	return (adapter->txd_write_ptr >= txd_read_ptr) ?
 824		(int) (adapter->txd_ring_size - adapter->txd_write_ptr +
 825		txd_read_ptr - 1) :
 826		(int) (txd_read_ptr - adapter->txd_write_ptr - 1);
 827}
828
/* hard_start_xmit: copy the skb into the byte-oriented TXD ring behind
 * a 4-byte tx_pkt_header, then bump the hardware mailbox. The hardware
 * DMAs directly from the ring, so the skb is freed immediately.
 * Returns NETDEV_TX_OK, NETDEV_TX_BUSY (ring full) or NETDEV_TX_LOCKED
 * (LLTX lock contention). */
 829static int atl2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 830{
 831	struct atl2_adapter *adapter = netdev_priv(netdev);
 832	unsigned long flags;
 833	struct tx_pkt_header *txph;
 834	u32 offset, copy_len;
 835	int txs_unused;
 836	int txbuf_unused;
 837
 838	if (test_bit(__ATL2_DOWN, &adapter->flags)) {
 839		dev_kfree_skb_any(skb);
 840		return NETDEV_TX_OK;
 841	}
 842
 843	if (unlikely(skb->len <= 0)) {
 844		dev_kfree_skb_any(skb);
 845		return NETDEV_TX_OK;
 846	}
 847
 848#ifdef NETIF_F_LLTX
 849	local_irq_save(flags);
 850	if (!spin_trylock(&adapter->tx_lock)) {
 851		/* Collision - tell upper layer to requeue */
 852		local_irq_restore(flags);
 853		return NETDEV_TX_LOCKED;
 854	}
 855#else
 856	spin_lock_irqsave(&adapter->tx_lock, flags);
 857#endif
 858	txs_unused = TxsFreeUnit(adapter);
 859	txbuf_unused = TxdFreeBytes(adapter);
 860
	/* need room for header + payload + dword padding, plus one free
	 * status slot */
 861	if (skb->len + sizeof(struct tx_pkt_header) + 4  > txbuf_unused ||
 862		txs_unused < 1) {
 863		/* not enough resources */
 864		netif_stop_queue(netdev);
 865		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 866		return NETDEV_TX_BUSY;
 867	}
 868
 869	offset = adapter->txd_write_ptr;
 870
 871	txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset);
 872
 873	*(u32 *)txph = 0;
 874	txph->pkt_size = skb->len;
 875
	/* step past the 4-byte header; the payload may wrap around the
	 * end of the ring, hence the two-memcpy split below */
 876	offset += 4;
 877	if (offset >= adapter->txd_ring_size)
 878		offset -= adapter->txd_ring_size;
 879	copy_len = adapter->txd_ring_size - offset;
 880	if (copy_len >= skb->len) {
 881		memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
 882		offset += ((u32)(skb->len + 3) & ~3);
 883	} else {
 884		memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
 885		memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
 886			skb->len-copy_len);
		/* wrapped: new offset is the dword-aligned remainder
		 * measured from the ring start */
 887		offset = ((u32)(skb->len-copy_len + 3) & ~3);
 888	}
 889#ifdef NETIF_F_HW_VLAN_TX
	/* rearrange the tag into the hardware's scrambled bit layout
	 * (inverse of the unscramble in atl2_intr_rx) */
 890	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
 891		u16 vlan_tag = vlan_tx_tag_get(skb);
 892		vlan_tag = (vlan_tag << 4) |
 893			(vlan_tag >> 13) |
 894			((vlan_tag >> 9) & 0x8);
 895		txph->ins_vlan = 1;
 896		txph->vlan = vlan_tag;
 897	}
 898#endif
 899	if (offset >= adapter->txd_ring_size)
 900		offset -= adapter->txd_ring_size;
 901	adapter->txd_write_ptr = offset;
 902
 903	/* clear txs before send */
 904	adapter->txs_ring[adapter->txs_next_clear].update = 0;
 905	if (++adapter->txs_next_clear == adapter->txs_ring_size)
 906		adapter->txs_next_clear = 0;
 907
	/* mailbox index is in dwords, hence the >> 2 */
 908	ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
 909		(adapter->txd_write_ptr >> 2));
 910
 911	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 912
 913	netdev->trans_start = jiffies;
 914	dev_kfree_skb_any(skb);
 915	return NETDEV_TX_OK;
 916}
917
918/*
919 * atl2_get_stats - Get System Network Statistics
920 * @netdev: network interface device structure
921 *
922 * Returns the address of the device statistics structure.
923 * The statistics are actually updated from the timer callback.
924 */
 925static struct net_device_stats *atl2_get_stats(struct net_device *netdev)
 926{
 927	struct atl2_adapter *adapter = netdev_priv(netdev);
	/* counters are maintained by the interrupt and watchdog paths */
 928	return &adapter->net_stats;
 929}
930
931/*
932 * atl2_change_mtu - Change the Maximum Transfer Unit
933 * @netdev: network interface device structure
934 * @new_mtu: new value for maximum frame size
935 *
936 * Returns 0 on success, negative on failure
937 */
 938static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
 939{
 940	struct atl2_adapter *adapter = netdev_priv(netdev);
 941	struct atl2_hw *hw = &adapter->hw;
 942
	/* hardware supports at most standard Ethernet + a VLAN tag */
 943	if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
 944		return -EINVAL;
 945
	/* program the full on-wire frame size, not just the MTU */
 946	/* set MTU */
 947	if (hw->max_frame_size != new_mtu) {
 948		netdev->mtu = new_mtu;
 949		ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
 950			VLAN_SIZE + ETHERNET_FCS_SIZE);
 951	}
 952
 953	return 0;
 954}
955
956/*
957 * atl2_set_mac - Change the Ethernet Address of the NIC
958 * @netdev: network interface device structure
959 * @p: pointer to an address structure
960 *
961 * Returns 0 on success, negative on failure
962 */
 963static int atl2_set_mac(struct net_device *netdev, void *p)
 964{
 965	struct atl2_adapter *adapter = netdev_priv(netdev);
 966	struct sockaddr *addr = p;
 967
 968	if (!is_valid_ether_addr(addr->sa_data))
 969		return -EADDRNOTAVAIL;
 970
	/* address may only be changed while the interface is down */
 971	if (netif_running(netdev))
 972		return -EBUSY;
 973
 974	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 975	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
 976
	/* push the new address into the MAC station-address registers */
 977	atl2_set_mac_addr(&adapter->hw);
 978
 979	return 0;
 980}
981
982/*
983 * atl2_mii_ioctl -
984 * @netdev:
985 * @ifreq:
986 * @cmd:
987 */
/* Handle the MII ioctls: report PHY id, and read/write PHY registers
 * under the stats_lock (which also serializes other PHY accesses).
 * Register writes are privileged and limited to addresses 0-31. */
 988static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 989{
 990	struct atl2_adapter *adapter = netdev_priv(netdev);
 991	struct mii_ioctl_data *data = if_mii(ifr);
 992	unsigned long flags;
 993
 994	switch (cmd) {
 995	case SIOCGMIIPHY:
 996		data->phy_id = 0;
 997		break;
 998	case SIOCGMIIREG:
 999		if (!capable(CAP_NET_ADMIN))
 1000			return -EPERM;
 1001		spin_lock_irqsave(&adapter->stats_lock, flags);
 1002		if (atl2_read_phy_reg(&adapter->hw,
 1003			data->reg_num & 0x1F, &data->val_out)) {
 1004			spin_unlock_irqrestore(&adapter->stats_lock, flags);
 1005			return -EIO;
 1006		}
 1007		spin_unlock_irqrestore(&adapter->stats_lock, flags);
 1008		break;
 1009	case SIOCSMIIREG:
 1010		if (!capable(CAP_NET_ADMIN))
 1011			return -EPERM;
 1012		if (data->reg_num & ~(0x1F))
 1013			return -EFAULT;
 1014		spin_lock_irqsave(&adapter->stats_lock, flags);
 1015		if (atl2_write_phy_reg(&adapter->hw, data->reg_num,
 1016			data->val_in)) {
 1017			spin_unlock_irqrestore(&adapter->stats_lock, flags);
 1018			return -EIO;
 1019		}
 1020		spin_unlock_irqrestore(&adapter->stats_lock, flags);
 1021		break;
 1022	default:
 1023		return -EOPNOTSUPP;
 1024	}
 1025	return 0;
 1026}
1027
1028/*
1029 * atl2_ioctl -
1030 * @netdev:
1031 * @ifreq:
1032 * @cmd:
1033 */
/* Top-level ioctl dispatcher: forwards MII commands to atl2_mii_ioctl
 * and (with ETHTOOL_OPS_COMPAT) legacy ethtool requests. */
 1034static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 1035{
 1036	switch (cmd) {
 1037	case SIOCGMIIPHY:
 1038	case SIOCGMIIREG:
 1039	case SIOCSMIIREG:
 1040		return atl2_mii_ioctl(netdev, ifr, cmd);
 1041#ifdef ETHTOOL_OPS_COMPAT
 1042	case SIOCETHTOOL:
 1043		return ethtool_ioctl(ifr);
 1044#endif
 1045	default:
 1046		return -EOPNOTSUPP;
 1047	}
 1048}
1049
1050/*
1051 * atl2_tx_timeout - Respond to a Tx Hang
1052 * @netdev: network interface device structure
1053 */
 1054static void atl2_tx_timeout(struct net_device *netdev)
 1055{
 1056	struct atl2_adapter *adapter = netdev_priv(netdev);
 1057
 1058	/* Do the reset outside of interrupt context */
 1059	schedule_work(&adapter->reset_task);
 1060}
1061
1062/*
1063 * atl2_watchdog - Timer Call-back
1064 * @data: pointer to netdev cast into an unsigned long
1065 */
 1066static void atl2_watchdog(unsigned long data)
 1067{
 1068	struct atl2_adapter *adapter = (struct atl2_adapter *) data;
 1069	u32 drop_rxd, drop_rxs;
 1070	unsigned long flags;
 1071
 1072	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
 1073		spin_lock_irqsave(&adapter->stats_lock, flags);
		/* overflow counters are clear-on-read in hardware, so they
		 * are accumulated into net_stats here */
 1074		drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
 1075		drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
 1076		adapter->net_stats.rx_over_errors += (drop_rxd+drop_rxs);
 1077		spin_unlock_irqrestore(&adapter->stats_lock, flags);
 1078
 1079		/* Reset the timer */
 1080		mod_timer(&adapter->watchdog_timer, jiffies + 4 * HZ);
 1081	}
 1082}
1083
1084/*
1085 * atl2_phy_config - Timer Call-back
1086 * @data: pointer to netdev cast into an unsigned long
1087 */
1088static void atl2_phy_config(unsigned long data)
1089{
1090 struct atl2_adapter *adapter = (struct atl2_adapter *) data;
1091 struct atl2_hw *hw = &adapter->hw;
1092 unsigned long flags;
1093
1094 spin_lock_irqsave(&adapter->stats_lock, flags);
1095 atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1096 atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN |
1097 MII_CR_RESTART_AUTO_NEG);
1098 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1099 clear_bit(0, &adapter->cfg_phy);
1100}
1101
1102static int atl2_up(struct atl2_adapter *adapter)
1103{
1104 struct net_device *netdev = adapter->netdev;
1105 int err = 0;
1106 u32 val;
1107
1108 /* hardware has been reset, we need to reload some things */
1109
1110 err = atl2_init_hw(&adapter->hw);
1111 if (err) {
1112 err = -EIO;
1113 return err;
1114 }
1115
1116 atl2_set_multi(netdev);
1117 init_ring_ptrs(adapter);
1118
1119#ifdef NETIF_F_HW_VLAN_TX
1120 atl2_restore_vlan(adapter);
1121#endif
1122
1123 if (atl2_configure(adapter)) {
1124 err = -EIO;
1125 goto err_up;
1126 }
1127
1128 clear_bit(__ATL2_DOWN, &adapter->flags);
1129
1130 val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
1131 ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
1132 MASTER_CTRL_MANUAL_INT);
1133
1134 atl2_irq_enable(adapter);
1135
1136err_up:
1137 return err;
1138}
1139
static void atl2_reinit_locked(struct atl2_adapter *adapter)
{
	/* This function sleeps (msleep/atl2_up), so it must never run
	 * in interrupt context. */
	WARN_ON(in_interrupt());
	/* Spin (sleeping 1ms at a time) until we own __ATL2_RESETTING,
	 * serializing against any other reset in progress. */
	while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
		msleep(1);
	atl2_down(adapter);
	atl2_up(adapter);
	clear_bit(__ATL2_RESETTING, &adapter->flags);
}
1149
1150static void atl2_reset_task(struct work_struct *work)
1151{
1152 struct atl2_adapter *adapter;
1153 adapter = container_of(work, struct atl2_adapter, reset_task);
1154
1155 atl2_reinit_locked(adapter);
1156}
1157
1158static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
1159{
1160 u32 value;
1161 struct atl2_hw *hw = &adapter->hw;
1162 struct net_device *netdev = adapter->netdev;
1163
1164 /* Config MAC CTRL Register */
1165 value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
1166
1167 /* duplex */
1168 if (FULL_DUPLEX == adapter->link_duplex)
1169 value |= MAC_CTRL_DUPLX;
1170
1171 /* flow control */
1172 value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
1173
1174 /* PAD & CRC */
1175 value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1176
1177 /* preamble length */
1178 value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) <<
1179 MAC_CTRL_PRMLEN_SHIFT);
1180
1181 /* vlan */
1182 if (adapter->vlgrp)
1183 value |= MAC_CTRL_RMV_VLAN;
1184
1185 /* filter mode */
1186 value |= MAC_CTRL_BC_EN;
1187 if (netdev->flags & IFF_PROMISC)
1188 value |= MAC_CTRL_PROMIS_EN;
1189 else if (netdev->flags & IFF_ALLMULTI)
1190 value |= MAC_CTRL_MC_ALL_EN;
1191
1192 /* half retry buffer */
1193 value |= (((u32)(adapter->hw.retry_buf &
1194 MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT);
1195
1196 ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
1197}
1198
1199static int atl2_check_link(struct atl2_adapter *adapter)
1200{
1201 struct atl2_hw *hw = &adapter->hw;
1202 struct net_device *netdev = adapter->netdev;
1203 int ret_val;
1204 u16 speed, duplex, phy_data;
1205 int reconfig = 0;
1206
1207 /* MII_BMSR must read twise */
1208 atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
1209 atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
1210 if (!(phy_data&BMSR_LSTATUS)) { /* link down */
1211 if (netif_carrier_ok(netdev)) { /* old link state: Up */
1212 u32 value;
1213 /* disable rx */
1214 value = ATL2_READ_REG(hw, REG_MAC_CTRL);
1215 value &= ~MAC_CTRL_RX_EN;
1216 ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
1217 adapter->link_speed = SPEED_0;
1218 netif_carrier_off(netdev);
1219 netif_stop_queue(netdev);
1220 }
1221 return 0;
1222 }
1223
1224 /* Link Up */
1225 ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
1226 if (ret_val)
1227 return ret_val;
1228 switch (hw->MediaType) {
1229 case MEDIA_TYPE_100M_FULL:
1230 if (speed != SPEED_100 || duplex != FULL_DUPLEX)
1231 reconfig = 1;
1232 break;
1233 case MEDIA_TYPE_100M_HALF:
1234 if (speed != SPEED_100 || duplex != HALF_DUPLEX)
1235 reconfig = 1;
1236 break;
1237 case MEDIA_TYPE_10M_FULL:
1238 if (speed != SPEED_10 || duplex != FULL_DUPLEX)
1239 reconfig = 1;
1240 break;
1241 case MEDIA_TYPE_10M_HALF:
1242 if (speed != SPEED_10 || duplex != HALF_DUPLEX)
1243 reconfig = 1;
1244 break;
1245 }
1246 /* link result is our setting */
1247 if (reconfig == 0) {
1248 if (adapter->link_speed != speed ||
1249 adapter->link_duplex != duplex) {
1250 adapter->link_speed = speed;
1251 adapter->link_duplex = duplex;
1252 atl2_setup_mac_ctrl(adapter);
1253 printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n",
1254 atl2_driver_name, netdev->name,
1255 adapter->link_speed,
1256 adapter->link_duplex == FULL_DUPLEX ?
1257 "Full Duplex" : "Half Duplex");
1258 }
1259
1260 if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
1261 netif_carrier_on(netdev);
1262 netif_wake_queue(netdev);
1263 }
1264 return 0;
1265 }
1266
1267 /* change original link status */
1268 if (netif_carrier_ok(netdev)) {
1269 u32 value;
1270 /* disable rx */
1271 value = ATL2_READ_REG(hw, REG_MAC_CTRL);
1272 value &= ~MAC_CTRL_RX_EN;
1273 ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
1274
1275 adapter->link_speed = SPEED_0;
1276 netif_carrier_off(netdev);
1277 netif_stop_queue(netdev);
1278 }
1279
1280 /* auto-neg, insert timer to re-config phy
1281 * (if interval smaller than 5 seconds, something strange) */
1282 if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
1283 if (!test_and_set_bit(0, &adapter->cfg_phy))
1284 mod_timer(&adapter->phy_config_timer, jiffies + 5 * HZ);
1285 }
1286
1287 return 0;
1288}
1289
1290/*
1291 * atl2_link_chg_task - deal with link change event Out of interrupt context
1292 * @netdev: network interface device structure
1293 */
1294static void atl2_link_chg_task(struct work_struct *work)
1295{
1296 struct atl2_adapter *adapter;
1297 unsigned long flags;
1298
1299 adapter = container_of(work, struct atl2_adapter, link_chg_task);
1300
1301 spin_lock_irqsave(&adapter->stats_lock, flags);
1302 atl2_check_link(adapter);
1303 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1304}
1305
1306static void atl2_setup_pcicmd(struct pci_dev *pdev)
1307{
1308 u16 cmd;
1309
1310 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
1311
1312 if (cmd & PCI_COMMAND_INTX_DISABLE)
1313 cmd &= ~PCI_COMMAND_INTX_DISABLE;
1314 if (cmd & PCI_COMMAND_IO)
1315 cmd &= ~PCI_COMMAND_IO;
1316 if (0 == (cmd & PCI_COMMAND_MEMORY))
1317 cmd |= PCI_COMMAND_MEMORY;
1318 if (0 == (cmd & PCI_COMMAND_MASTER))
1319 cmd |= PCI_COMMAND_MASTER;
1320 pci_write_config_word(pdev, PCI_COMMAND, cmd);
1321
1322 /*
1323 * some motherboards BIOS(PXE/EFI) driver may set PME
1324 * while they transfer control to OS (Windows/Linux)
1325 * so we should clear this bit before NIC work normally
1326 */
1327 pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
1328}
1329
1330/*
1331 * atl2_probe - Device Initialization Routine
1332 * @pdev: PCI device information struct
1333 * @ent: entry in atl2_pci_tbl
1334 *
1335 * Returns 0 on success, negative on failure
1336 *
1337 * atl2_probe initializes an adapter identified by a pci_dev structure.
1338 * The OS initialization, configuring of the adapter private structure,
1339 * and a hardware reset occur.
1340 */
1341static int __devinit atl2_probe(struct pci_dev *pdev,
1342 const struct pci_device_id *ent)
1343{
1344 struct net_device *netdev;
1345 struct atl2_adapter *adapter;
1346 static int cards_found;
1347 unsigned long mmio_start;
1348 int mmio_len;
1349 int err;
1350
1351 cards_found = 0;
1352
1353 err = pci_enable_device(pdev);
1354 if (err)
1355 return err;
1356
1357 /*
1358 * atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA
1359 * until the kernel has the proper infrastructure to support 64-bit DMA
1360 * on these devices.
1361 */
1362 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) &&
1363 pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
1364 printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
1365 goto err_dma;
1366 }
1367
1368 /* Mark all PCI regions associated with PCI device
1369 * pdev as being reserved by owner atl2_driver_name */
1370 err = pci_request_regions(pdev, atl2_driver_name);
1371 if (err)
1372 goto err_pci_reg;
1373
1374 /* Enables bus-mastering on the device and calls
1375 * pcibios_set_master to do the needed arch specific settings */
1376 pci_set_master(pdev);
1377
1378 err = -ENOMEM;
1379 netdev = alloc_etherdev(sizeof(struct atl2_adapter));
1380 if (!netdev)
1381 goto err_alloc_etherdev;
1382
1383 SET_NETDEV_DEV(netdev, &pdev->dev);
1384
1385 pci_set_drvdata(pdev, netdev);
1386 adapter = netdev_priv(netdev);
1387 adapter->netdev = netdev;
1388 adapter->pdev = pdev;
1389 adapter->hw.back = adapter;
1390
1391 mmio_start = pci_resource_start(pdev, 0x0);
1392 mmio_len = pci_resource_len(pdev, 0x0);
1393
1394 adapter->hw.mem_rang = (u32)mmio_len;
1395 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
1396 if (!adapter->hw.hw_addr) {
1397 err = -EIO;
1398 goto err_ioremap;
1399 }
1400
1401 atl2_setup_pcicmd(pdev);
1402
1403 netdev->open = &atl2_open;
1404 netdev->stop = &atl2_close;
1405 netdev->hard_start_xmit = &atl2_xmit_frame;
1406 netdev->get_stats = &atl2_get_stats;
1407 netdev->set_multicast_list = &atl2_set_multi;
1408 netdev->set_mac_address = &atl2_set_mac;
1409 netdev->change_mtu = &atl2_change_mtu;
1410 netdev->do_ioctl = &atl2_ioctl;
1411 atl2_set_ethtool_ops(netdev);
1412
1413#ifdef HAVE_TX_TIMEOUT
1414 netdev->tx_timeout = &atl2_tx_timeout;
1415 netdev->watchdog_timeo = 5 * HZ;
1416#endif
1417#ifdef NETIF_F_HW_VLAN_TX
1418 netdev->vlan_rx_register = atl2_vlan_rx_register;
1419#endif
1420 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1421
1422 netdev->mem_start = mmio_start;
1423 netdev->mem_end = mmio_start + mmio_len;
1424 adapter->bd_number = cards_found;
1425 adapter->pci_using_64 = false;
1426
1427 /* setup the private structure */
1428 err = atl2_sw_init(adapter);
1429 if (err)
1430 goto err_sw_init;
1431
1432 err = -EIO;
1433
1434#ifdef NETIF_F_HW_VLAN_TX
1435 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
1436#endif
1437
1438#ifdef NETIF_F_LLTX
1439 netdev->features |= NETIF_F_LLTX;
1440#endif
1441
1442 /* Init PHY as early as possible due to power saving issue */
1443 atl2_phy_init(&adapter->hw);
1444
1445 /* reset the controller to
1446 * put the device in a known good starting state */
1447
1448 if (atl2_reset_hw(&adapter->hw)) {
1449 err = -EIO;
1450 goto err_reset;
1451 }
1452
1453 /* copy the MAC address out of the EEPROM */
1454 atl2_read_mac_addr(&adapter->hw);
1455 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
1456/* FIXME: do we still need this? */
1457#ifdef ETHTOOL_GPERMADDR
1458 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
1459
1460 if (!is_valid_ether_addr(netdev->perm_addr)) {
1461#else
1462 if (!is_valid_ether_addr(netdev->dev_addr)) {
1463#endif
1464 err = -EIO;
1465 goto err_eeprom;
1466 }
1467
1468 atl2_check_options(adapter);
1469
1470 init_timer(&adapter->watchdog_timer);
1471 adapter->watchdog_timer.function = &atl2_watchdog;
1472 adapter->watchdog_timer.data = (unsigned long) adapter;
1473
1474 init_timer(&adapter->phy_config_timer);
1475 adapter->phy_config_timer.function = &atl2_phy_config;
1476 adapter->phy_config_timer.data = (unsigned long) adapter;
1477
1478 INIT_WORK(&adapter->reset_task, atl2_reset_task);
1479 INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
1480
1481 strcpy(netdev->name, "eth%d"); /* ?? */
1482 err = register_netdev(netdev);
1483 if (err)
1484 goto err_register;
1485
1486 /* assume we have no link for now */
1487 netif_carrier_off(netdev);
1488 netif_stop_queue(netdev);
1489
1490 cards_found++;
1491
1492 return 0;
1493
1494err_reset:
1495err_register:
1496err_sw_init:
1497err_eeprom:
1498 iounmap(adapter->hw.hw_addr);
1499err_ioremap:
1500 free_netdev(netdev);
1501err_alloc_etherdev:
1502 pci_release_regions(pdev);
1503err_pci_reg:
1504err_dma:
1505 pci_disable_device(pdev);
1506 return err;
1507}
1508
1509/*
1510 * atl2_remove - Device Removal Routine
1511 * @pdev: PCI device information struct
1512 *
1513 * atl2_remove is called by the PCI subsystem to alert the driver
1514 * that it should release a PCI device. The could be caused by a
1515 * Hot-Plug event, or because the driver is going to be removed from
1516 * memory.
1517 */
1518/* FIXME: write the original MAC address back in case it was changed from a
1519 * BIOS-set value, as in atl1 -- CHS */
static void __devexit atl2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl2_adapter *adapter = netdev_priv(netdev);

	/* flush_scheduled work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__ATL2_DOWN, &adapter->flags);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_config_timer);

	/* wait for reset_task / link_chg_task still in flight */
	flush_scheduled_work();

	unregister_netdev(netdev);

	/* presumably forces the PHY into power-save -- see atl2_force_ps */
	atl2_force_ps(&adapter->hw);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
1545
1546static int atl2_suspend(struct pci_dev *pdev, pm_message_t state)
1547{
1548 struct net_device *netdev = pci_get_drvdata(pdev);
1549 struct atl2_adapter *adapter = netdev_priv(netdev);
1550 struct atl2_hw *hw = &adapter->hw;
1551 u16 speed, duplex;
1552 u32 ctrl = 0;
1553 u32 wufc = adapter->wol;
1554
1555#ifdef CONFIG_PM
1556 int retval = 0;
1557#endif
1558
1559 netif_device_detach(netdev);
1560
1561 if (netif_running(netdev)) {
1562 WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
1563 atl2_down(adapter);
1564 }
1565
1566#ifdef CONFIG_PM
1567 retval = pci_save_state(pdev);
1568 if (retval)
1569 return retval;
1570#endif
1571
1572 atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1573 atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
1574 if (ctrl & BMSR_LSTATUS)
1575 wufc &= ~ATLX_WUFC_LNKC;
1576
1577 if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) {
1578 u32 ret_val;
1579 /* get current link speed & duplex */
1580 ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
1581 if (ret_val) {
1582 printk(KERN_DEBUG
1583 "%s: get speed&duplex error while suspend\n",
1584 atl2_driver_name);
1585 goto wol_dis;
1586 }
1587
1588 ctrl = 0;
1589
1590 /* turn on magic packet wol */
1591 if (wufc & ATLX_WUFC_MAG)
1592 ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
1593
1594 /* ignore Link Chg event when Link is up */
1595 ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
1596
1597 /* Config MAC CTRL Register */
1598 ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
1599 if (FULL_DUPLEX == adapter->link_duplex)
1600 ctrl |= MAC_CTRL_DUPLX;
1601 ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1602 ctrl |= (((u32)adapter->hw.preamble_len &
1603 MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
1604 ctrl |= (((u32)(adapter->hw.retry_buf &
1605 MAC_CTRL_HALF_LEFT_BUF_MASK)) <<
1606 MAC_CTRL_HALF_LEFT_BUF_SHIFT);
1607 if (wufc & ATLX_WUFC_MAG) {
1608 /* magic packet maybe Broadcast&multicast&Unicast */
1609 ctrl |= MAC_CTRL_BC_EN;
1610 }
1611
1612 ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl);
1613
1614 /* pcie patch */
1615 ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
1616 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
1617 ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
1618 ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
1619 ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
1620 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
1621
1622 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
1623 goto suspend_exit;
1624 }
1625
1626 if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) {
1627 /* link is down, so only LINK CHG WOL event enable */
1628 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
1629 ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
1630 ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0);
1631
1632 /* pcie patch */
1633 ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
1634 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
1635 ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
1636 ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
1637 ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
1638 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
1639
1640 hw->phy_configured = false; /* re-init PHY when resume */
1641
1642 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
1643
1644 goto suspend_exit;
1645 }
1646
1647wol_dis:
1648 /* WOL disabled */
1649 ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0);
1650
1651 /* pcie patch */
1652 ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
1653 ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
1654 ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
1655 ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
1656 ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
1657 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);
1658
1659 atl2_force_ps(hw);
1660 hw->phy_configured = false; /* re-init PHY when resume */
1661
1662 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
1663
1664suspend_exit:
1665 if (netif_running(netdev))
1666 atl2_free_irq(adapter);
1667
1668 pci_disable_device(pdev);
1669
1670 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1671
1672 return 0;
1673}
1674
1675#ifdef CONFIG_PM
1676static int atl2_resume(struct pci_dev *pdev)
1677{
1678 struct net_device *netdev = pci_get_drvdata(pdev);
1679 struct atl2_adapter *adapter = netdev_priv(netdev);
1680 u32 err;
1681
1682 pci_set_power_state(pdev, PCI_D0);
1683 pci_restore_state(pdev);
1684
1685 err = pci_enable_device(pdev);
1686 if (err) {
1687 printk(KERN_ERR
1688 "atl2: Cannot enable PCI device from suspend\n");
1689 return err;
1690 }
1691
1692 pci_set_master(pdev);
1693
1694 ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
1695
1696 pci_enable_wake(pdev, PCI_D3hot, 0);
1697 pci_enable_wake(pdev, PCI_D3cold, 0);
1698
1699 ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
1700
1701 err = atl2_request_irq(adapter);
1702 if (netif_running(netdev) && err)
1703 return err;
1704
1705 atl2_reset_hw(&adapter->hw);
1706
1707 if (netif_running(netdev))
1708 atl2_up(adapter);
1709
1710 netif_device_attach(netdev);
1711
1712 return 0;
1713}
1714#endif
1715
static void atl2_shutdown(struct pci_dev *pdev)
{
	/* reuse the suspend path to arm WOL and power the device down */
	atl2_suspend(pdev, PMSG_SUSPEND);
}
1720
/* PCI driver glue: probe/remove plus power-management entry points */
static struct pci_driver atl2_driver = {
	.name = atl2_driver_name,
	.id_table = atl2_pci_tbl,
	.probe = atl2_probe,
	.remove = __devexit_p(atl2_remove),
	/* Power Management Hooks */
	.suspend = atl2_suspend,
#ifdef CONFIG_PM
	.resume = atl2_resume,
#endif
	.shutdown = atl2_shutdown,
};
1733
1734/*
1735 * atl2_init_module - Driver Registration Routine
1736 *
1737 * atl2_init_module is the first routine called when the driver is
1738 * loaded. All it does is register with the PCI subsystem.
1739 */
static int __init atl2_init_module(void)
{
	/* print the banner, then register; device setup happens later
	 * via atl2_probe() when the PCI core finds a matching device */
	printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
		atl2_driver_version);
	printk(KERN_INFO "%s\n", atl2_copyright);
	return pci_register_driver(&atl2_driver);
}
module_init(atl2_init_module);
1748
1749/*
1750 * atl2_exit_module - Driver Exit Cleanup Routine
1751 *
1752 * atl2_exit_module is called just before the driver is removed
1753 * from memory.
1754 */
static void __exit atl2_exit_module(void)
{
	/* unregistering triggers atl2_remove() for each bound device */
	pci_unregister_driver(&atl2_driver);
}
module_exit(atl2_exit_module);
1760
1761static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
1762{
1763 struct atl2_adapter *adapter = hw->back;
1764 pci_read_config_word(adapter->pdev, reg, value);
1765}
1766
1767static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
1768{
1769 struct atl2_adapter *adapter = hw->back;
1770 pci_write_config_word(adapter->pdev, reg, *value);
1771}
1772
1773static int atl2_get_settings(struct net_device *netdev,
1774 struct ethtool_cmd *ecmd)
1775{
1776 struct atl2_adapter *adapter = netdev_priv(netdev);
1777 struct atl2_hw *hw = &adapter->hw;
1778
1779 ecmd->supported = (SUPPORTED_10baseT_Half |
1780 SUPPORTED_10baseT_Full |
1781 SUPPORTED_100baseT_Half |
1782 SUPPORTED_100baseT_Full |
1783 SUPPORTED_Autoneg |
1784 SUPPORTED_TP);
1785 ecmd->advertising = ADVERTISED_TP;
1786
1787 ecmd->advertising |= ADVERTISED_Autoneg;
1788 ecmd->advertising |= hw->autoneg_advertised;
1789
1790 ecmd->port = PORT_TP;
1791 ecmd->phy_address = 0;
1792 ecmd->transceiver = XCVR_INTERNAL;
1793
1794 if (adapter->link_speed != SPEED_0) {
1795 ecmd->speed = adapter->link_speed;
1796 if (adapter->link_duplex == FULL_DUPLEX)
1797 ecmd->duplex = DUPLEX_FULL;
1798 else
1799 ecmd->duplex = DUPLEX_HALF;
1800 } else {
1801 ecmd->speed = -1;
1802 ecmd->duplex = -1;
1803 }
1804
1805 ecmd->autoneg = AUTONEG_ENABLE;
1806 return 0;
1807}
1808
1809static int atl2_set_settings(struct net_device *netdev,
1810 struct ethtool_cmd *ecmd)
1811{
1812 struct atl2_adapter *adapter = netdev_priv(netdev);
1813 struct atl2_hw *hw = &adapter->hw;
1814
1815 while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
1816 msleep(1);
1817
1818 if (ecmd->autoneg == AUTONEG_ENABLE) {
1819#define MY_ADV_MASK (ADVERTISE_10_HALF | \
1820 ADVERTISE_10_FULL | \
1821 ADVERTISE_100_HALF| \
1822 ADVERTISE_100_FULL)
1823
1824 if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) {
1825 hw->MediaType = MEDIA_TYPE_AUTO_SENSOR;
1826 hw->autoneg_advertised = MY_ADV_MASK;
1827 } else if ((ecmd->advertising & MY_ADV_MASK) ==
1828 ADVERTISE_100_FULL) {
1829 hw->MediaType = MEDIA_TYPE_100M_FULL;
1830 hw->autoneg_advertised = ADVERTISE_100_FULL;
1831 } else if ((ecmd->advertising & MY_ADV_MASK) ==
1832 ADVERTISE_100_HALF) {
1833 hw->MediaType = MEDIA_TYPE_100M_HALF;
1834 hw->autoneg_advertised = ADVERTISE_100_HALF;
1835 } else if ((ecmd->advertising & MY_ADV_MASK) ==
1836 ADVERTISE_10_FULL) {
1837 hw->MediaType = MEDIA_TYPE_10M_FULL;
1838 hw->autoneg_advertised = ADVERTISE_10_FULL;
1839 } else if ((ecmd->advertising & MY_ADV_MASK) ==
1840 ADVERTISE_10_HALF) {
1841 hw->MediaType = MEDIA_TYPE_10M_HALF;
1842 hw->autoneg_advertised = ADVERTISE_10_HALF;
1843 } else {
1844 clear_bit(__ATL2_RESETTING, &adapter->flags);
1845 return -EINVAL;
1846 }
1847 ecmd->advertising = hw->autoneg_advertised |
1848 ADVERTISED_TP | ADVERTISED_Autoneg;
1849 } else {
1850 clear_bit(__ATL2_RESETTING, &adapter->flags);
1851 return -EINVAL;
1852 }
1853
1854 /* reset the link */
1855 if (netif_running(adapter->netdev)) {
1856 atl2_down(adapter);
1857 atl2_up(adapter);
1858 } else
1859 atl2_reset_hw(&adapter->hw);
1860
1861 clear_bit(__ATL2_RESETTING, &adapter->flags);
1862 return 0;
1863}
1864
1865static u32 atl2_get_tx_csum(struct net_device *netdev)
1866{
1867 return (netdev->features & NETIF_F_HW_CSUM) != 0;
1868}
1869
static u32 atl2_get_msglevel(struct net_device *netdev)
{
	/* the driver keeps no msglevel state; always report 0 */
	return 0;
}
1874
1875/*
1876 * It's sane for this to be empty, but we might want to take advantage of this.
1877 */
static void atl2_set_msglevel(struct net_device *netdev, u32 data)
{
	/* intentionally a no-op; see the comment above */
}
1881
1882static int atl2_get_regs_len(struct net_device *netdev)
1883{
1884#define ATL2_REGS_LEN 42
1885 return sizeof(u32) * ATL2_REGS_LEN;
1886}
1887
/*
 * atl2_get_regs - ethtool register dump
 * @netdev: network interface device structure
 * @regs: ethtool_regs header (version filled in here)
 * @p: caller buffer of atl2_get_regs_len() bytes
 */
static void atl2_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct atl2_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u16 phy_data;

	memset(p, 0, sizeof(u32) * ATL2_REGS_LEN);

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP);
	regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
	regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG);
	regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL);
	regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
	regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL);
	regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT);
	regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
	regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE);
	regs_buff[9] = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER);
	regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS);
	regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL);
	regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK);
	regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL);
	regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG);
	regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
	regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4);
	regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE);
	regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4);
	regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
	regs_buff[20] = ATL2_READ_REG(hw, REG_MTU);
	regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL);
	regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END);
	regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI);
	regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO);
	regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE);
	regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO);
	regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE);
	regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO);
	regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM);
	regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR);
	regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH);
	regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW);
	regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH);
	regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH);
	regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX);
	regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX);
	/* index 37 is intentionally left zero (cleared by the memset) */
	regs_buff[38] = ATL2_READ_REG(hw, REG_ISR);
	regs_buff[39] = ATL2_READ_REG(hw, REG_IMR);

	/* the last two slots hold the PHY's basic control/status regs */
	atl2_read_phy_reg(hw, MII_BMCR, &phy_data);
	regs_buff[40] = (u32)phy_data;
	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
	regs_buff[41] = (u32)phy_data;
}
1945
1946static int atl2_get_eeprom_len(struct net_device *netdev)
1947{
1948 struct atl2_adapter *adapter = netdev_priv(netdev);
1949
1950 if (!atl2_check_eeprom_exist(&adapter->hw))
1951 return 512;
1952 else
1953 return 0;
1954}
1955
1956static int atl2_get_eeprom(struct net_device *netdev,
1957 struct ethtool_eeprom *eeprom, u8 *bytes)
1958{
1959 struct atl2_adapter *adapter = netdev_priv(netdev);
1960 struct atl2_hw *hw = &adapter->hw;
1961 u32 *eeprom_buff;
1962 int first_dword, last_dword;
1963 int ret_val = 0;
1964 int i;
1965
1966 if (eeprom->len == 0)
1967 return -EINVAL;
1968
1969 if (atl2_check_eeprom_exist(hw))
1970 return -EINVAL;
1971
1972 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1973
1974 first_dword = eeprom->offset >> 2;
1975 last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
1976
1977 eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1),
1978 GFP_KERNEL);
1979 if (!eeprom_buff)
1980 return -ENOMEM;
1981
1982 for (i = first_dword; i < last_dword; i++) {
1983 if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword])))
1984 return -EIO;
1985 }
1986
1987 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
1988 eeprom->len);
1989 kfree(eeprom_buff);
1990
1991 return ret_val;
1992}
1993
1994static int atl2_set_eeprom(struct net_device *netdev,
1995 struct ethtool_eeprom *eeprom, u8 *bytes)
1996{
1997 struct atl2_adapter *adapter = netdev_priv(netdev);
1998 struct atl2_hw *hw = &adapter->hw;
1999 u32 *eeprom_buff;
2000 u32 *ptr;
2001 int max_len, first_dword, last_dword, ret_val = 0;
2002 int i;
2003
2004 if (eeprom->len == 0)
2005 return -EOPNOTSUPP;
2006
2007 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
2008 return -EFAULT;
2009
2010 max_len = 512;
2011
2012 first_dword = eeprom->offset >> 2;
2013 last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
2014 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
2015 if (!eeprom_buff)
2016 return -ENOMEM;
2017
2018 ptr = (u32 *)eeprom_buff;
2019
2020 if (eeprom->offset & 3) {
2021 /* need read/modify/write of first changed EEPROM word */
2022 /* only the second byte of the word is being modified */
2023 if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0])))
2024 return -EIO;
2025 ptr++;
2026 }
2027 if (((eeprom->offset + eeprom->len) & 3)) {
2028 /*
2029 * need read/modify/write of last changed EEPROM word
2030 * only the first byte of the word is being modified
2031 */
2032 if (!atl2_read_eeprom(hw, last_dword * 4,
2033 &(eeprom_buff[last_dword - first_dword])))
2034 return -EIO;
2035 }
2036
2037 /* Device's eeprom is always little-endian, word addressable */
2038 memcpy(ptr, bytes, eeprom->len);
2039
2040 for (i = 0; i < last_dword - first_dword + 1; i++) {
2041 if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i]))
2042 return -EIO;
2043 }
2044
2045 kfree(eeprom_buff);
2046 return ret_val;
2047}
2048
/* ethtool get_drvinfo: fill in driver identity strings and dump sizes. */
static void atl2_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);

	/* NOTE(review): strncpy leaves the field unterminated if the
	 * source is >= 32 chars -- the sources here are short driver
	 * constants, but verify if any of them ever grows */
	strncpy(drvinfo->driver, atl2_driver_name, 32);
	strncpy(drvinfo->version, atl2_driver_version, 32);
	strncpy(drvinfo->fw_version, "L2", 32);
	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
	drvinfo->n_stats = 0;
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = atl2_get_regs_len(netdev);
	drvinfo->eedump_len = atl2_get_eeprom_len(netdev);
}
2063
2064static void atl2_get_wol(struct net_device *netdev,
2065 struct ethtool_wolinfo *wol)
2066{
2067 struct atl2_adapter *adapter = netdev_priv(netdev);
2068
2069 wol->supported = WAKE_MAGIC;
2070 wol->wolopts = 0;
2071
2072 if (adapter->wol & ATLX_WUFC_EX)
2073 wol->wolopts |= WAKE_UCAST;
2074 if (adapter->wol & ATLX_WUFC_MC)
2075 wol->wolopts |= WAKE_MCAST;
2076 if (adapter->wol & ATLX_WUFC_BC)
2077 wol->wolopts |= WAKE_BCAST;
2078 if (adapter->wol & ATLX_WUFC_MAG)
2079 wol->wolopts |= WAKE_MAGIC;
2080 if (adapter->wol & ATLX_WUFC_LNKC)
2081 wol->wolopts |= WAKE_PHY;
2082}
2083
2084static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2085{
2086 struct atl2_adapter *adapter = netdev_priv(netdev);
2087
2088 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
2089 return -EOPNOTSUPP;
2090
2091 if (wol->wolopts & (WAKE_MCAST|WAKE_BCAST|WAKE_MCAST))
2092 return -EOPNOTSUPP;
2093
2094 /* these settings will always override what we currently have */
2095 adapter->wol = 0;
2096
2097 if (wol->wolopts & WAKE_MAGIC)
2098 adapter->wol |= ATLX_WUFC_MAG;
2099 if (wol->wolopts & WAKE_PHY)
2100 adapter->wol |= ATLX_WUFC_LNKC;
2101
2102 return 0;
2103}
2104
static int atl2_nway_reset(struct net_device *netdev)
{
	/* restart autonegotiation by bouncing the interface, if it is up */
	if (netif_running(netdev))
		atl2_reinit_locked(netdev_priv(netdev));

	return 0;
}
2112
/* ethtool entry points implemented by this driver */
static struct ethtool_ops atl2_ethtool_ops = {
	.get_settings		= atl2_get_settings,
	.set_settings		= atl2_set_settings,
	.get_drvinfo		= atl2_get_drvinfo,
	.get_regs_len		= atl2_get_regs_len,
	.get_regs		= atl2_get_regs,
	.get_wol		= atl2_get_wol,
	.set_wol		= atl2_set_wol,
	.get_msglevel		= atl2_get_msglevel,
	.set_msglevel		= atl2_set_msglevel,
	.nway_reset		= atl2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= atl2_get_eeprom_len,
	.get_eeprom		= atl2_get_eeprom,
	.set_eeprom		= atl2_set_eeprom,
	.get_tx_csum		= atl2_get_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef NETIF_F_TSO
	.get_tso		= ethtool_op_get_tso,
#endif
};
2135
static void atl2_set_ethtool_ops(struct net_device *netdev)
{
	/* attach the atl2_ethtool_ops table defined above */
	SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
}
2140
/* Byte-order helpers used when assembling the MAC address read from
 * EEPROM/flash.  NOTE: these are function-like macros that expand the
 * argument more than once (LONGSWAP expands LBYTESWAP(a) twice), so
 * they must never be given an argument with side effects. */
#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \
	(((a) & 0xff00ff00) >> 8))
#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
#define SHORTSWAP(a) (((a) << 8) | ((a) >> 8))
2145
2146/*
2147 * Reset the transmit and receive units; mask and clear all interrupts.
2148 *
2149 * hw - Struct containing variables accessed by shared code
2150 * return : 0 or idle status (if error)
2151 */
2152static s32 atl2_reset_hw(struct atl2_hw *hw)
2153{
2154 u32 icr;
2155 u16 pci_cfg_cmd_word;
2156 int i;
2157
2158 /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
2159 atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
2160 if ((pci_cfg_cmd_word &
2161 (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) !=
2162 (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) {
2163 pci_cfg_cmd_word |=
2164 (CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER);
2165 atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
2166 }
2167
2168 /* Clear Interrupt mask to stop board from generating
2169 * interrupts & Clear any pending interrupt events
2170 */
2171 /* FIXME */
2172 /* ATL2_WRITE_REG(hw, REG_IMR, 0); */
2173 /* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */
2174
2175 /* Issue Soft Reset to the MAC. This will reset the chip's
2176 * transmit, receive, DMA. It will not effect
2177 * the current PCI configuration. The global reset bit is self-
2178 * clearing, and should clear within a microsecond.
2179 */
2180 ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
2181 wmb();
2182 msleep(1); /* delay about 1ms */
2183
2184 /* Wait at least 10ms for All module to be Idle */
2185 for (i = 0; i < 10; i++) {
2186 icr = ATL2_READ_REG(hw, REG_IDLE_STATUS);
2187 if (!icr)
2188 break;
2189 msleep(1); /* delay 1 ms */
2190 cpu_relax();
2191 }
2192
2193 if (icr)
2194 return icr;
2195
2196 return 0;
2197}
2198
/* SPI bus timing parameters, in controller clock units, programmed
 * into REG_SPI_FLASH_CTRL by atl2_spi_read(). */
#define CUSTOM_SPI_CS_SETUP 2
#define CUSTOM_SPI_CLK_HI 2
#define CUSTOM_SPI_CLK_LO 2
#define CUSTOM_SPI_CS_HOLD 2
#define CUSTOM_SPI_CS_HI 3

/* Per-vendor SPI flash command opcodes, indexed by hw->flash_vendor
 * (0 = Atmel, 1 = SST, 2 = ST); loaded into the controller by
 * atl2_init_flash_opcode(). */
static struct atl2_spi_flash_dev flash_table[] =
{
/* MFR WRSR READ PROGRAM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */
{"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 },
{"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 },
{"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 },
};
2212
/*
 * atl2_spi_read - read one 32-bit word from the SPI flash
 * @hw:   Struct containing variables accessed by shared code
 * @addr: byte offset within the flash
 * @buf:  receives the dword on success
 *
 * Programs the SPI timing fields and instruction index 0x1 (the READ
 * slot of the opcode table), starts the transfer, then polls up to
 * ~10ms for the START bit to self-clear.  Returns true on success,
 * false on timeout.
 */
static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf)
{
	int i;
	u32 value;

	ATL2_WRITE_REG(hw, REG_SPI_DATA, 0);
	ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr);

	value = SPI_FLASH_CTRL_WAIT_READY |
		(CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
		SPI_FLASH_CTRL_CS_SETUP_SHIFT |
		(CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) <<
		SPI_FLASH_CTRL_CLK_HI_SHIFT |
		(CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) <<
		SPI_FLASH_CTRL_CLK_LO_SHIFT |
		(CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) <<
		SPI_FLASH_CTRL_CS_HOLD_SHIFT |
		(CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) <<
		SPI_FLASH_CTRL_CS_HI_SHIFT |
		(0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT;

	/* Latch the configuration first, then set START in a second
	 * write to kick off the transfer. */
	ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);

	value |= SPI_FLASH_CTRL_START;

	ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);

	/* START self-clears when the transfer completes. */
	for (i = 0; i < 10; i++) {
		msleep(1);
		value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
		if (!(value & SPI_FLASH_CTRL_START))
			break;
	}

	if (value & SPI_FLASH_CTRL_START)
		return false; /* timed out */

	*buf = ATL2_READ_REG(hw, REG_SPI_DATA);

	return true;
}
2254
2255/*
2256 * get_permanent_address
2257 * return 0 if get valid mac address,
2258 */
2259static int get_permanent_address(struct atl2_hw *hw)
2260{
2261 u32 Addr[2];
2262 u32 i, Control;
2263 u16 Register;
2264 u8 EthAddr[NODE_ADDRESS_SIZE];
2265 bool KeyValid;
2266
2267 if (is_valid_ether_addr(hw->perm_mac_addr))
2268 return 0;
2269
2270 Addr[0] = 0;
2271 Addr[1] = 0;
2272
2273 if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */
2274 Register = 0;
2275 KeyValid = false;
2276
2277 /* Read out all EEPROM content */
2278 i = 0;
2279 while (1) {
2280 if (atl2_read_eeprom(hw, i + 0x100, &Control)) {
2281 if (KeyValid) {
2282 if (Register == REG_MAC_STA_ADDR)
2283 Addr[0] = Control;
2284 else if (Register ==
2285 (REG_MAC_STA_ADDR + 4))
2286 Addr[1] = Control;
2287 KeyValid = false;
2288 } else if ((Control & 0xff) == 0x5A) {
2289 KeyValid = true;
2290 Register = (u16) (Control >> 16);
2291 } else {
2292 /* assume data end while encount an invalid KEYWORD */
2293 break;
2294 }
2295 } else {
2296 break; /* read error */
2297 }
2298 i += 4;
2299 }
2300
2301 *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
2302 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
2303
2304 if (is_valid_ether_addr(EthAddr)) {
2305 memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
2306 return 0;
2307 }
2308 return 1;
2309 }
2310
2311 /* see if SPI flash exists? */
2312 Addr[0] = 0;
2313 Addr[1] = 0;
2314 Register = 0;
2315 KeyValid = false;
2316 i = 0;
2317 while (1) {
2318 if (atl2_spi_read(hw, i + 0x1f000, &Control)) {
2319 if (KeyValid) {
2320 if (Register == REG_MAC_STA_ADDR)
2321 Addr[0] = Control;
2322 else if (Register == (REG_MAC_STA_ADDR + 4))
2323 Addr[1] = Control;
2324 KeyValid = false;
2325 } else if ((Control & 0xff) == 0x5A) {
2326 KeyValid = true;
2327 Register = (u16) (Control >> 16);
2328 } else {
2329 break; /* data end */
2330 }
2331 } else {
2332 break; /* read error */
2333 }
2334 i += 4;
2335 }
2336
2337 *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
2338 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]);
2339 if (is_valid_ether_addr(EthAddr)) {
2340 memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
2341 return 0;
2342 }
2343 /* maybe MAC-address is from BIOS */
2344 Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
2345 Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4);
2346 *(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
2347 *(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);
2348
2349 if (is_valid_ether_addr(EthAddr)) {
2350 memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
2351 return 0;
2352 }
2353
2354 return 1;
2355}
2356
2357/*
2358 * Reads the adapter's MAC address from the EEPROM
2359 *
2360 * hw - Struct containing variables accessed by shared code
2361 */
2362static s32 atl2_read_mac_addr(struct atl2_hw *hw)
2363{
2364 u16 i;
2365
2366 if (get_permanent_address(hw)) {
2367 /* for test */
2368 /* FIXME: shouldn't we use random_ether_addr() here? */
2369 hw->perm_mac_addr[0] = 0x00;
2370 hw->perm_mac_addr[1] = 0x13;
2371 hw->perm_mac_addr[2] = 0x74;
2372 hw->perm_mac_addr[3] = 0x00;
2373 hw->perm_mac_addr[4] = 0x5c;
2374 hw->perm_mac_addr[5] = 0x38;
2375 }
2376
2377 for (i = 0; i < NODE_ADDRESS_SIZE; i++)
2378 hw->mac_addr[i] = hw->perm_mac_addr[i];
2379
2380 return 0;
2381}
2382
2383/*
2384 * Hashes an address to determine its location in the multicast table
2385 *
2386 * hw - Struct containing variables accessed by shared code
2387 * mc_addr - the multicast address to hash
2388 *
2389 * atl2_hash_mc_addr
2390 * purpose
2391 * set hash value for a multicast address
2392 * hash calcu processing :
2393 * 1. calcu 32bit CRC for multicast address
2394 * 2. reverse crc with MSB to LSB
2395 */
2396static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr)
2397{
2398 u32 crc32, value;
2399 int i;
2400
2401 value = 0;
2402 crc32 = ether_crc_le(6, mc_addr);
2403
2404 for (i = 0; i < 32; i++)
2405 value |= (((crc32 >> i) & 1) << (31 - i));
2406
2407 return value;
2408}
2409
2410/*
2411 * Sets the bit in the multicast table corresponding to the hash value.
2412 *
2413 * hw - Struct containing variables accessed by shared code
2414 * hash_value - Multicast address hash value
2415 */
2416static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value)
2417{
2418 u32 hash_bit, hash_reg;
2419 u32 mta;
2420
2421 /* The HASH Table is a register array of 2 32-bit registers.
2422 * It is treated like an array of 64 bits. We want to set
2423 * bit BitArray[hash_value]. So we figure out what register
2424 * the bit is in, read it, OR in the new bit, then write
2425 * back the new value. The register is determined by the
2426 * upper 7 bits of the hash value and the bit within that
2427 * register are determined by the lower 5 bits of the value.
2428 */
2429 hash_reg = (hash_value >> 31) & 0x1;
2430 hash_bit = (hash_value >> 26) & 0x1F;
2431
2432 mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
2433
2434 mta |= (1 << hash_bit);
2435
2436 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
2437}
2438
2439/*
2440 * atl2_init_pcie - init PCIE module
2441 */
2442static void atl2_init_pcie(struct atl2_hw *hw)
2443{
2444 u32 value;
2445 value = LTSSM_TEST_MODE_DEF;
2446 ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
2447
2448 value = PCIE_DLL_TX_CTRL1_DEF;
2449 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value);
2450}
2451
2452static void atl2_init_flash_opcode(struct atl2_hw *hw)
2453{
2454 if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
2455 hw->flash_vendor = 0; /* ATMEL */
2456
2457 /* Init OP table */
2458 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM,
2459 flash_table[hw->flash_vendor].cmdPROGRAM);
2460 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE,
2461 flash_table[hw->flash_vendor].cmdSECTOR_ERASE);
2462 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE,
2463 flash_table[hw->flash_vendor].cmdCHIP_ERASE);
2464 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID,
2465 flash_table[hw->flash_vendor].cmdRDID);
2466 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN,
2467 flash_table[hw->flash_vendor].cmdWREN);
2468 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR,
2469 flash_table[hw->flash_vendor].cmdRDSR);
2470 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR,
2471 flash_table[hw->flash_vendor].cmdWRSR);
2472 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ,
2473 flash_table[hw->flash_vendor].cmdREAD);
2474}
2475
2476/********************************************************************
2477* Performs basic configuration of the adapter.
2478*
2479* hw - Struct containing variables accessed by shared code
2480* Assumes that the controller has previously been reset and is in a
2481* post-reset uninitialized state. Initializes multicast table,
2482* and Calls routines to setup link
2483* Leaves the transmit and receive units disabled and uninitialized.
2484********************************************************************/
2485static s32 atl2_init_hw(struct atl2_hw *hw)
2486{
2487 u32 ret_val = 0;
2488
2489 atl2_init_pcie(hw);
2490
2491 /* Zero out the Multicast HASH table */
2492 /* clear the old settings from the multicast hash table */
2493 ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
2494 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
2495
2496 atl2_init_flash_opcode(hw);
2497
2498 ret_val = atl2_phy_init(hw);
2499
2500 return ret_val;
2501}
2502
2503/*
2504 * Detects the current speed and duplex settings of the hardware.
2505 *
2506 * hw - Struct containing variables accessed by shared code
2507 * speed - Speed of the connection
2508 * duplex - Duplex setting of the connection
2509 */
2510static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
2511 u16 *duplex)
2512{
2513 s32 ret_val;
2514 u16 phy_data;
2515
2516 /* Read PHY Specific Status Register (17) */
2517 ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
2518 if (ret_val)
2519 return ret_val;
2520
2521 if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
2522 return ATLX_ERR_PHY_RES;
2523
2524 switch (phy_data & MII_ATLX_PSSR_SPEED) {
2525 case MII_ATLX_PSSR_100MBS:
2526 *speed = SPEED_100;
2527 break;
2528 case MII_ATLX_PSSR_10MBS:
2529 *speed = SPEED_10;
2530 break;
2531 default:
2532 return ATLX_ERR_PHY_SPEED;
2533 break;
2534 }
2535
2536 if (phy_data & MII_ATLX_PSSR_DPLX)
2537 *duplex = FULL_DUPLEX;
2538 else
2539 *duplex = HALF_DUPLEX;
2540
2541 return 0;
2542}
2543
2544/*
2545 * Reads the value from a PHY register
2546 * hw - Struct containing variables accessed by shared code
2547 * reg_addr - address of the PHY register to read
2548 */
2549static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data)
2550{
2551 u32 val;
2552 int i;
2553
2554 val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
2555 MDIO_START |
2556 MDIO_SUP_PREAMBLE |
2557 MDIO_RW |
2558 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
2559 ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
2560
2561 wmb();
2562
2563 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2564 udelay(2);
2565 val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
2566 if (!(val & (MDIO_START | MDIO_BUSY)))
2567 break;
2568 wmb();
2569 }
2570 if (!(val & (MDIO_START | MDIO_BUSY))) {
2571 *phy_data = (u16)val;
2572 return 0;
2573 }
2574
2575 return ATLX_ERR_PHY;
2576}
2577
2578/*
2579 * Writes a value to a PHY register
2580 * hw - Struct containing variables accessed by shared code
2581 * reg_addr - address of the PHY register to write
2582 * data - data to write to the PHY
2583 */
2584static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data)
2585{
2586 int i;
2587 u32 val;
2588
2589 val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
2590 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
2591 MDIO_SUP_PREAMBLE |
2592 MDIO_START |
2593 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
2594 ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);
2595
2596 wmb();
2597
2598 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
2599 udelay(2);
2600 val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
2601 if (!(val & (MDIO_START | MDIO_BUSY)))
2602 break;
2603
2604 wmb();
2605 }
2606
2607 if (!(val & (MDIO_START | MDIO_BUSY)))
2608 return 0;
2609
2610 return ATLX_ERR_PHY;
2611}
2612
2613/*
2614 * Configures PHY autoneg and flow control advertisement settings
2615 *
2616 * hw - Struct containing variables accessed by shared code
2617 */
2618static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
2619{
2620 s32 ret_val;
2621 s16 mii_autoneg_adv_reg;
2622
2623 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
2624 mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
2625
2626 /* Need to parse autoneg_advertised and set up
2627 * the appropriate PHY registers. First we will parse for
2628 * autoneg_advertised software override. Since we can advertise
2629 * a plethora of combinations, we need to check each bit
2630 * individually.
2631 */
2632
2633 /* First we clear all the 10/100 mb speed bits in the Auto-Neg
2634 * Advertisement Register (Address 4) and the 1000 mb speed bits in
2635 * the 1000Base-T Control Register (Address 9). */
2636 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
2637
2638 /* Need to parse MediaType and setup the
2639 * appropriate PHY registers. */
2640 switch (hw->MediaType) {
2641 case MEDIA_TYPE_AUTO_SENSOR:
2642 mii_autoneg_adv_reg |=
2643 (MII_AR_10T_HD_CAPS |
2644 MII_AR_10T_FD_CAPS |
2645 MII_AR_100TX_HD_CAPS|
2646 MII_AR_100TX_FD_CAPS);
2647 hw->autoneg_advertised =
2648 ADVERTISE_10_HALF |
2649 ADVERTISE_10_FULL |
2650 ADVERTISE_100_HALF|
2651 ADVERTISE_100_FULL;
2652 break;
2653 case MEDIA_TYPE_100M_FULL:
2654 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
2655 hw->autoneg_advertised = ADVERTISE_100_FULL;
2656 break;
2657 case MEDIA_TYPE_100M_HALF:
2658 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
2659 hw->autoneg_advertised = ADVERTISE_100_HALF;
2660 break;
2661 case MEDIA_TYPE_10M_FULL:
2662 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
2663 hw->autoneg_advertised = ADVERTISE_10_FULL;
2664 break;
2665 default:
2666 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
2667 hw->autoneg_advertised = ADVERTISE_10_HALF;
2668 break;
2669 }
2670
2671 /* flow control fixed to enable all */
2672 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
2673
2674 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
2675
2676 ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
2677
2678 if (ret_val)
2679 return ret_val;
2680
2681 return 0;
2682}
2683
2684/*
2685 * Resets the PHY and make all config validate
2686 *
2687 * hw - Struct containing variables accessed by shared code
2688 *
2689 * Sets bit 15 and 12 of the MII Control regiser (for F001 bug)
2690 */
2691static s32 atl2_phy_commit(struct atl2_hw *hw)
2692{
2693 s32 ret_val;
2694 u16 phy_data;
2695
2696 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
2697 ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data);
2698 if (ret_val) {
2699 u32 val;
2700 int i;
2701 /* pcie serdes link may be down ! */
2702 for (i = 0; i < 25; i++) {
2703 msleep(1);
2704 val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
2705 if (!(val & (MDIO_START | MDIO_BUSY)))
2706 break;
2707 }
2708
2709 if (0 != (val & (MDIO_START | MDIO_BUSY))) {
2710 printk(KERN_ERR "atl2: PCIe link down for at least 25ms !\n");
2711 return ret_val;
2712 }
2713 }
2714 return 0;
2715}
2716
/* Bring the PHY out of power saving, enable its link-change interrupt
 * and (re)start autonegotiation.  Idempotent: returns 0 immediately
 * once hw->phy_configured has been set. */
static s32 atl2_phy_init(struct atl2_hw *hw)
{
	s32 ret_val;
	u16 phy_val;

	if (hw->phy_configured)
		return 0;

	/* Enable PHY */
	ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1);
	ATL2_WRITE_FLUSH(hw);
	msleep(1);

	/* check if the PHY is in powersaving mode */
	atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
	atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);

	/* 024E / 124E 0r 0274 / 1274 ? */
	/* NOTE(review): bit 12 of debug register 0 looks like the
	 * power-save bit (atl2_force_ps sets the same bit); the value
	 * comes from vendor code with no datasheet reference. */
	if (phy_val & 0x1000) {
		phy_val &= ~0x1000;
		atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val);
	}

	msleep(1);

	/*Enable PHY LinkChange Interrupt */
	ret_val = atl2_write_phy_reg(hw, 18, 0xC00);
	if (ret_val)
		return ret_val;

	/* setup AutoNeg parameters */
	ret_val = atl2_phy_setup_autoneg_adv(hw);
	if (ret_val)
		return ret_val;

	/* SW.Reset & En-Auto-Neg to restart Auto-Neg */
	ret_val = atl2_phy_commit(hw);
	if (ret_val)
		return ret_val;

	hw->phy_configured = true;

	return ret_val;
}
2761
2762static void atl2_set_mac_addr(struct atl2_hw *hw)
2763{
2764 u32 value;
2765 /* 00-0B-6A-F6-00-DC
2766 * 0: 6AF600DC 1: 000B
2767 * low dword */
2768 value = (((u32)hw->mac_addr[2]) << 24) |
2769 (((u32)hw->mac_addr[3]) << 16) |
2770 (((u32)hw->mac_addr[4]) << 8) |
2771 (((u32)hw->mac_addr[5]));
2772 ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
2773 /* hight dword */
2774 value = (((u32)hw->mac_addr[0]) << 8) |
2775 (((u32)hw->mac_addr[1]));
2776 ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
2777}
2778
2779/*
2780 * check_eeprom_exist
2781 * return 0 if eeprom exist
2782 */
2783static int atl2_check_eeprom_exist(struct atl2_hw *hw)
2784{
2785 u32 value;
2786
2787 value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
2788 if (value & SPI_FLASH_CTRL_EN_VPD) {
2789 value &= ~SPI_FLASH_CTRL_EN_VPD;
2790 ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
2791 }
2792 value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST);
2793 return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
2794}
2795
/* FIXME: This doesn't look right. -- CHS */
/* Stub: reports success without performing any write.  Callers must
 * not rely on @value actually being persisted at @offset. */
static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value)
{
	return true;
}
2801
2802static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue)
2803{
2804 int i;
2805 u32 Control;
2806
2807 if (Offset & 0x3)
2808 return false; /* address do not align */
2809
2810 ATL2_WRITE_REG(hw, REG_VPD_DATA, 0);
2811 Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
2812 ATL2_WRITE_REG(hw, REG_VPD_CAP, Control);
2813
2814 for (i = 0; i < 10; i++) {
2815 msleep(2);
2816 Control = ATL2_READ_REG(hw, REG_VPD_CAP);
2817 if (Control & VPD_CAP_VPD_FLAG)
2818 break;
2819 }
2820
2821 if (Control & VPD_CAP_VPD_FLAG) {
2822 *pValue = ATL2_READ_REG(hw, REG_VPD_DATA);
2823 return true;
2824 }
2825 return false; /* timeout */
2826}
2827
/* Force the PHY into power-saving mode via the vendor debug registers.
 * NOTE(review): the register indices and values (bit 0x1000 in debug
 * reg 0, 0x3000 in reg 2, 0 in reg 3) come from vendor code with no
 * public datasheet reference. */
static void atl2_force_ps(struct atl2_hw *hw)
{
	u16 phy_val;

	/* Set the power-save bit (bit 12) in debug register 0. */
	atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
	atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
	atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000);

	atl2_write_phy_reg(hw, MII_DBG_ADDR, 2);
	atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
	atl2_write_phy_reg(hw, MII_DBG_ADDR, 3);
	atl2_write_phy_reg(hw, MII_DBG_DATA, 0);
}
2841
/* This is the only thing that needs to be changed to adjust the
 * maximum number of ports that the driver can manage.
 */
#define ATL2_MAX_NIC 4

#define OPTION_UNSET    -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED  1

/* All parameters are treated the same, as an integer array of values.
 * This macro just reduces the need to repeat the same declaration code
 * over and over (plus this helps to avoid typo bugs).
 */
#define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET}
/* The first branch is for old kernels that predate module_param_array
 * and still use the legacy MODULE_PARM interface. */
#ifndef module_param_array
/* Module Parameters are always initialized to -1, so that the driver
 * can tell the difference between no user specified value or the
 * user asking for the default value.
 * The true default values are loaded in when atl2_check_options is called.
 *
 * This is a GCC extension to ANSI C.
 * See the item "Labeled Elements in Initializers" in the section
 * "Extensions to the C Language Family" of the GCC documentation.
 */

#define ATL2_PARAM(X, desc) \
    static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
    MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
    MODULE_PARM_DESC(X, desc);
#else
#define ATL2_PARAM(X, desc) \
    static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
    static int num_##X = 0; \
    module_param_array_named(X, X, int, &num_##X, 0); \
    MODULE_PARM_DESC(X, desc);
#endif
2878
2879/*
2880 * Transmit Memory Size
2881 * Valid Range: 64-2048
2882 * Default Value: 128
2883 */
2884#define ATL2_MIN_TX_MEMSIZE 4 /* 4KB */
2885#define ATL2_MAX_TX_MEMSIZE 64 /* 64KB */
2886#define ATL2_DEFAULT_TX_MEMSIZE 8 /* 8KB */
2887ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory");
2888
2889/*
2890 * Receive Memory Block Count
2891 * Valid Range: 16-512
2892 * Default Value: 128
2893 */
2894#define ATL2_MIN_RXD_COUNT 16
2895#define ATL2_MAX_RXD_COUNT 512
2896#define ATL2_DEFAULT_RXD_COUNT 64
2897ATL2_PARAM(RxMemBlock, "Number of receive memory block");
2898
2899/*
2900 * User Specified MediaType Override
2901 *
2902 * Valid Range: 0-5
2903 * - 0 - auto-negotiate at all supported speeds
2904 * - 1 - only link at 1000Mbps Full Duplex
2905 * - 2 - only link at 100Mbps Full Duplex
2906 * - 3 - only link at 100Mbps Half Duplex
2907 * - 4 - only link at 10Mbps Full Duplex
2908 * - 5 - only link at 10Mbps Half Duplex
2909 * Default Value: 0
2910 */
2911ATL2_PARAM(MediaType, "MediaType Select");
2912
2913/*
2914 * Interrupt Moderate Timer in units of 2048 ns (~2 us)
2915 * Valid Range: 10-65535
2916 * Default Value: 45000(90ms)
2917 */
2918#define INT_MOD_DEFAULT_CNT 100 /* 200us */
2919#define INT_MOD_MAX_CNT 65000
2920#define INT_MOD_MIN_CNT 50
2921ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer");
2922
2923/*
2924 * FlashVendor
2925 * Valid Range: 0-2
2926 * 0 - Atmel
2927 * 1 - SST
2928 * 2 - ST
2929 */
2930ATL2_PARAM(FlashVendor, "SPI Flash Vendor");
2931
2932#define AUTONEG_ADV_DEFAULT 0x2F
2933#define AUTONEG_ADV_MASK 0x2F
2934#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
2935
2936#define FLASH_VENDOR_DEFAULT 0
2937#define FLASH_VENDOR_MIN 0
2938#define FLASH_VENDOR_MAX 2
2939
/* Description of one module parameter, consumed by
 * atl2_validate_option(): its kind, user-visible name, error text,
 * default value and the valid range or value list. */
struct atl2_option {
	enum { enable_option, range_option, list_option } type;
	char *name;	/* name printed in log messages */
	char *err;	/* suffix printed when the value is invalid */
	int def;	/* default used when unset or invalid */
	union {
		struct { /* range_option info */
			int min;
			int max;
		} r;
		struct { /* list_option info */
			int nr;
			struct atl2_opt_list { int i; char *str; } *p;
		} l;
	} arg;
};
2956
2957static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
2958{
2959 int i;
2960 struct atl2_opt_list *ent;
2961
2962 if (*value == OPTION_UNSET) {
2963 *value = opt->def;
2964 return 0;
2965 }
2966
2967 switch (opt->type) {
2968 case enable_option:
2969 switch (*value) {
2970 case OPTION_ENABLED:
2971 printk(KERN_INFO "%s Enabled\n", opt->name);
2972 return 0;
2973 break;
2974 case OPTION_DISABLED:
2975 printk(KERN_INFO "%s Disabled\n", opt->name);
2976 return 0;
2977 break;
2978 }
2979 break;
2980 case range_option:
2981 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
2982 printk(KERN_INFO "%s set to %i\n", opt->name, *value);
2983 return 0;
2984 }
2985 break;
2986 case list_option:
2987 for (i = 0; i < opt->arg.l.nr; i++) {
2988 ent = &opt->arg.l.p[i];
2989 if (*value == ent->i) {
2990 if (ent->str[0] != '\0')
2991 printk(KERN_INFO "%s\n", ent->str);
2992 return 0;
2993 }
2994 }
2995 break;
2996 default:
2997 BUG();
2998 }
2999
3000 printk(KERN_INFO "Invalid %s specified (%i) %s\n",
3001 opt->name, *value, opt->err);
3002 *value = opt->def;
3003 return -1;
3004}
3005
3006/*
3007 * atl2_check_options - Range Checking for Command Line Parameters
3008 * @adapter: board private structure
3009 *
3010 * This routine checks all command line parameters for valid user
3011 * input. If an invalid value is given, or if no user specified
3012 * value exists, a default value is used. The final value is stored
3013 * in a variable in the adapter structure.
3014 */
3015static void __devinit atl2_check_options(struct atl2_adapter *adapter)
3016{
3017 int val;
3018 struct atl2_option opt;
3019 int bd = adapter->bd_number;
3020 if (bd >= ATL2_MAX_NIC) {
3021 printk(KERN_NOTICE "Warning: no configuration for board #%i\n",
3022 bd);
3023 printk(KERN_NOTICE "Using defaults for all values\n");
3024#ifndef module_param_array
3025 bd = ATL2_MAX_NIC;
3026#endif
3027 }
3028
3029 /* Bytes of Transmit Memory */
3030 opt.type = range_option;
3031 opt.name = "Bytes of Transmit Memory";
3032 opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE);
3033 opt.def = ATL2_DEFAULT_TX_MEMSIZE;
3034 opt.arg.r.min = ATL2_MIN_TX_MEMSIZE;
3035 opt.arg.r.max = ATL2_MAX_TX_MEMSIZE;
3036#ifdef module_param_array
3037 if (num_TxMemSize > bd) {
3038#endif
3039 val = TxMemSize[bd];
3040 atl2_validate_option(&val, &opt);
3041 adapter->txd_ring_size = ((u32) val) * 1024;
3042#ifdef module_param_array
3043 } else
3044 adapter->txd_ring_size = ((u32)opt.def) * 1024;
3045#endif
3046 /* txs ring size: */
3047 adapter->txs_ring_size = adapter->txd_ring_size / 128;
3048 if (adapter->txs_ring_size > 160)
3049 adapter->txs_ring_size = 160;
3050
3051 /* Receive Memory Block Count */
3052 opt.type = range_option;
3053 opt.name = "Number of receive memory block";
3054 opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT);
3055 opt.def = ATL2_DEFAULT_RXD_COUNT;
3056 opt.arg.r.min = ATL2_MIN_RXD_COUNT;
3057 opt.arg.r.max = ATL2_MAX_RXD_COUNT;
3058#ifdef module_param_array
3059 if (num_RxMemBlock > bd) {
3060#endif
3061 val = RxMemBlock[bd];
3062 atl2_validate_option(&val, &opt);
3063 adapter->rxd_ring_size = (u32)val;
3064 /* FIXME */
3065 /* ((u16)val)&~1; */ /* even number */
3066#ifdef module_param_array
3067 } else
3068 adapter->rxd_ring_size = (u32)opt.def;
3069#endif
3070 /* init RXD Flow control value */
3071 adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7;
3072 adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) >
3073 (adapter->rxd_ring_size / 12) ? (ATL2_MIN_RXD_COUNT / 8) :
3074 (adapter->rxd_ring_size / 12);
3075
3076 /* Interrupt Moderate Timer */
3077 opt.type = range_option;
3078 opt.name = "Interrupt Moderate Timer";
3079 opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT);
3080 opt.def = INT_MOD_DEFAULT_CNT;
3081 opt.arg.r.min = INT_MOD_MIN_CNT;
3082 opt.arg.r.max = INT_MOD_MAX_CNT;
3083#ifdef module_param_array
3084 if (num_IntModTimer > bd) {
3085#endif
3086 val = IntModTimer[bd];
3087 atl2_validate_option(&val, &opt);
3088 adapter->imt = (u16) val;
3089#ifdef module_param_array
3090 } else
3091 adapter->imt = (u16)(opt.def);
3092#endif
3093 /* Flash Vendor */
3094 opt.type = range_option;
3095 opt.name = "SPI Flash Vendor";
3096 opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT);
3097 opt.def = FLASH_VENDOR_DEFAULT;
3098 opt.arg.r.min = FLASH_VENDOR_MIN;
3099 opt.arg.r.max = FLASH_VENDOR_MAX;
3100#ifdef module_param_array
3101 if (num_FlashVendor > bd) {
3102#endif
3103 val = FlashVendor[bd];
3104 atl2_validate_option(&val, &opt);
3105 adapter->hw.flash_vendor = (u8) val;
3106#ifdef module_param_array
3107 } else
3108 adapter->hw.flash_vendor = (u8)(opt.def);
3109#endif
3110 /* MediaType */
3111 opt.type = range_option;
3112 opt.name = "Speed/Duplex Selection";
3113 opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR);
3114 opt.def = MEDIA_TYPE_AUTO_SENSOR;
3115 opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR;
3116 opt.arg.r.max = MEDIA_TYPE_10M_HALF;
3117#ifdef module_param_array
3118 if (num_MediaType > bd) {
3119#endif
3120 val = MediaType[bd];
3121 atl2_validate_option(&val, &opt);
3122 adapter->hw.MediaType = (u16) val;
3123#ifdef module_param_array
3124 } else
3125 adapter->hw.MediaType = (u16)(opt.def);
3126#endif
3127}
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h
new file mode 100644
index 000000000000..6e1f28ff227b
--- /dev/null
+++ b/drivers/net/atlx/atl2.h
@@ -0,0 +1,530 @@
1/* atl2.h -- atl2 driver definitions
2 *
3 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
4 * Copyright(c) 2006 xiong huang <xiong.huang@atheros.com>
5 * Copyright(c) 2007 Chris Snook <csnook@redhat.com>
6 *
7 * Derived from Intel e1000 driver
8 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program; if not, write to the Free Software Foundation, Inc., 59
22 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */
24
25#ifndef _ATL2_H_
26#define _ATL2_H_
27
28#include <asm/atomic.h>
29#include <linux/netdevice.h>
30
31#ifndef _ATL2_HW_H_
32#define _ATL2_HW_H_
33
34#ifndef _ATL2_OSDEP_H_
35#define _ATL2_OSDEP_H_
36
37#include <linux/pci.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/if_ether.h>
41
42#include "atlx.h"
43
44#ifdef ETHTOOL_OPS_COMPAT
45extern int ethtool_ioctl(struct ifreq *ifr);
46#endif
47
48#define PCI_COMMAND_REGISTER PCI_COMMAND
49#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
50#define ETH_ADDR_LEN ETH_ALEN
51
52#define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \
53 ((a)->hw_addr + (reg))))
54
55#define ATL2_WRITE_FLUSH(a) (ioread32((a)->hw_addr))
56
57#define ATL2_READ_REG(a, reg) (ioread32((a)->hw_addr + (reg)))
58
59#define ATL2_WRITE_REGB(a, reg, value) (iowrite8((value), \
60 ((a)->hw_addr + (reg))))
61
62#define ATL2_READ_REGB(a, reg) (ioread8((a)->hw_addr + (reg)))
63
64#define ATL2_WRITE_REGW(a, reg, value) (iowrite16((value), \
65 ((a)->hw_addr + (reg))))
66
67#define ATL2_READ_REGW(a, reg) (ioread16((a)->hw_addr + (reg)))
68
69#define ATL2_WRITE_REG_ARRAY(a, reg, offset, value) \
70 (iowrite32((value), (((a)->hw_addr + (reg)) + ((offset) << 2))))
71
72#define ATL2_READ_REG_ARRAY(a, reg, offset) \
73 (ioread32(((a)->hw_addr + (reg)) + ((offset) << 2)))
74
75#endif /* _ATL2_OSDEP_H_ */
76
77struct atl2_adapter;
78struct atl2_hw;
79
80/* function prototype */
81static s32 atl2_reset_hw(struct atl2_hw *hw);
82static s32 atl2_read_mac_addr(struct atl2_hw *hw);
83static s32 atl2_init_hw(struct atl2_hw *hw);
84static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
85 u16 *duplex);
86static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr);
87static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value);
88static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data);
89static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data);
90static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
91static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
92static void atl2_set_mac_addr(struct atl2_hw *hw);
93static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue);
94static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value);
95static s32 atl2_phy_init(struct atl2_hw *hw);
96static int atl2_check_eeprom_exist(struct atl2_hw *hw);
97static void atl2_force_ps(struct atl2_hw *hw);
98
99/* register definition */
100
101/* Block IDLE Status Register */
102#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC is non-IDLE */
103#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC is non-IDLE */
104#define IDLE_STATUS_DMAR 8 /* 1: DMAR is non-IDLE */
105#define IDLE_STATUS_DMAW 4 /* 1: DMAW is non-IDLE */
106
107/* MDIO Control Register */
108#define MDIO_WAIT_TIMES 10
109
110/* MAC Control Register */
111#define MAC_CTRL_DBG_TX_BKPRESURE 0x100000 /* 1: TX max backoff */
112#define MAC_CTRL_MACLP_CLK_PHY 0x8000000 /* 1: 25MHz from phy */
113#define MAC_CTRL_HALF_LEFT_BUF_SHIFT 28
114#define MAC_CTRL_HALF_LEFT_BUF_MASK 0xF /* MAC retry buf x32B */
115
116/* Internal SRAM Partition Register */
117#define REG_SRAM_TXRAM_END 0x1500 /* Internal tail address of TXRAM
118 * default: 2byte*1024 */
119#define REG_SRAM_RXRAM_END 0x1502 /* Internal tail address of RXRAM
120 * default: 2byte*1024 */
121
122/* Descriptor Control register */
123#define REG_TXD_BASE_ADDR_LO 0x1544 /* The base address of the Transmit
124 * Data Mem low 32-bit(dword align) */
125#define REG_TXD_MEM_SIZE 0x1548 /* Transmit Data Memory size(by
126 * double word , max 256KB) */
127#define REG_TXS_BASE_ADDR_LO 0x154C /* The base address of the Transmit
128 * Status Memory low 32-bit(dword word
129 * align) */
130#define REG_TXS_MEM_SIZE 0x1550 /* double word unit, max 4*2047
131 * bytes. */
132#define REG_RXD_BASE_ADDR_LO 0x1554 /* The base address of the Transmit
133 * Status Memory low 32-bit(unit 8
134 * bytes) */
135#define REG_RXD_BUF_NUM 0x1558 /* Receive Data & Status Memory buffer
136 * number (unit 1536bytes, max
137 * 1536*2047) */
138
139/* DMAR Control Register */
140#define REG_DMAR 0x1580
141#define DMAR_EN 0x1 /* 1: Enable DMAR */
142
143/* TX Cur-Through (early tx threshold) Control Register */
144#define REG_TX_CUT_THRESH 0x1590 /* TxMac begin transmit packet
145 * threshold(unit word) */
146
147/* DMAW Control Register */
148#define REG_DMAW 0x15A0
149#define DMAW_EN 0x1
150
151/* Flow control register */
152#define REG_PAUSE_ON_TH 0x15A8 /* RXD high watermark of overflow
153 * threshold configuration register */
154#define REG_PAUSE_OFF_TH 0x15AA /* RXD lower watermark of overflow
155 * threshold configuration register */
156
157/* Mailbox Register */
158#define REG_MB_TXD_WR_IDX 0x15f0 /* double word align */
159#define REG_MB_RXD_RD_IDX 0x15F4 /* RXD Read index (unit: 1536byets) */
160
161/* Interrupt Status Register */
162#define ISR_TIMER 1 /* Interrupt when Timer counts down to zero */
163#define ISR_MANUAL 2 /* Software manual interrupt, for debug. Set
164 * when SW_MAN_INT_EN is set in Table 51
165 * Selene Master Control Register
166 * (Offset 0x1400). */
167#define ISR_RXF_OV 4 /* RXF overflow interrupt */
168#define ISR_TXF_UR 8 /* TXF underrun interrupt */
169#define ISR_TXS_OV 0x10 /* Internal transmit status buffer full
170 * interrupt */
171#define ISR_RXS_OV 0x20 /* Internal receive status buffer full
172 * interrupt */
173#define ISR_LINK_CHG 0x40 /* Link Status Change Interrupt */
174#define ISR_HOST_TXD_UR 0x80
175#define ISR_HOST_RXD_OV 0x100 /* Host rx data memory full , one pulse */
176#define ISR_DMAR_TO_RST 0x200 /* DMAR op timeout interrupt. SW should
177 * do Reset */
178#define ISR_DMAW_TO_RST 0x400
179#define ISR_PHY 0x800 /* phy interrupt */
180#define ISR_TS_UPDATE 0x10000 /* interrupt after new tx pkt status written
181 * to host */
182#define ISR_RS_UPDATE 0x20000 /* interrupt ater new rx pkt status written
183 * to host. */
184#define ISR_TX_EARLY 0x40000 /* interrupt when txmac begin transmit one
185 * packet */
186
187#define ISR_TX_EVENT (ISR_TXF_UR | ISR_TXS_OV | ISR_HOST_TXD_UR |\
188 ISR_TS_UPDATE | ISR_TX_EARLY)
189#define ISR_RX_EVENT (ISR_RXF_OV | ISR_RXS_OV | ISR_HOST_RXD_OV |\
190 ISR_RS_UPDATE)
191
192#define IMR_NORMAL_MASK (\
193 /*ISR_LINK_CHG |*/\
194 ISR_MANUAL |\
195 ISR_DMAR_TO_RST |\
196 ISR_DMAW_TO_RST |\
197 ISR_PHY |\
198 ISR_PHY_LINKDOWN |\
199 ISR_TS_UPDATE |\
200 ISR_RS_UPDATE)
201
202/* Receive MAC Statistics Registers */
203#define REG_STS_RX_PAUSE 0x1700 /* Num pause packets received */
204#define REG_STS_RXD_OV 0x1704 /* Num frames dropped due to RX
205 * FIFO overflow */
206#define REG_STS_RXS_OV 0x1708 /* Num frames dropped due to RX
207 * Status Buffer Overflow */
208#define REG_STS_RX_FILTER 0x170C /* Num packets dropped due to
209 * address filtering */
210
211/* MII definitions */
212
213/* PHY Common Register */
214#define MII_SMARTSPEED 0x14
215#define MII_DBG_ADDR 0x1D
216#define MII_DBG_DATA 0x1E
217
218/* PCI Command Register Bit Definitions */
219#define PCI_REG_COMMAND 0x04
220#define CMD_IO_SPACE 0x0001
221#define CMD_MEMORY_SPACE 0x0002
222#define CMD_BUS_MASTER 0x0004
223
224#define MEDIA_TYPE_100M_FULL 1
225#define MEDIA_TYPE_100M_HALF 2
226#define MEDIA_TYPE_10M_FULL 3
227#define MEDIA_TYPE_10M_HALF 4
228
229#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x000F /* Everything */
230
231/* The size (in bytes) of a ethernet packet */
232#define ENET_HEADER_SIZE 14
233#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
234#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
235#define ETHERNET_FCS_SIZE 4
236#define MAX_JUMBO_FRAME_SIZE 0x2000
237#define VLAN_SIZE 4
238
239struct tx_pkt_header {
240 unsigned pkt_size:11;
241 unsigned:4; /* reserved */
242 unsigned ins_vlan:1; /* txmac should insert vlan */
243 unsigned short vlan; /* vlan tag */
244};
245/* FIXME: replace above bitfields with MASK/SHIFT defines below */
246#define TX_PKT_HEADER_SIZE_MASK 0x7FF
247#define TX_PKT_HEADER_SIZE_SHIFT 0
248#define TX_PKT_HEADER_INS_VLAN_MASK 0x1
249#define TX_PKT_HEADER_INS_VLAN_SHIFT 15
250#define TX_PKT_HEADER_VLAN_TAG_MASK 0xFFFF
251#define TX_PKT_HEADER_VLAN_TAG_SHIFT 16
252
253struct tx_pkt_status {
254 unsigned pkt_size:11;
255 unsigned:5; /* reserved */
256 unsigned ok:1; /* current packet transmitted without error */
257 unsigned bcast:1; /* broadcast packet */
258 unsigned mcast:1; /* multicast packet */
259 unsigned pause:1; /* transmiited a pause frame */
260 unsigned ctrl:1;
261 unsigned defer:1; /* current packet is xmitted with defer */
262 unsigned exc_defer:1;
263 unsigned single_col:1;
264 unsigned multi_col:1;
265 unsigned late_col:1;
266 unsigned abort_col:1;
267 unsigned underun:1; /* current packet is aborted
268 * due to txram underrun */
269 unsigned:3; /* reserved */
270 unsigned update:1; /* always 1'b1 in tx_status_buf */
271};
272/* FIXME: replace above bitfields with MASK/SHIFT defines below */
273#define TX_PKT_STATUS_SIZE_MASK 0x7FF
274#define TX_PKT_STATUS_SIZE_SHIFT 0
275#define TX_PKT_STATUS_OK_MASK 0x1
276#define TX_PKT_STATUS_OK_SHIFT 16
277#define TX_PKT_STATUS_BCAST_MASK 0x1
278#define TX_PKT_STATUS_BCAST_SHIFT 17
279#define TX_PKT_STATUS_MCAST_MASK 0x1
280#define TX_PKT_STATUS_MCAST_SHIFT 18
281#define TX_PKT_STATUS_PAUSE_MASK 0x1
282#define TX_PKT_STATUS_PAUSE_SHIFT 19
283#define TX_PKT_STATUS_CTRL_MASK 0x1
284#define TX_PKT_STATUS_CTRL_SHIFT 20
285#define TX_PKT_STATUS_DEFER_MASK 0x1
286#define TX_PKT_STATUS_DEFER_SHIFT 21
287#define TX_PKT_STATUS_EXC_DEFER_MASK 0x1
288#define TX_PKT_STATUS_EXC_DEFER_SHIFT 22
289#define TX_PKT_STATUS_SINGLE_COL_MASK 0x1
290#define TX_PKT_STATUS_SINGLE_COL_SHIFT 23
291#define TX_PKT_STATUS_MULTI_COL_MASK 0x1
292#define TX_PKT_STATUS_MULTI_COL_SHIFT 24
293#define TX_PKT_STATUS_LATE_COL_MASK 0x1
294#define TX_PKT_STATUS_LATE_COL_SHIFT 25
295#define TX_PKT_STATUS_ABORT_COL_MASK 0x1
296#define TX_PKT_STATUS_ABORT_COL_SHIFT 26
297#define TX_PKT_STATUS_UNDERRUN_MASK 0x1
298#define TX_PKT_STATUS_UNDERRUN_SHIFT 27
299#define TX_PKT_STATUS_UPDATE_MASK 0x1
300#define TX_PKT_STATUS_UPDATE_SHIFT 31
301
302struct rx_pkt_status {
303 unsigned pkt_size:11; /* packet size, max 2047 bytes */
304 unsigned:5; /* reserved */
305 unsigned ok:1; /* current packet received ok without error */
306 unsigned bcast:1; /* current packet is broadcast */
307 unsigned mcast:1; /* current packet is multicast */
308 unsigned pause:1;
309 unsigned ctrl:1;
310 unsigned crc:1; /* received a packet with crc error */
311 unsigned code:1; /* received a packet with code error */
312 unsigned runt:1; /* received a packet less than 64 bytes
313 * with good crc */
314 unsigned frag:1; /* received a packet less than 64 bytes
315 * with bad crc */
316 unsigned trunc:1; /* current frame truncated due to rxram full */
317 unsigned align:1; /* this packet is alignment error */
318 unsigned vlan:1; /* this packet has vlan */
319 unsigned:3; /* reserved */
320 unsigned update:1;
321 unsigned short vtag; /* vlan tag */
322 unsigned:16;
323};
324/* FIXME: replace above bitfields with MASK/SHIFT defines below */
325#define RX_PKT_STATUS_SIZE_MASK 0x7FF
326#define RX_PKT_STATUS_SIZE_SHIFT 0
327#define RX_PKT_STATUS_OK_MASK 0x1
328#define RX_PKT_STATUS_OK_SHIFT 16
329#define RX_PKT_STATUS_BCAST_MASK 0x1
330#define RX_PKT_STATUS_BCAST_SHIFT 17
331#define RX_PKT_STATUS_MCAST_MASK 0x1
332#define RX_PKT_STATUS_MCAST_SHIFT 18
333#define RX_PKT_STATUS_PAUSE_MASK 0x1
334#define RX_PKT_STATUS_PAUSE_SHIFT 19
335#define RX_PKT_STATUS_CTRL_MASK 0x1
336#define RX_PKT_STATUS_CTRL_SHIFT 20
337#define RX_PKT_STATUS_CRC_MASK 0x1
338#define RX_PKT_STATUS_CRC_SHIFT 21
339#define RX_PKT_STATUS_CODE_MASK 0x1
340#define RX_PKT_STATUS_CODE_SHIFT 22
341#define RX_PKT_STATUS_RUNT_MASK 0x1
342#define RX_PKT_STATUS_RUNT_SHIFT 23
343#define RX_PKT_STATUS_FRAG_MASK 0x1
344#define RX_PKT_STATUS_FRAG_SHIFT 24
345#define RX_PKT_STATUS_TRUNK_MASK 0x1
346#define RX_PKT_STATUS_TRUNK_SHIFT 25
347#define RX_PKT_STATUS_ALIGN_MASK 0x1
348#define RX_PKT_STATUS_ALIGN_SHIFT 26
349#define RX_PKT_STATUS_VLAN_MASK 0x1
350#define RX_PKT_STATUS_VLAN_SHIFT 27
351#define RX_PKT_STATUS_UPDATE_MASK 0x1
352#define RX_PKT_STATUS_UPDATE_SHIFT 31
353#define RX_PKT_STATUS_VLAN_TAG_MASK 0xFFFF
354#define RX_PKT_STATUS_VLAN_TAG_SHIFT 32
355
356struct rx_desc {
357 struct rx_pkt_status status;
358 unsigned char packet[1536-sizeof(struct rx_pkt_status)];
359};
360
361enum atl2_speed_duplex {
362 atl2_10_half = 0,
363 atl2_10_full = 1,
364 atl2_100_half = 2,
365 atl2_100_full = 3
366};
367
368struct atl2_spi_flash_dev {
369 const char *manu_name; /* manufacturer id */
370 /* op-code */
371 u8 cmdWRSR;
372 u8 cmdREAD;
373 u8 cmdPROGRAM;
374 u8 cmdWREN;
375 u8 cmdWRDI;
376 u8 cmdRDSR;
377 u8 cmdRDID;
378 u8 cmdSECTOR_ERASE;
379 u8 cmdCHIP_ERASE;
380};
381
382/* Structure containing variables used by the shared code (atl2_hw.c) */
383struct atl2_hw {
384 u8 __iomem *hw_addr;
385 void *back;
386
387 u8 preamble_len;
388 u8 max_retry; /* Retransmission maximum, afterwards the
389 * packet will be discarded. */
390 u8 jam_ipg; /* IPG to start JAM for collision based flow
391 * control in half-duplex mode. In unit of
392 * 8-bit time. */
393 u8 ipgt; /* Desired back to back inter-packet gap. The
394 * default is 96-bit time. */
395 u8 min_ifg; /* Minimum number of IFG to enforce in between
396 * RX frames. Frame gap below such IFP is
397 * dropped. */
398 u8 ipgr1; /* 64bit Carrier-Sense window */
399 u8 ipgr2; /* 96-bit IPG window */
400 u8 retry_buf; /* When half-duplex mode, should hold some
401 * bytes for mac retry . (8*4bytes unit) */
402
403 u16 fc_rxd_hi;
404 u16 fc_rxd_lo;
405 u16 lcol; /* Collision Window */
406 u16 max_frame_size;
407
408 u16 MediaType;
409 u16 autoneg_advertised;
410 u16 pci_cmd_word;
411
412 u16 mii_autoneg_adv_reg;
413
414 u32 mem_rang;
415 u32 txcw;
416 u32 mc_filter_type;
417 u32 num_mc_addrs;
418 u32 collision_delta;
419 u32 tx_packet_delta;
420 u16 phy_spd_default;
421
422 u16 device_id;
423 u16 vendor_id;
424 u16 subsystem_id;
425 u16 subsystem_vendor_id;
426 u8 revision_id;
427
428 /* spi flash */
429 u8 flash_vendor;
430
431 u8 dma_fairness;
432 u8 mac_addr[NODE_ADDRESS_SIZE];
433 u8 perm_mac_addr[NODE_ADDRESS_SIZE];
434
435 /* FIXME */
436 /* bool phy_preamble_sup; */
437 bool phy_configured;
438};
439
440#endif /* _ATL2_HW_H_ */
441
442struct atl2_ring_header {
443 /* pointer to the descriptor ring memory */
444 void *desc;
445 /* physical adress of the descriptor ring */
446 dma_addr_t dma;
447 /* length of descriptor ring in bytes */
448 unsigned int size;
449};
450
451/* board specific private data structure */
452struct atl2_adapter {
453 /* OS defined structs */
454 struct net_device *netdev;
455 struct pci_dev *pdev;
456 struct net_device_stats net_stats;
457#ifdef NETIF_F_HW_VLAN_TX
458 struct vlan_group *vlgrp;
459#endif
460 u32 wol;
461 u16 link_speed;
462 u16 link_duplex;
463
464 spinlock_t stats_lock;
465 spinlock_t tx_lock;
466
467 struct work_struct reset_task;
468 struct work_struct link_chg_task;
469 struct timer_list watchdog_timer;
470 struct timer_list phy_config_timer;
471
472 unsigned long cfg_phy;
473 bool mac_disabled;
474
475 /* All Descriptor memory */
476 dma_addr_t ring_dma;
477 void *ring_vir_addr;
478 int ring_size;
479
480 struct tx_pkt_header *txd_ring;
481 dma_addr_t txd_dma;
482
483 struct tx_pkt_status *txs_ring;
484 dma_addr_t txs_dma;
485
486 struct rx_desc *rxd_ring;
487 dma_addr_t rxd_dma;
488
489 u32 txd_ring_size; /* bytes per unit */
490 u32 txs_ring_size; /* dwords per unit */
491 u32 rxd_ring_size; /* 1536 bytes per unit */
492
493 /* read /write ptr: */
494 /* host */
495 u32 txd_write_ptr;
496 u32 txs_next_clear;
497 u32 rxd_read_ptr;
498
499 /* nic */
500 atomic_t txd_read_ptr;
501 atomic_t txs_write_ptr;
502 u32 rxd_write_ptr;
503
504 /* Interrupt Moderator timer ( 2us resolution) */
505 u16 imt;
506 /* Interrupt Clear timer (2us resolution) */
507 u16 ict;
508
509 unsigned long flags;
510 /* structs defined in atl2_hw.h */
511 u32 bd_number; /* board number */
512 bool pci_using_64;
513 bool have_msi;
514 struct atl2_hw hw;
515
516 u32 usr_cmd;
517 /* FIXME */
518 /* u32 regs_buff[ATL2_REGS_LEN]; */
519 u32 pci_state[16];
520
521 u32 *config_space;
522};
523
524enum atl2_state_t {
525 __ATL2_TESTING,
526 __ATL2_RESETTING,
527 __ATL2_DOWN
528};
529
530#endif /* _ATL2_H_ */
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 5ee1b0557a02..92c16c37ff23 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -653,6 +653,8 @@ static struct net_device * au1000_probe(int port_num)
653 653
654 aup = dev->priv; 654 aup = dev->priv;
655 655
656 spin_lock_init(&aup->lock);
657
656 /* Allocate the data buffers */ 658 /* Allocate the data buffers */
657 /* Snooping works fine with eth on all au1xxx */ 659 /* Snooping works fine with eth on all au1xxx */
658 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * 660 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
@@ -753,7 +755,6 @@ static struct net_device * au1000_probe(int port_num)
753 aup->tx_db_inuse[i] = pDB; 755 aup->tx_db_inuse[i] = pDB;
754 } 756 }
755 757
756 spin_lock_init(&aup->lock);
757 dev->base_addr = base; 758 dev->base_addr = base;
758 dev->irq = irq; 759 dev->irq = irq;
759 dev->open = au1000_open; 760 dev->open = au1000_open;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index a886a4b9f7e5..4207d6efddc0 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -153,7 +153,7 @@ static void ax_reset_8390(struct net_device *dev)
153 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { 153 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
154 if (jiffies - reset_start_time > 2*HZ/100) { 154 if (jiffies - reset_start_time > 2*HZ/100) {
155 dev_warn(&ax->dev->dev, "%s: %s did not complete.\n", 155 dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
156 __FUNCTION__, dev->name); 156 __func__, dev->name);
157 break; 157 break;
158 } 158 }
159 } 159 }
@@ -173,7 +173,7 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
173 if (ei_status.dmaing) { 173 if (ei_status.dmaing) {
174 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s " 174 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
175 "[DMAstat:%d][irqlock:%d].\n", 175 "[DMAstat:%d][irqlock:%d].\n",
176 dev->name, __FUNCTION__, 176 dev->name, __func__,
177 ei_status.dmaing, ei_status.irqlock); 177 ei_status.dmaing, ei_status.irqlock);
178 return; 178 return;
179 } 179 }
@@ -215,7 +215,7 @@ static void ax_block_input(struct net_device *dev, int count,
215 dev_err(&ax->dev->dev, 215 dev_err(&ax->dev->dev,
216 "%s: DMAing conflict in %s " 216 "%s: DMAing conflict in %s "
217 "[DMAstat:%d][irqlock:%d].\n", 217 "[DMAstat:%d][irqlock:%d].\n",
218 dev->name, __FUNCTION__, 218 dev->name, __func__,
219 ei_status.dmaing, ei_status.irqlock); 219 ei_status.dmaing, ei_status.irqlock);
220 return; 220 return;
221 } 221 }
@@ -260,7 +260,7 @@ static void ax_block_output(struct net_device *dev, int count,
260 if (ei_status.dmaing) { 260 if (ei_status.dmaing) {
261 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s." 261 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
262 "[DMAstat:%d][irqlock:%d]\n", 262 "[DMAstat:%d][irqlock:%d]\n",
263 dev->name, __FUNCTION__, 263 dev->name, __func__,
264 ei_status.dmaing, ei_status.irqlock); 264 ei_status.dmaing, ei_status.irqlock);
265 return; 265 return;
266 } 266 }
@@ -396,7 +396,7 @@ ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
396{ 396{
397 if (phy_debug) 397 if (phy_debug)
398 pr_debug("%s: dev %p, %04x, %04x, %d\n", 398 pr_debug("%s: dev %p, %04x, %04x, %d\n",
399 __FUNCTION__, dev, phy_addr, reg, opc); 399 __func__, dev, phy_addr, reg, opc);
400 400
401 ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */ 401 ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */
402 ax_mii_ei_outbits(dev, 1, 2); /* frame-start */ 402 ax_mii_ei_outbits(dev, 1, 2); /* frame-start */
@@ -422,7 +422,7 @@ ax_phy_read(struct net_device *dev, int phy_addr, int reg)
422 spin_unlock_irqrestore(&ei_local->page_lock, flags); 422 spin_unlock_irqrestore(&ei_local->page_lock, flags);
423 423
424 if (phy_debug) 424 if (phy_debug)
425 pr_debug("%s: %04x.%04x => read %04x\n", __FUNCTION__, 425 pr_debug("%s: %04x.%04x => read %04x\n", __func__,
426 phy_addr, reg, result); 426 phy_addr, reg, result);
427 427
428 return result; 428 return result;
@@ -436,7 +436,7 @@ ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
436 unsigned long flags; 436 unsigned long flags;
437 437
438 dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n", 438 dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
439 __FUNCTION__, dev, phy_addr, reg, value); 439 __func__, dev, phy_addr, reg, value);
440 440
441 spin_lock_irqsave(&ei->page_lock, flags); 441 spin_lock_irqsave(&ei->page_lock, flags);
442 442
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 3db7db1828e7..df896e23e2c5 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -811,7 +811,7 @@ static void bfin_mac_enable(void)
811{ 811{
812 u32 opmode; 812 u32 opmode;
813 813
814 pr_debug("%s: %s\n", DRV_NAME, __FUNCTION__); 814 pr_debug("%s: %s\n", DRV_NAME, __func__);
815 815
816 /* Set RX DMA */ 816 /* Set RX DMA */
817 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); 817 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -847,7 +847,7 @@ static void bfin_mac_enable(void)
847/* Our watchdog timed out. Called by the networking layer */ 847/* Our watchdog timed out. Called by the networking layer */
848static void bfin_mac_timeout(struct net_device *dev) 848static void bfin_mac_timeout(struct net_device *dev)
849{ 849{
850 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 850 pr_debug("%s: %s\n", dev->name, __func__);
851 851
852 bfin_mac_disable(); 852 bfin_mac_disable();
853 853
@@ -949,7 +949,7 @@ static int bfin_mac_open(struct net_device *dev)
949{ 949{
950 struct bfin_mac_local *lp = netdev_priv(dev); 950 struct bfin_mac_local *lp = netdev_priv(dev);
951 int retval; 951 int retval;
952 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 952 pr_debug("%s: %s\n", dev->name, __func__);
953 953
954 /* 954 /*
955 * Check that the address is valid. If its not, refuse 955 * Check that the address is valid. If its not, refuse
@@ -989,7 +989,7 @@ static int bfin_mac_open(struct net_device *dev)
989static int bfin_mac_close(struct net_device *dev) 989static int bfin_mac_close(struct net_device *dev)
990{ 990{
991 struct bfin_mac_local *lp = netdev_priv(dev); 991 struct bfin_mac_local *lp = netdev_priv(dev);
992 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 992 pr_debug("%s: %s\n", dev->name, __func__);
993 993
994 netif_stop_queue(dev); 994 netif_stop_queue(dev);
995 netif_carrier_off(dev); 995 netif_carrier_off(dev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 2486a656f12d..883e0a724107 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -69,7 +69,7 @@ static char version[] __devinitdata =
69 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 69 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 70
71MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); 71MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver"); 72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
73MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
74MODULE_VERSION(DRV_MODULE_VERSION); 74MODULE_VERSION(DRV_MODULE_VERSION);
75 75
@@ -1127,7 +1127,7 @@ bnx2_init_all_rx_contexts(struct bnx2 *bp)
1127 } 1127 }
1128} 1128}
1129 1129
1130static int 1130static void
1131bnx2_set_mac_link(struct bnx2 *bp) 1131bnx2_set_mac_link(struct bnx2 *bp)
1132{ 1132{
1133 u32 val; 1133 u32 val;
@@ -1193,8 +1193,6 @@ bnx2_set_mac_link(struct bnx2 *bp)
1193 1193
1194 if (CHIP_NUM(bp) == CHIP_NUM_5709) 1194 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1195 bnx2_init_all_rx_contexts(bp); 1195 bnx2_init_all_rx_contexts(bp);
1196
1197 return 0;
1198} 1196}
1199 1197
1200static void 1198static void
@@ -5600,7 +5598,7 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
5600 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { 5598 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5601 u32 bmcr; 5599 u32 bmcr;
5602 5600
5603 bp->current_interval = bp->timer_interval; 5601 bp->current_interval = BNX2_TIMER_INTERVAL;
5604 5602
5605 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 5603 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5606 5604
@@ -5629,7 +5627,7 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
5629 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; 5627 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5630 } 5628 }
5631 } else 5629 } else
5632 bp->current_interval = bp->timer_interval; 5630 bp->current_interval = BNX2_TIMER_INTERVAL;
5633 5631
5634 if (check_link) { 5632 if (check_link) {
5635 u32 val; 5633 u32 val;
@@ -5674,11 +5672,11 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
5674 } else { 5672 } else {
5675 bnx2_disable_forced_2g5(bp); 5673 bnx2_disable_forced_2g5(bp);
5676 bp->serdes_an_pending = 2; 5674 bp->serdes_an_pending = 2;
5677 bp->current_interval = bp->timer_interval; 5675 bp->current_interval = BNX2_TIMER_INTERVAL;
5678 } 5676 }
5679 5677
5680 } else 5678 } else
5681 bp->current_interval = bp->timer_interval; 5679 bp->current_interval = BNX2_TIMER_INTERVAL;
5682 5680
5683 spin_unlock(&bp->phy_lock); 5681 spin_unlock(&bp->phy_lock);
5684} 5682}
@@ -7516,8 +7514,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7516 7514
7517 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS; 7515 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7518 7516
7519 bp->timer_interval = HZ; 7517 bp->current_interval = BNX2_TIMER_INTERVAL;
7520 bp->current_interval = HZ;
7521 7518
7522 bp->phy_addr = 1; 7519 bp->phy_addr = 1;
7523 7520
@@ -7607,7 +7604,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7607 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 7604 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7608 7605
7609 init_timer(&bp->timer); 7606 init_timer(&bp->timer);
7610 bp->timer.expires = RUN_AT(bp->timer_interval); 7607 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7611 bp->timer.data = (unsigned long) bp; 7608 bp->timer.data = (unsigned long) bp;
7612 bp->timer.function = bnx2_timer; 7609 bp->timer.function = bnx2_timer;
7613 7610
@@ -7720,7 +7717,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7720 7717
7721 memcpy(dev->dev_addr, bp->mac_addr, 6); 7718 memcpy(dev->dev_addr, bp->mac_addr, 6);
7722 memcpy(dev->perm_addr, bp->mac_addr, 6); 7719 memcpy(dev->perm_addr, bp->mac_addr, 6);
7723 bp->name = board_info[ent->driver_data].name;
7724 7720
7725 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 7721 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7726 if (CHIP_NUM(bp) == CHIP_NUM_5709) 7722 if (CHIP_NUM(bp) == CHIP_NUM_5709)
@@ -7747,7 +7743,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7747 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, " 7743 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7748 "IRQ %d, node addr %s\n", 7744 "IRQ %d, node addr %s\n",
7749 dev->name, 7745 dev->name,
7750 bp->name, 7746 board_info[ent->driver_data].name,
7751 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', 7747 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7752 ((CHIP_ID(bp) & 0x0ff0) >> 4), 7748 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7753 bnx2_bus_string(bp, str), 7749 bnx2_bus_string(bp, str),
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index c3c579f98ed0..682b8f077529 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6654,6 +6654,8 @@ struct bnx2_napi {
6654 struct bnx2_tx_ring_info tx_ring; 6654 struct bnx2_tx_ring_info tx_ring;
6655}; 6655};
6656 6656
6657#define BNX2_TIMER_INTERVAL HZ
6658
6657struct bnx2 { 6659struct bnx2 {
6658 /* Fields used in the tx and intr/napi performance paths are grouped */ 6660 /* Fields used in the tx and intr/napi performance paths are grouped */
6659 /* together in the beginning of the structure. */ 6661 /* together in the beginning of the structure. */
@@ -6701,9 +6703,6 @@ struct bnx2 {
6701 6703
6702 /* End of fields used in the performance code paths. */ 6704 /* End of fields used in the performance code paths. */
6703 6705
6704 char *name;
6705
6706 int timer_interval;
6707 int current_interval; 6706 int current_interval;
6708 struct timer_list timer; 6707 struct timer_list timer;
6709 struct work_struct reset_task; 6708 struct work_struct reset_task;
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index a14dba1afcc5..fd705d1295a7 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -151,6 +151,8 @@ struct sw_rx_page {
151#define PAGES_PER_SGE_SHIFT 0 151#define PAGES_PER_SGE_SHIFT 0
152#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) 152#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
153 153
154#define BCM_RX_ETH_PAYLOAD_ALIGN 64
155
154/* SGE ring related macros */ 156/* SGE ring related macros */
155#define NUM_RX_SGE_PAGES 2 157#define NUM_RX_SGE_PAGES 2
156#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) 158#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
@@ -750,8 +752,7 @@ struct bnx2x {
750 752
751 u32 rx_csum; 753 u32 rx_csum;
752 u32 rx_offset; 754 u32 rx_offset;
753 u32 rx_buf_use_size; /* useable size */ 755 u32 rx_buf_size;
754 u32 rx_buf_size; /* with alignment */
755#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */ 756#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */
756#define ETH_MIN_PACKET_SIZE 60 757#define ETH_MIN_PACKET_SIZE 60
757#define ETH_MAX_PACKET_SIZE 1500 758#define ETH_MAX_PACKET_SIZE 1500
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 82deea0a63f5..fce745148ff9 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
59#include "bnx2x.h" 59#include "bnx2x.h"
60#include "bnx2x_init.h" 60#include "bnx2x_init.h"
61 61
62#define DRV_MODULE_VERSION "1.45.20" 62#define DRV_MODULE_VERSION "1.45.22"
63#define DRV_MODULE_RELDATE "2008/08/25" 63#define DRV_MODULE_RELDATE "2008/09/09"
64#define BNX2X_BC_VER 0x040200 64#define BNX2X_BC_VER 0x040200
65 65
66/* Time in jiffies before concluding the transmitter is hung */ 66/* Time in jiffies before concluding the transmitter is hung */
@@ -649,15 +649,16 @@ static void bnx2x_int_disable(struct bnx2x *bp)
649 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 649 BNX2X_ERR("BUG! proper val not read from IGU!\n");
650} 650}
651 651
652static void bnx2x_int_disable_sync(struct bnx2x *bp) 652static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
653{ 653{
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655 int i; 655 int i;
656 656
657 /* disable interrupt handling */ 657 /* disable interrupt handling */
658 atomic_inc(&bp->intr_sem); 658 atomic_inc(&bp->intr_sem);
659 /* prevent the HW from sending interrupts */ 659 if (disable_hw)
660 bnx2x_int_disable(bp); 660 /* prevent the HW from sending interrupts */
661 bnx2x_int_disable(bp);
661 662
662 /* make sure all ISRs are done */ 663 /* make sure all ISRs are done */
663 if (msix) { 664 if (msix) {
@@ -1027,7 +1028,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1027 if (unlikely(skb == NULL)) 1028 if (unlikely(skb == NULL))
1028 return -ENOMEM; 1029 return -ENOMEM;
1029 1030
1030 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, 1031 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1031 PCI_DMA_FROMDEVICE); 1032 PCI_DMA_FROMDEVICE);
1032 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 1033 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1033 dev_kfree_skb(skb); 1034 dev_kfree_skb(skb);
@@ -1169,7 +1170,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1169 /* move empty skb from pool to prod and map it */ 1170 /* move empty skb from pool to prod and map it */
1170 prod_rx_buf->skb = fp->tpa_pool[queue].skb; 1171 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1171 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data, 1172 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1172 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); 1173 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1173 pci_unmap_addr_set(prod_rx_buf, mapping, mapping); 1174 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1174 1175
1175 /* move partial skb from cons to pool (don't unmap yet) */ 1176 /* move partial skb from cons to pool (don't unmap yet) */
@@ -1276,7 +1277,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1276 pool entry status to BNX2X_TPA_STOP even if new skb allocation 1277 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1277 fails. */ 1278 fails. */
1278 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), 1279 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1279 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); 1280 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1280 1281
1281 if (likely(new_skb)) { 1282 if (likely(new_skb)) {
1282 /* fix ip xsum and give it to the stack */ 1283 /* fix ip xsum and give it to the stack */
@@ -1520,7 +1521,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1520 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) { 1521 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521 pci_unmap_single(bp->pdev, 1522 pci_unmap_single(bp->pdev,
1522 pci_unmap_addr(rx_buf, mapping), 1523 pci_unmap_addr(rx_buf, mapping),
1523 bp->rx_buf_use_size, 1524 bp->rx_buf_size,
1524 PCI_DMA_FROMDEVICE); 1525 PCI_DMA_FROMDEVICE);
1525 skb_reserve(skb, pad); 1526 skb_reserve(skb, pad);
1526 skb_put(skb, len); 1527 skb_put(skb, len);
@@ -4229,7 +4230,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4229 if (fp->tpa_state[i] == BNX2X_TPA_START) 4230 if (fp->tpa_state[i] == BNX2X_TPA_START)
4230 pci_unmap_single(bp->pdev, 4231 pci_unmap_single(bp->pdev,
4231 pci_unmap_addr(rx_buf, mapping), 4232 pci_unmap_addr(rx_buf, mapping),
4232 bp->rx_buf_use_size, 4233 bp->rx_buf_size,
4233 PCI_DMA_FROMDEVICE); 4234 PCI_DMA_FROMDEVICE);
4234 4235
4235 dev_kfree_skb(skb); 4236 dev_kfree_skb(skb);
@@ -4245,15 +4246,14 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4245 u16 ring_prod, cqe_ring_prod; 4246 u16 ring_prod, cqe_ring_prod;
4246 int i, j; 4247 int i, j;
4247 4248
4248 bp->rx_buf_use_size = bp->dev->mtu; 4249 bp->rx_buf_size = bp->dev->mtu;
4249 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD; 4250 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4250 bp->rx_buf_size = bp->rx_buf_use_size + 64; 4251 BCM_RX_ETH_PAYLOAD_ALIGN;
4251 4252
4252 if (bp->flags & TPA_ENABLE_FLAG) { 4253 if (bp->flags & TPA_ENABLE_FLAG) {
4253 DP(NETIF_MSG_IFUP, 4254 DP(NETIF_MSG_IFUP,
4254 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n", 4255 "rx_buf_size %d effective_mtu %d\n",
4255 bp->rx_buf_use_size, bp->rx_buf_size, 4256 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4256 bp->dev->mtu + ETH_OVREHEAD);
4257 4257
4258 for_each_queue(bp, j) { 4258 for_each_queue(bp, j) {
4259 struct bnx2x_fastpath *fp = &bp->fp[j]; 4259 struct bnx2x_fastpath *fp = &bp->fp[j];
@@ -4462,9 +4462,10 @@ static void bnx2x_init_context(struct bnx2x *bp)
4462 context->ustorm_st_context.common.status_block_id = sb_id; 4462 context->ustorm_st_context.common.status_block_id = sb_id;
4463 context->ustorm_st_context.common.flags = 4463 context->ustorm_st_context.common.flags =
4464 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT; 4464 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4465 context->ustorm_st_context.common.mc_alignment_size = 64; 4465 context->ustorm_st_context.common.mc_alignment_size =
4466 BCM_RX_ETH_PAYLOAD_ALIGN;
4466 context->ustorm_st_context.common.bd_buff_size = 4467 context->ustorm_st_context.common.bd_buff_size =
4467 bp->rx_buf_use_size; 4468 bp->rx_buf_size;
4468 context->ustorm_st_context.common.bd_page_base_hi = 4469 context->ustorm_st_context.common.bd_page_base_hi =
4469 U64_HI(fp->rx_desc_mapping); 4470 U64_HI(fp->rx_desc_mapping);
4470 context->ustorm_st_context.common.bd_page_base_lo = 4471 context->ustorm_st_context.common.bd_page_base_lo =
@@ -4717,7 +4718,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
4717 } 4718 }
4718 4719
4719 /* Init CQ ring mapping and aggregation size */ 4720 /* Init CQ ring mapping and aggregation size */
4720 max_agg_size = min((u32)(bp->rx_buf_use_size + 4721 max_agg_size = min((u32)(bp->rx_buf_size +
4721 8*BCM_PAGE_SIZE*PAGES_PER_SGE), 4722 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4722 (u32)0xffff); 4723 (u32)0xffff);
4723 for_each_queue(bp, i) { 4724 for_each_queue(bp, i) {
@@ -5940,7 +5941,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5940 5941
5941 pci_unmap_single(bp->pdev, 5942 pci_unmap_single(bp->pdev,
5942 pci_unmap_addr(rx_buf, mapping), 5943 pci_unmap_addr(rx_buf, mapping),
5943 bp->rx_buf_use_size, 5944 bp->rx_buf_size,
5944 PCI_DMA_FROMDEVICE); 5945 PCI_DMA_FROMDEVICE);
5945 5946
5946 rx_buf->skb = NULL; 5947 rx_buf->skb = NULL;
@@ -6086,9 +6087,9 @@ static void bnx2x_netif_start(struct bnx2x *bp)
6086 } 6087 }
6087} 6088}
6088 6089
6089static void bnx2x_netif_stop(struct bnx2x *bp) 6090static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6090{ 6091{
6091 bnx2x_int_disable_sync(bp); 6092 bnx2x_int_disable_sync(bp, disable_hw);
6092 if (netif_running(bp->dev)) { 6093 if (netif_running(bp->dev)) {
6093 bnx2x_napi_disable(bp); 6094 bnx2x_napi_disable(bp);
6094 netif_tx_disable(bp->dev); 6095 netif_tx_disable(bp->dev);
@@ -6475,7 +6476,7 @@ load_rings_free:
6475 for_each_queue(bp, i) 6476 for_each_queue(bp, i)
6476 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6477 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6477load_int_disable: 6478load_int_disable:
6478 bnx2x_int_disable_sync(bp); 6479 bnx2x_int_disable_sync(bp, 1);
6479 /* Release IRQs */ 6480 /* Release IRQs */
6480 bnx2x_free_irq(bp); 6481 bnx2x_free_irq(bp);
6481load_error: 6482load_error:
@@ -6650,7 +6651,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6650 bp->rx_mode = BNX2X_RX_MODE_NONE; 6651 bp->rx_mode = BNX2X_RX_MODE_NONE;
6651 bnx2x_set_storm_rx_mode(bp); 6652 bnx2x_set_storm_rx_mode(bp);
6652 6653
6653 bnx2x_netif_stop(bp); 6654 bnx2x_netif_stop(bp, 1);
6654 if (!netif_running(bp->dev)) 6655 if (!netif_running(bp->dev))
6655 bnx2x_napi_disable(bp); 6656 bnx2x_napi_disable(bp);
6656 del_timer_sync(&bp->timer); 6657 del_timer_sync(&bp->timer);
@@ -8791,7 +8792,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8791 if (!netif_running(bp->dev)) 8792 if (!netif_running(bp->dev))
8792 return BNX2X_LOOPBACK_FAILED; 8793 return BNX2X_LOOPBACK_FAILED;
8793 8794
8794 bnx2x_netif_stop(bp); 8795 bnx2x_netif_stop(bp, 1);
8795 8796
8796 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) { 8797 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8797 DP(NETIF_MSG_PROBE, "MAC loopback failed\n"); 8798 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
@@ -10346,6 +10347,74 @@ static int bnx2x_resume(struct pci_dev *pdev)
10346 return rc; 10347 return rc;
10347} 10348}
10348 10349
10350static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10351{
10352 int i;
10353
10354 bp->state = BNX2X_STATE_ERROR;
10355
10356 bp->rx_mode = BNX2X_RX_MODE_NONE;
10357
10358 bnx2x_netif_stop(bp, 0);
10359
10360 del_timer_sync(&bp->timer);
10361 bp->stats_state = STATS_STATE_DISABLED;
10362 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10363
10364 /* Release IRQs */
10365 bnx2x_free_irq(bp);
10366
10367 if (CHIP_IS_E1(bp)) {
10368 struct mac_configuration_cmd *config =
10369 bnx2x_sp(bp, mcast_config);
10370
10371 for (i = 0; i < config->hdr.length_6b; i++)
10372 CAM_INVALIDATE(config->config_table[i]);
10373 }
10374
10375 /* Free SKBs, SGEs, TPA pool and driver internals */
10376 bnx2x_free_skbs(bp);
10377 for_each_queue(bp, i)
10378 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10379 bnx2x_free_mem(bp);
10380
10381 bp->state = BNX2X_STATE_CLOSED;
10382
10383 netif_carrier_off(bp->dev);
10384
10385 return 0;
10386}
10387
10388static void bnx2x_eeh_recover(struct bnx2x *bp)
10389{
10390 u32 val;
10391
10392 mutex_init(&bp->port.phy_mutex);
10393
10394 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10395 bp->link_params.shmem_base = bp->common.shmem_base;
10396 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10397
10398 if (!bp->common.shmem_base ||
10399 (bp->common.shmem_base < 0xA0000) ||
10400 (bp->common.shmem_base >= 0xC0000)) {
10401 BNX2X_DEV_INFO("MCP not active\n");
10402 bp->flags |= NO_MCP_FLAG;
10403 return;
10404 }
10405
10406 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10407 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10408 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10409 BNX2X_ERR("BAD MCP validity signature\n");
10410
10411 if (!BP_NOMCP(bp)) {
10412 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10413 & DRV_MSG_SEQ_NUMBER_MASK);
10414 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10415 }
10416}
10417
10349/** 10418/**
10350 * bnx2x_io_error_detected - called when PCI error is detected 10419 * bnx2x_io_error_detected - called when PCI error is detected
10351 * @pdev: Pointer to PCI device 10420 * @pdev: Pointer to PCI device
@@ -10365,7 +10434,7 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10365 netif_device_detach(dev); 10434 netif_device_detach(dev);
10366 10435
10367 if (netif_running(dev)) 10436 if (netif_running(dev))
10368 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 10437 bnx2x_eeh_nic_unload(bp);
10369 10438
10370 pci_disable_device(pdev); 10439 pci_disable_device(pdev);
10371 10440
@@ -10420,8 +10489,10 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
10420 10489
10421 rtnl_lock(); 10490 rtnl_lock();
10422 10491
10492 bnx2x_eeh_recover(bp);
10493
10423 if (netif_running(dev)) 10494 if (netif_running(dev))
10424 bnx2x_nic_load(bp, LOAD_OPEN); 10495 bnx2x_nic_load(bp, LOAD_NORMAL);
10425 10496
10426 netif_device_attach(dev); 10497 netif_device_attach(dev);
10427 10498
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index b211486a0ca3..ade5f3f6693b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -38,6 +38,7 @@
38#include <linux/in.h> 38#include <linux/in.h>
39#include <net/ipx.h> 39#include <net/ipx.h>
40#include <net/arp.h> 40#include <net/arp.h>
41#include <net/ipv6.h>
41#include <asm/byteorder.h> 42#include <asm/byteorder.h>
42#include "bonding.h" 43#include "bonding.h"
43#include "bond_alb.h" 44#include "bond_alb.h"
@@ -81,6 +82,7 @@
81#define RLB_PROMISC_TIMEOUT 10*ALB_TIMER_TICKS_PER_SEC 82#define RLB_PROMISC_TIMEOUT 10*ALB_TIMER_TICKS_PER_SEC
82 83
83static const u8 mac_bcast[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff}; 84static const u8 mac_bcast[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff};
85static const u8 mac_v6_allmcast[ETH_ALEN] = {0x33,0x33,0x00,0x00,0x00,0x01};
84static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC; 86static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
85 87
86#pragma pack(1) 88#pragma pack(1)
@@ -710,7 +712,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
710 struct arp_pkt *arp = arp_pkt(skb); 712 struct arp_pkt *arp = arp_pkt(skb);
711 struct slave *tx_slave = NULL; 713 struct slave *tx_slave = NULL;
712 714
713 if (arp->op_code == __constant_htons(ARPOP_REPLY)) { 715 if (arp->op_code == htons(ARPOP_REPLY)) {
714 /* the arp must be sent on the selected 716 /* the arp must be sent on the selected
715 * rx channel 717 * rx channel
716 */ 718 */
@@ -719,7 +721,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
719 memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN); 721 memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
720 } 722 }
721 dprintk("Server sent ARP Reply packet\n"); 723 dprintk("Server sent ARP Reply packet\n");
722 } else if (arp->op_code == __constant_htons(ARPOP_REQUEST)) { 724 } else if (arp->op_code == htons(ARPOP_REQUEST)) {
723 /* Create an entry in the rx_hashtbl for this client as a 725 /* Create an entry in the rx_hashtbl for this client as a
724 * place holder. 726 * place holder.
725 * When the arp reply is received the entry will be updated 727 * When the arp reply is received the entry will be updated
@@ -1290,6 +1292,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1290 u32 hash_index = 0; 1292 u32 hash_index = 0;
1291 const u8 *hash_start = NULL; 1293 const u8 *hash_start = NULL;
1292 int res = 1; 1294 int res = 1;
1295 struct ipv6hdr *ip6hdr;
1293 1296
1294 skb_reset_mac_header(skb); 1297 skb_reset_mac_header(skb);
1295 eth_data = eth_hdr(skb); 1298 eth_data = eth_hdr(skb);
@@ -1319,11 +1322,32 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1319 } 1322 }
1320 break; 1323 break;
1321 case ETH_P_IPV6: 1324 case ETH_P_IPV6:
1325 /* IPv6 doesn't really use broadcast mac address, but leave
1326 * that here just in case.
1327 */
1322 if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) { 1328 if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) {
1323 do_tx_balance = 0; 1329 do_tx_balance = 0;
1324 break; 1330 break;
1325 } 1331 }
1326 1332
1333 /* IPv6 uses all-nodes multicast as an equivalent to
1334 * broadcasts in IPv4.
1335 */
1336 if (memcmp(eth_data->h_dest, mac_v6_allmcast, ETH_ALEN) == 0) {
1337 do_tx_balance = 0;
1338 break;
1339 }
1340
1341 /* Additianally, DAD probes should not be tx-balanced as that
1342 * will lead to false positives for duplicate addresses and
1343 * prevent address configuration from working.
1344 */
1345 ip6hdr = ipv6_hdr(skb);
1346 if (ipv6_addr_any(&ip6hdr->saddr)) {
1347 do_tx_balance = 0;
1348 break;
1349 }
1350
1327 hash_start = (char *)&(ipv6_hdr(skb)->daddr); 1351 hash_start = (char *)&(ipv6_hdr(skb)->daddr);
1328 hash_size = sizeof(ipv6_hdr(skb)->daddr); 1352 hash_size = sizeof(ipv6_hdr(skb)->daddr);
1329 break; 1353 break;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c792138511e6..8e2be24f3fe4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3702,7 +3702,7 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
3702 struct ethhdr *data = (struct ethhdr *)skb->data; 3702 struct ethhdr *data = (struct ethhdr *)skb->data;
3703 struct iphdr *iph = ip_hdr(skb); 3703 struct iphdr *iph = ip_hdr(skb);
3704 3704
3705 if (skb->protocol == __constant_htons(ETH_P_IP)) { 3705 if (skb->protocol == htons(ETH_P_IP)) {
3706 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ 3706 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
3707 (data->h_dest[5] ^ bond_dev->dev_addr[5])) % count; 3707 (data->h_dest[5] ^ bond_dev->dev_addr[5])) % count;
3708 } 3708 }
@@ -3723,8 +3723,8 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
3723 __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); 3723 __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
3724 int layer4_xor = 0; 3724 int layer4_xor = 0;
3725 3725
3726 if (skb->protocol == __constant_htons(ETH_P_IP)) { 3726 if (skb->protocol == htons(ETH_P_IP)) {
3727 if (!(iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) && 3727 if (!(iph->frag_off & htons(IP_MF|IP_OFFSET)) &&
3728 (iph->protocol == IPPROTO_TCP || 3728 (iph->protocol == IPPROTO_TCP ||
3729 iph->protocol == IPPROTO_UDP)) { 3729 iph->protocol == IPPROTO_UDP)) {
3730 layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1))); 3730 layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1)));
@@ -4493,6 +4493,12 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4493 4493
4494static const struct ethtool_ops bond_ethtool_ops = { 4494static const struct ethtool_ops bond_ethtool_ops = {
4495 .get_drvinfo = bond_ethtool_get_drvinfo, 4495 .get_drvinfo = bond_ethtool_get_drvinfo,
4496 .get_link = ethtool_op_get_link,
4497 .get_tx_csum = ethtool_op_get_tx_csum,
4498 .get_sg = ethtool_op_get_sg,
4499 .get_tso = ethtool_op_get_tso,
4500 .get_ufo = ethtool_op_get_ufo,
4501 .get_flags = ethtool_op_get_flags,
4496}; 4502};
4497 4503
4498/* 4504/*
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index fb730ec0396f..f7b40edabfd8 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -32,7 +32,7 @@
32#ifdef BONDING_DEBUG 32#ifdef BONDING_DEBUG
33#define dprintk(fmt, args...) \ 33#define dprintk(fmt, args...) \
34 printk(KERN_DEBUG \ 34 printk(KERN_DEBUG \
35 DRV_NAME ": %s() %d: " fmt, __FUNCTION__, __LINE__ , ## args ) 35 DRV_NAME ": %s() %d: " fmt, __func__, __LINE__ , ## args )
36#else 36#else
37#define dprintk(fmt, args...) 37#define dprintk(fmt, args...)
38#endif /* BONDING_DEBUG */ 38#endif /* BONDING_DEBUG */
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f1936d51b458..86909cfb14de 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -74,6 +74,7 @@
74#include <linux/slab.h> 74#include <linux/slab.h>
75#include <linux/delay.h> 75#include <linux/delay.h>
76#include <linux/init.h> 76#include <linux/init.h>
77#include <linux/vmalloc.h>
77#include <linux/ioport.h> 78#include <linux/ioport.h>
78#include <linux/pci.h> 79#include <linux/pci.h>
79#include <linux/mm.h> 80#include <linux/mm.h>
@@ -91,6 +92,7 @@
91#include <linux/ip.h> 92#include <linux/ip.h>
92#include <linux/tcp.h> 93#include <linux/tcp.h>
93#include <linux/mutex.h> 94#include <linux/mutex.h>
95#include <linux/firmware.h>
94 96
95#include <net/checksum.h> 97#include <net/checksum.h>
96 98
@@ -197,6 +199,7 @@ static int link_mode;
197MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)"); 199MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
198MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver"); 200MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
199MODULE_LICENSE("GPL"); 201MODULE_LICENSE("GPL");
202MODULE_FIRMWARE("sun/cassini.bin");
200module_param(cassini_debug, int, 0); 203module_param(cassini_debug, int, 0);
201MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value"); 204MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
202module_param(link_mode, int, 0); 205module_param(link_mode, int, 0);
@@ -812,9 +815,44 @@ static int cas_reset_mii_phy(struct cas *cp)
812 return (limit <= 0); 815 return (limit <= 0);
813} 816}
814 817
818static int cas_saturn_firmware_init(struct cas *cp)
819{
820 const struct firmware *fw;
821 const char fw_name[] = "sun/cassini.bin";
822 int err;
823
824 if (PHY_NS_DP83065 != cp->phy_id)
825 return 0;
826
827 err = request_firmware(&fw, fw_name, &cp->pdev->dev);
828 if (err) {
829 printk(KERN_ERR "cassini: Failed to load firmware \"%s\"\n",
830 fw_name);
831 return err;
832 }
833 if (fw->size < 2) {
834 printk(KERN_ERR "cassini: bogus length %zu in \"%s\"\n",
835 fw->size, fw_name);
836 err = -EINVAL;
837 goto out;
838 }
839 cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
840 cp->fw_size = fw->size - 2;
841 cp->fw_data = vmalloc(cp->fw_size);
842 if (!cp->fw_data) {
843 err = -ENOMEM;
844 printk(KERN_ERR "cassini: \"%s\" Failed %d\n", fw_name, err);
845 goto out;
846 }
847 memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
848out:
849 release_firmware(fw);
850 return err;
851}
852
815static void cas_saturn_firmware_load(struct cas *cp) 853static void cas_saturn_firmware_load(struct cas *cp)
816{ 854{
817 cas_saturn_patch_t *patch = cas_saturn_patch; 855 int i;
818 856
819 cas_phy_powerdown(cp); 857 cas_phy_powerdown(cp);
820 858
@@ -833,11 +871,9 @@ static void cas_saturn_firmware_load(struct cas *cp)
833 871
834 /* download new firmware */ 872 /* download new firmware */
835 cas_phy_write(cp, DP83065_MII_MEM, 0x1); 873 cas_phy_write(cp, DP83065_MII_MEM, 0x1);
836 cas_phy_write(cp, DP83065_MII_REGE, patch->addr); 874 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
837 while (patch->addr) { 875 for (i = 0; i < cp->fw_size; i++)
838 cas_phy_write(cp, DP83065_MII_REGD, patch->val); 876 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
839 patch++;
840 }
841 877
842 /* enable firmware */ 878 /* enable firmware */
843 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); 879 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
@@ -2182,7 +2218,7 @@ static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2182 * do any additional locking here. stick the buffer 2218 * do any additional locking here. stick the buffer
2183 * at the end. 2219 * at the end.
2184 */ 2220 */
2185 __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow); 2221 __skb_queue_tail(flow, skb);
2186 if (words[0] & RX_COMP1_RELEASE_FLOW) { 2222 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2187 while ((skb = __skb_dequeue(flow))) { 2223 while ((skb = __skb_dequeue(flow))) {
2188 cas_skb_release(skb); 2224 cas_skb_release(skb);
@@ -5108,6 +5144,9 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5108 cas_reset(cp, 0); 5144 cas_reset(cp, 0);
5109 if (cas_check_invariants(cp)) 5145 if (cas_check_invariants(cp))
5110 goto err_out_iounmap; 5146 goto err_out_iounmap;
5147 if (cp->cas_flags & CAS_FLAG_SATURN)
5148 if (cas_saturn_firmware_init(cp))
5149 goto err_out_iounmap;
5111 5150
5112 cp->init_block = (struct cas_init_block *) 5151 cp->init_block = (struct cas_init_block *)
5113 pci_alloc_consistent(pdev, sizeof(struct cas_init_block), 5152 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
@@ -5217,6 +5256,9 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
5217 cp = netdev_priv(dev); 5256 cp = netdev_priv(dev);
5218 unregister_netdev(dev); 5257 unregister_netdev(dev);
5219 5258
5259 if (cp->fw_data)
5260 vfree(cp->fw_data);
5261
5220 mutex_lock(&cp->pm_mutex); 5262 mutex_lock(&cp->pm_mutex);
5221 flush_scheduled_work(); 5263 flush_scheduled_work();
5222 if (cp->hw_running) 5264 if (cp->hw_running)
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
index 552af89ca1cf..fd17a002b453 100644
--- a/drivers/net/cassini.h
+++ b/drivers/net/cassini.h
@@ -2514,1523 +2514,6 @@ static cas_hp_inst_t cas_prog_null[] = { {NULL} };
2514#define CAS_HP_FIRMWARE cas_prog_null 2514#define CAS_HP_FIRMWARE cas_prog_null
2515#endif 2515#endif
2516 2516
2517/* firmware patch for NS_DP83065 */
2518typedef struct cas_saturn_patch {
2519 u16 addr;
2520 u16 val;
2521} cas_saturn_patch_t;
2522
2523#if 1
2524cas_saturn_patch_t cas_saturn_patch[] = {
2525{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009},
2526{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000},
2527{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000},
2528{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff},
2529{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025},
2530{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f},
2531{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026},
2532{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011},
2533{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d},
2534{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086},
2535{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f},
2536{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3},
2537{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047},
2538{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a},
2539{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047},
2540{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033},
2541{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f},
2542{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084},
2543{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004},
2544{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096},
2545{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c},
2546{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027},
2547{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084},
2548{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047},
2549{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a},
2550{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047},
2551{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054},
2552{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f},
2553{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084},
2554{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004},
2555{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6},
2556{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084},
2557{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003},
2558{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025},
2559{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6},
2560{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f},
2561{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7},
2562{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f},
2563{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec},
2564{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa},
2565{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7},
2566{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082},
2567{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001},
2568{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046},
2569{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081},
2570{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a},
2571{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020},
2572{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027},
2573{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084},
2574{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7},
2575{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084},
2576{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047},
2577{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a},
2578{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047},
2579{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad},
2580{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082},
2581{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001},
2582{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084},
2583{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041},
2584{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026},
2585{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023},
2586{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027},
2587{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed},
2588{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083},
2589{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042},
2590{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e},
2591{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084},
2592{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003},
2593{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df},
2594{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6},
2595{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f},
2596{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7},
2597{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f},
2598{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec},
2599{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa},
2600{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011},
2601{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd},
2602{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce},
2603{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff},
2604{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096},
2605{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c},
2606{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027},
2607{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049},
2608{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091},
2609{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6},
2610{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085},
2611{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c},
2612{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1},
2613{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f},
2614{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025},
2615{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016},
2616{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052},
2617{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e},
2618{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7},
2619{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6},
2620{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4},
2621{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083},
2622{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001},
2623{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046},
2624{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081},
2625{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a},
2626{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd},
2627{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025},
2628{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084},
2629{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084},
2630{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018},
2631{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019},
2632{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004},
2633{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e},
2634{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b},
2635{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007},
2636{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027},
2637{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081},
2638{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b},
2639{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007},
2640{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027},
2641{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e},
2642{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd},
2643{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086},
2644{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049},
2645{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012},
2646{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071},
2647{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f},
2648{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084},
2649{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008},
2650{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6},
2651{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4},
2652{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006},
2653{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025},
2654{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016},
2655{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e},
2656{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd},
2657{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026},
2658{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082},
2659{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001},
2660{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084},
2661{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f},
2662{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec},
2663{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa},
2664{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7},
2665{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f},
2666{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd},
2667{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce},
2668{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff},
2669{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096},
2670{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c},
2671{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026},
2672{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012},
2673{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f},
2674{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027},
2675{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023},
2676{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027},
2677{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084},
2678{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051},
2679{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091},
2680{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e},
2681{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce},
2682{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff},
2683{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e},
2684{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd},
2685{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c},
2686{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce},
2687{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff},
2688{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e},
2689{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096},
2690{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c},
2691{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026},
2692{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024},
2693{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026},
2694{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018},
2695{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019},
2696{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001},
2697{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009},
2698{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020},
2699{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081},
2700{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015},
2701{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044},
2702{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1},
2703{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f},
2704{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044},
2705{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029},
2706{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025},
2707{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f},
2708{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047},
2709{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a},
2710{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047},
2711{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034},
2712{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011},
2713{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084},
2714{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002},
2715{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e},
2716{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096},
2717{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc},
2718{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097},
2719{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1},
2720{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086},
2721{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012},
2722{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7},
2723{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010},
2724{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd},
2725{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031},
2726{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e},
2727{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6},
2728{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f},
2729{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7},
2730{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f},
2731{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec},
2732{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa},
2733{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008},
2734{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5},
2735{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002},
2736{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6},
2737{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4},
2738{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084},
2739{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001},
2740{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046},
2741{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081},
2742{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003},
2743{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f},
2744{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd},
2745{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025},
2746{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085},
2747{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044},
2748{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026},
2749{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012},
2750{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001},
2751{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010},
2752{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd},
2753{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce},
2754{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff},
2755{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e},
2756{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096},
2757{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003},
2758{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026},
2759{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012},
2760{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003},
2761{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027},
2762{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085},
2763{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044},
2764{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026},
2765{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012},
2766{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001},
2767{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010},
2768{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce},
2769{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff},
2770{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e},
2771{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6},
2772{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a},
2773{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010},
2774{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085},
2775{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8},
2776{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000},
2777{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084},
2778{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001},
2779{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085},
2780{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046},
2781{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081},
2782{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009},
2783{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030},
2784{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081},
2785{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f},
2786{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044},
2787{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b},
2788{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029},
2789{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026},
2790{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011},
2791{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022},
2792{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6},
2793{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba},
2794{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084},
2795{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d},
2796{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085},
2797{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005},
2798{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e},
2799{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca},
2800{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022},
2801{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000},
2802{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018},
2803{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000},
2804{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046},
2805{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085},
2806{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002},
2807{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085},
2808{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001},
2809{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f},
2810{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004},
2811{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004},
2812{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7},
2813{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086},
2814{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012},
2815{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007},
2816{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006},
2817{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d},
2818{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070},
2819{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba},
2820{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7},
2821{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001},
2822{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001},
2823{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6},
2824{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084},
2825{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002},
2826{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004},
2827{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001},
2828{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001},
2829{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4},
2830{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7},
2831{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6},
2832{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084},
2833{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008},
2834{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6},
2835{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081},
2836{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008},
2837{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7},
2838{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e},
2839{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086},
2840{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040},
2841{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e},
2842{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7},
2843{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f},
2844{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082},
2845{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f},
2846{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f},
2847{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f},
2848{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f},
2849{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f},
2850{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f},
2851{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f},
2852{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f},
2853{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f},
2854{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f},
2855{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f},
2856{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f},
2857{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f},
2858{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012},
2859{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010},
2860{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004},
2861{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7},
2862{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7},
2863{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7},
2864{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7},
2865{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086},
2866{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012},
2867{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012},
2868{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7},
2869{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004},
2870{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004},
2871{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001},
2872{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001},
2873{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008},
2874{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081},
2875{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b},
2876{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce},
2877{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd},
2878{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e},
2879{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081},
2880{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b},
2881{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce},
2882{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd},
2883{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e},
2884{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081},
2885{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b},
2886{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce},
2887{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd},
2888{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e},
2889{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081},
2890{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b},
2891{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce},
2892{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd},
2893{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e},
2894{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081},
2895{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b},
2896{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce},
2897{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd},
2898{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e},
2899{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081},
2900{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b},
2901{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce},
2902{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd},
2903{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e},
2904{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081},
2905{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b},
2906{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce},
2907{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd},
2908{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e},
2909{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081},
2910{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008},
2911{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce},
2912{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd},
2913{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6},
2914{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081},
2915{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003},
2916{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047},
2917{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009},
2918{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081},
2919{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006},
2920{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009},
2921{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe},
2922{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006},
2923{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081},
2924{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008},
2925{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7},
2926{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e},
2927{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6},
2928{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026},
2929{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f},
2930{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7},
2931{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e},
2932{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6},
2933{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084},
2934{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f},
2935{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b},
2936{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012},
2937{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012},
2938{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc},
2939{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009},
2940{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe},
2941{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070},
2942{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f},
2943{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c},
2944{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f},
2945{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084},
2946{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f},
2947{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c},
2948{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f},
2949{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1},
2950{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003},
2951{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040},
2952{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f},
2953{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027},
2954{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b},
2955{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081},
2956{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b},
2957{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027},
2958{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087},
2959{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f},
2960{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002},
2961{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a},
2962{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7},
2963{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086},
2964{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f},
2965{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012},
2966{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075},
2967{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7},
2968{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020},
2969{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f},
2970{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002},
2971{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071},
2972{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047},
2973{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097},
2974{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089},
2975{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f},
2976{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089},
2977{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f},
2978{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089},
2979{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f},
2980{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089},
2981{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f},
2982{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089},
2983{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7},
2984{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7},
2985{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6},
2986{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027},
2987{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f},
2988{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f},
2989{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f},
2990{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d},
2991{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078},
2992{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c},
2993{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6},
2994{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027},
2995{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f},
2996{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f},
2997{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f},
2998{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b},
2999{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d},
3000{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075},
3001{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c},
3002{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a},
3003{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027},
3004{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f},
3005{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f},
3006{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c},
3007{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083},
3008{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075},
3009{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078},
3010{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b},
3011{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc},
3012{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d},
3013{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000},
3014{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070},
3015{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072},
3016{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e},
3017{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6},
3018{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026},
3019{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce},
3020{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd},
3021{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e},
3022{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6},
3023{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026},
3024{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce},
3025{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd},
3026{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e},
3027{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6},
3028{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026},
3029{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce},
3030{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd},
3031{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e},
3032{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086},
3033{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040},
3034{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x0000},
3035{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075},
3036{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e},
3037{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012},
3038{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8},
3039{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012},
3040{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f},
3041{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007},
3042{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048},
3043{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6},
3044{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4},
3045{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7},
3046{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6},
3047{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081},
3048{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf},
3049{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005},
3050{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b},
3051{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005},
3052{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6},
3053{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd},
3054{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086},
3055{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f},
3056{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089},
3057{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002},
3058{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077},
3059{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094},
3060{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6},
3061{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd},
3062{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce},
3063{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6},
3064{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001},
3065{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081},
3066{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003},
3067{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066},
3068{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8},
3069{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084},
3070{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b},
3071{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079},
3072{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008},
3073{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e},
3074{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6},
3075{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a},
3076{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012},
3077{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012},
3078{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb},
3079{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7},
3080{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6},
3081{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036},
3082{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c},
3083{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7},
3084{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086},
3085{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012},
3086{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012},
3087{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001},
3088{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001},
3089{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe},
3090{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004},
3091{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004},
3092{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba},
3093{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7},
3094{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086},
3095{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012},
3096{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012},
3097{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7},
3098{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6},
3099{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084},
3100{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008},
3101{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c},
3102{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026},
3103{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076},
3104{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e},
3105{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e},
3106{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6},
3107{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081},
3108{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c},
3109{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7},
3110{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d},
3111{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb},
3112{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004},
3113{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7},
3114{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce},
3115{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6},
3116{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081},
3117{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005},
3118{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6},
3119{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6},
3120{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084},
3121{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012},
3122{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083},
3123{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c},
3124{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000},
3125{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006},
3126{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b},
3127{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083},
3128{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041},
3129{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e},
3130{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7},
3131{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086},
3132{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f},
3133{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012},
3134{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f},
3135{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c},
3136{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7},
3137{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086},
3138{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a},
3139{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012},
3140{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009},
3141{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c},
3142{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d},
3143{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c},
3144{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e},
3145{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027},
3146{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020},
3147{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e},
3148{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c},
3149{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba},
3150{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7},
3151{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6},
3152{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048},
3153{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d},
3154{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021},
3155{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004},
3156{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7},
3157{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd},
3158{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f},
3159{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000},
3160{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000},
3161{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008},
3162{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5},
3163{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c},
3164{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba},
3165{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7},
3166{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6},
3167{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084},
3168{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001},
3169{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006},
3170{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7},
3171{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036},
3172{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7},
3173{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032},
3174{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026},
3175{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f},
3176{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089},
3177{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001},
3178{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1},
3179{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c},
3180{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027},
3181{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f},
3182{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005},
3183{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080},
3184{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080},
3185{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7},
3186{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6},
3187{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005},
3188{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f},
3189{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f},
3190{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4},
3191{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b},
3192{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007},
3193{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f},
3194{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000},
3195{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000},
3196{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000},
3197{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6},
3198{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6},
3199{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7},
3200{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001},
3201{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018},
3202{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018},
3203{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7},
3204{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6},
3205{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007},
3206{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4},
3207{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054},
3208{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7},
3209{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a},
3210{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039},
3211{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084},
3212{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022},
3213{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7},
3214{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6},
3215{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7},
3216{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6},
3217{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4},
3218{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f},
3219{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072},
3220{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072},
3221{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071},
3222{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027},
3223{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001},
3224{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081},
3225{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024},
3226{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070},
3227{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096},
3228{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080},
3229{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064},
3230{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070},
3231{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096},
3232{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010},
3233{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064},
3234{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070},
3235{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096},
3236{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020},
3237{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064},
3238{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070},
3239{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096},
3240{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040},
3241{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074},
3242{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074},
3243{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078},
3244{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6},
3245{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085},
3246{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af},
3247{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4},
3248{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6},
3249{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081},
3250{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036},
3251{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026},
3252{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022},
3253{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044},
3254{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022},
3255{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020},
3256{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081},
3257{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d},
3258{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084},
3259{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044},
3260{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022},
3261{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020},
3262{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081},
3263{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f},
3264{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084},
3265{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044},
3266{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6},
3267{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f},
3268{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022},
3269{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c},
3270{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006},
3271{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed},
3272{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007},
3273{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9},
3274{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006},
3275{0x8aca, 0x0039}, { 0x0, 0x0 }
3276};
3277#else
3278cas_saturn_patch_t cas_saturn_patch[] = {
3279{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009},
3280{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000},
3281{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000},
3282{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff},
3283{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025},
3284{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f},
3285{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026},
3286{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011},
3287{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d},
3288{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086},
3289{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f},
3290{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3},
3291{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047},
3292{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a},
3293{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047},
3294{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033},
3295{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f},
3296{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084},
3297{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004},
3298{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096},
3299{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c},
3300{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027},
3301{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084},
3302{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047},
3303{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a},
3304{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047},
3305{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054},
3306{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f},
3307{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084},
3308{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004},
3309{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6},
3310{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084},
3311{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003},
3312{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025},
3313{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6},
3314{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f},
3315{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7},
3316{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f},
3317{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec},
3318{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa},
3319{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7},
3320{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082},
3321{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001},
3322{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046},
3323{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081},
3324{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a},
3325{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020},
3326{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027},
3327{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084},
3328{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7},
3329{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084},
3330{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047},
3331{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a},
3332{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047},
3333{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad},
3334{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082},
3335{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001},
3336{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084},
3337{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041},
3338{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026},
3339{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023},
3340{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027},
3341{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed},
3342{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083},
3343{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042},
3344{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e},
3345{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084},
3346{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003},
3347{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df},
3348{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6},
3349{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f},
3350{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7},
3351{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f},
3352{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec},
3353{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa},
3354{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011},
3355{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd},
3356{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce},
3357{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff},
3358{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096},
3359{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c},
3360{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027},
3361{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049},
3362{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091},
3363{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6},
3364{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085},
3365{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c},
3366{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1},
3367{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f},
3368{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025},
3369{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016},
3370{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052},
3371{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e},
3372{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7},
3373{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6},
3374{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4},
3375{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083},
3376{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001},
3377{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046},
3378{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081},
3379{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a},
3380{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd},
3381{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025},
3382{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084},
3383{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084},
3384{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018},
3385{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019},
3386{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004},
3387{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e},
3388{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b},
3389{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007},
3390{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027},
3391{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081},
3392{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b},
3393{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007},
3394{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027},
3395{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e},
3396{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd},
3397{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086},
3398{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049},
3399{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012},
3400{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071},
3401{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f},
3402{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084},
3403{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008},
3404{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6},
3405{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4},
3406{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006},
3407{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025},
3408{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016},
3409{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e},
3410{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd},
3411{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026},
3412{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082},
3413{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001},
3414{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084},
3415{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f},
3416{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec},
3417{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa},
3418{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7},
3419{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f},
3420{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd},
3421{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce},
3422{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff},
3423{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096},
3424{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c},
3425{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026},
3426{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012},
3427{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f},
3428{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027},
3429{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023},
3430{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027},
3431{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084},
3432{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051},
3433{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091},
3434{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e},
3435{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce},
3436{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff},
3437{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e},
3438{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd},
3439{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c},
3440{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce},
3441{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff},
3442{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e},
3443{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096},
3444{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c},
3445{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026},
3446{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024},
3447{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026},
3448{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018},
3449{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019},
3450{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001},
3451{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009},
3452{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020},
3453{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081},
3454{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015},
3455{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044},
3456{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1},
3457{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f},
3458{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044},
3459{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029},
3460{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025},
3461{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f},
3462{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047},
3463{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a},
3464{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047},
3465{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034},
3466{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011},
3467{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084},
3468{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002},
3469{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e},
3470{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096},
3471{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc},
3472{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097},
3473{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1},
3474{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086},
3475{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012},
3476{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7},
3477{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010},
3478{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd},
3479{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031},
3480{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e},
3481{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6},
3482{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f},
3483{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7},
3484{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f},
3485{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec},
3486{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa},
3487{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008},
3488{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5},
3489{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002},
3490{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6},
3491{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4},
3492{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084},
3493{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001},
3494{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046},
3495{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081},
3496{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003},
3497{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f},
3498{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd},
3499{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025},
3500{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085},
3501{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044},
3502{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026},
3503{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012},
3504{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001},
3505{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010},
3506{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd},
3507{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce},
3508{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff},
3509{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e},
3510{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096},
3511{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003},
3512{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026},
3513{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012},
3514{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003},
3515{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027},
3516{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085},
3517{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044},
3518{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026},
3519{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012},
3520{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001},
3521{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010},
3522{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce},
3523{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff},
3524{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e},
3525{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6},
3526{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a},
3527{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010},
3528{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085},
3529{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8},
3530{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000},
3531{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084},
3532{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001},
3533{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085},
3534{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046},
3535{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081},
3536{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009},
3537{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030},
3538{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081},
3539{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f},
3540{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044},
3541{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b},
3542{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029},
3543{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026},
3544{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011},
3545{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022},
3546{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6},
3547{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba},
3548{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084},
3549{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d},
3550{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085},
3551{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005},
3552{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e},
3553{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca},
3554{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022},
3555{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000},
3556{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018},
3557{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000},
3558{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046},
3559{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085},
3560{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002},
3561{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085},
3562{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001},
3563{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f},
3564{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004},
3565{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004},
3566{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7},
3567{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086},
3568{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012},
3569{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007},
3570{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006},
3571{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d},
3572{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070},
3573{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba},
3574{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7},
3575{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001},
3576{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001},
3577{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6},
3578{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084},
3579{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002},
3580{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004},
3581{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001},
3582{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001},
3583{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4},
3584{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7},
3585{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6},
3586{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084},
3587{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008},
3588{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6},
3589{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081},
3590{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008},
3591{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7},
3592{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e},
3593{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086},
3594{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040},
3595{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e},
3596{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7},
3597{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f},
3598{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082},
3599{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f},
3600{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f},
3601{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f},
3602{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f},
3603{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f},
3604{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f},
3605{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f},
3606{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f},
3607{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f},
3608{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f},
3609{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f},
3610{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f},
3611{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f},
3612{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012},
3613{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010},
3614{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004},
3615{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7},
3616{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7},
3617{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7},
3618{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7},
3619{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086},
3620{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012},
3621{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012},
3622{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7},
3623{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004},
3624{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004},
3625{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001},
3626{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001},
3627{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008},
3628{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081},
3629{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b},
3630{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce},
3631{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd},
3632{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e},
3633{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081},
3634{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b},
3635{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce},
3636{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd},
3637{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e},
3638{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081},
3639{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b},
3640{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce},
3641{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd},
3642{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e},
3643{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081},
3644{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b},
3645{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce},
3646{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd},
3647{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e},
3648{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081},
3649{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b},
3650{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce},
3651{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd},
3652{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e},
3653{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081},
3654{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b},
3655{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce},
3656{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd},
3657{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e},
3658{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081},
3659{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b},
3660{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce},
3661{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd},
3662{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e},
3663{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081},
3664{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008},
3665{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce},
3666{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd},
3667{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6},
3668{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081},
3669{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003},
3670{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047},
3671{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009},
3672{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081},
3673{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006},
3674{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009},
3675{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe},
3676{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006},
3677{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081},
3678{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008},
3679{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7},
3680{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e},
3681{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6},
3682{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026},
3683{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f},
3684{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7},
3685{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e},
3686{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6},
3687{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084},
3688{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f},
3689{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b},
3690{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012},
3691{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012},
3692{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc},
3693{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009},
3694{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe},
3695{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070},
3696{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f},
3697{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c},
3698{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f},
3699{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084},
3700{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f},
3701{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c},
3702{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f},
3703{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1},
3704{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003},
3705{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040},
3706{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f},
3707{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027},
3708{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b},
3709{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081},
3710{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b},
3711{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027},
3712{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087},
3713{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f},
3714{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002},
3715{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a},
3716{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7},
3717{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086},
3718{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f},
3719{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012},
3720{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075},
3721{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7},
3722{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020},
3723{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f},
3724{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002},
3725{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071},
3726{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047},
3727{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097},
3728{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089},
3729{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f},
3730{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089},
3731{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f},
3732{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089},
3733{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f},
3734{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089},
3735{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f},
3736{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089},
3737{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7},
3738{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7},
3739{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6},
3740{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027},
3741{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f},
3742{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f},
3743{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f},
3744{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d},
3745{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078},
3746{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c},
3747{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6},
3748{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027},
3749{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f},
3750{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f},
3751{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f},
3752{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b},
3753{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d},
3754{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075},
3755{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c},
3756{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a},
3757{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027},
3758{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f},
3759{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f},
3760{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c},
3761{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083},
3762{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075},
3763{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078},
3764{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b},
3765{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc},
3766{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d},
3767{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000},
3768{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070},
3769{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072},
3770{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e},
3771{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6},
3772{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026},
3773{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce},
3774{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd},
3775{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e},
3776{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6},
3777{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026},
3778{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce},
3779{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd},
3780{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e},
3781{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6},
3782{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026},
3783{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce},
3784{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd},
3785{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e},
3786{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086},
3787{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040},
3788{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x006e},
3789{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075},
3790{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e},
3791{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012},
3792{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8},
3793{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012},
3794{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f},
3795{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007},
3796{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048},
3797{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6},
3798{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4},
3799{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7},
3800{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6},
3801{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081},
3802{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf},
3803{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005},
3804{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b},
3805{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005},
3806{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6},
3807{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd},
3808{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086},
3809{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f},
3810{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089},
3811{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002},
3812{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077},
3813{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094},
3814{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6},
3815{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd},
3816{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce},
3817{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6},
3818{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001},
3819{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081},
3820{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003},
3821{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066},
3822{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8},
3823{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084},
3824{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b},
3825{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079},
3826{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008},
3827{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e},
3828{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6},
3829{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a},
3830{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012},
3831{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012},
3832{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb},
3833{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7},
3834{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6},
3835{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036},
3836{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c},
3837{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7},
3838{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086},
3839{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012},
3840{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012},
3841{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001},
3842{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001},
3843{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe},
3844{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004},
3845{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004},
3846{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba},
3847{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7},
3848{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086},
3849{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012},
3850{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012},
3851{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7},
3852{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6},
3853{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084},
3854{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008},
3855{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c},
3856{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026},
3857{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076},
3858{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e},
3859{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e},
3860{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6},
3861{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081},
3862{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c},
3863{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7},
3864{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d},
3865{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb},
3866{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004},
3867{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7},
3868{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce},
3869{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6},
3870{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081},
3871{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005},
3872{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6},
3873{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6},
3874{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084},
3875{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012},
3876{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083},
3877{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c},
3878{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000},
3879{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006},
3880{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b},
3881{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083},
3882{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041},
3883{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e},
3884{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7},
3885{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086},
3886{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f},
3887{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012},
3888{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f},
3889{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c},
3890{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7},
3891{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086},
3892{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a},
3893{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012},
3894{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009},
3895{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c},
3896{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d},
3897{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c},
3898{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e},
3899{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027},
3900{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020},
3901{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e},
3902{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c},
3903{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba},
3904{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7},
3905{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6},
3906{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048},
3907{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d},
3908{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021},
3909{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004},
3910{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7},
3911{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd},
3912{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f},
3913{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000},
3914{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000},
3915{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008},
3916{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5},
3917{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c},
3918{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba},
3919{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7},
3920{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6},
3921{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084},
3922{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001},
3923{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006},
3924{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7},
3925{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036},
3926{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7},
3927{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032},
3928{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026},
3929{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f},
3930{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089},
3931{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001},
3932{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1},
3933{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c},
3934{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027},
3935{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f},
3936{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005},
3937{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080},
3938{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080},
3939{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7},
3940{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6},
3941{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005},
3942{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f},
3943{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f},
3944{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4},
3945{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b},
3946{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007},
3947{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f},
3948{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000},
3949{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000},
3950{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000},
3951{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6},
3952{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6},
3953{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7},
3954{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001},
3955{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018},
3956{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018},
3957{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7},
3958{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6},
3959{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007},
3960{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4},
3961{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054},
3962{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7},
3963{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a},
3964{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039},
3965{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084},
3966{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022},
3967{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7},
3968{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6},
3969{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7},
3970{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6},
3971{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4},
3972{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f},
3973{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072},
3974{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072},
3975{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071},
3976{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027},
3977{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001},
3978{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081},
3979{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024},
3980{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070},
3981{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096},
3982{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080},
3983{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064},
3984{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070},
3985{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096},
3986{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010},
3987{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064},
3988{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070},
3989{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096},
3990{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020},
3991{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064},
3992{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070},
3993{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096},
3994{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040},
3995{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074},
3996{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074},
3997{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078},
3998{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6},
3999{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085},
4000{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af},
4001{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4},
4002{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6},
4003{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081},
4004{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036},
4005{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026},
4006{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022},
4007{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044},
4008{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022},
4009{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020},
4010{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081},
4011{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d},
4012{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084},
4013{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044},
4014{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022},
4015{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020},
4016{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081},
4017{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f},
4018{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084},
4019{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044},
4020{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6},
4021{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f},
4022{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022},
4023{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c},
4024{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006},
4025{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed},
4026{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007},
4027{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9},
4028{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006},
4029{0x8aca, 0x0039}, { 0x0, 0x0 }
4030};
4031#endif
4032
4033
4034/* phy types */ 2517/* phy types */
4035#define CAS_PHY_UNKNOWN 0x00 2518#define CAS_PHY_UNKNOWN 0x00
4036#define CAS_PHY_SERDES 0x01 2519#define CAS_PHY_SERDES 0x01
@@ -4389,6 +2872,11 @@ struct cas {
4389 dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS]; 2872 dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
4390 struct pci_dev *pdev; 2873 struct pci_dev *pdev;
4391 struct net_device *dev; 2874 struct net_device *dev;
2875
2876 /* Firmware Info */
2877 u16 fw_load_addr;
2878 u32 fw_size;
2879 u8 *fw_data;
4392}; 2880};
4393 2881
4394#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1)) 2882#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1))
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index ea6144a9565e..b0b66766ed27 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1397,9 +1397,7 @@ net_open(struct net_device *dev)
1397release_dma: 1397release_dma:
1398#if ALLOW_DMA 1398#if ALLOW_DMA
1399 free_dma(dev->dma); 1399 free_dma(dev->dma);
1400#endif
1401release_irq: 1400release_irq:
1402#if ALLOW_DMA
1403 release_dma_buff(lp); 1401 release_dma_buff(lp);
1404#endif 1402#endif
1405 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON)); 1403 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 271140433b09..4f5cc6987ec1 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -124,8 +124,7 @@ struct sge_rspq { /* state for an SGE response queue */
124 dma_addr_t phys_addr; /* physical address of the ring */ 124 dma_addr_t phys_addr; /* physical address of the ring */
125 unsigned int cntxt_id; /* SGE context id for the response q */ 125 unsigned int cntxt_id; /* SGE context id for the response q */
126 spinlock_t lock; /* guards response processing */ 126 spinlock_t lock; /* guards response processing */
127 struct sk_buff *rx_head; /* offload packet receive queue head */ 127 struct sk_buff_head rx_queue; /* offload packet receive queue */
128 struct sk_buff *rx_tail; /* offload packet receive queue tail */
129 struct sk_buff *pg_skb; /* used to build frag list in napi handler */ 128 struct sk_buff *pg_skb; /* used to build frag list in napi handler */
130 129
131 unsigned long offload_pkts; 130 unsigned long offload_pkts;
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index c5b3de1bb456..0f6fd63b2847 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1018,7 +1018,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
1018 1018
1019 skb = alloc_skb(sizeof(*req), GFP_ATOMIC); 1019 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
1020 if (!skb) { 1020 if (!skb) {
1021 printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__); 1021 printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
1022 return; 1022 return;
1023 } 1023 }
1024 skb->priority = CPL_PRIORITY_CONTROL; 1024 skb->priority = CPL_PRIORITY_CONTROL;
@@ -1049,14 +1049,14 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1049 return; 1049 return;
1050 if (!is_offloading(newdev)) { 1050 if (!is_offloading(newdev)) {
1051 printk(KERN_WARNING "%s: Redirect to non-offload " 1051 printk(KERN_WARNING "%s: Redirect to non-offload "
1052 "device ignored.\n", __FUNCTION__); 1052 "device ignored.\n", __func__);
1053 return; 1053 return;
1054 } 1054 }
1055 tdev = dev2t3cdev(olddev); 1055 tdev = dev2t3cdev(olddev);
1056 BUG_ON(!tdev); 1056 BUG_ON(!tdev);
1057 if (tdev != dev2t3cdev(newdev)) { 1057 if (tdev != dev2t3cdev(newdev)) {
1058 printk(KERN_WARNING "%s: Redirect to different " 1058 printk(KERN_WARNING "%s: Redirect to different "
1059 "offload device ignored.\n", __FUNCTION__); 1059 "offload device ignored.\n", __func__);
1060 return; 1060 return;
1061 } 1061 }
1062 1062
@@ -1064,7 +1064,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1064 e = t3_l2t_get(tdev, new->neighbour, newdev); 1064 e = t3_l2t_get(tdev, new->neighbour, newdev);
1065 if (!e) { 1065 if (!e) {
1066 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", 1066 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
1067 __FUNCTION__); 1067 __func__);
1068 return; 1068 return;
1069 } 1069 }
1070 1070
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 825e510bd9ed..b2c5314582aa 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -86,6 +86,7 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
86 struct l2t_entry *e) 86 struct l2t_entry *e)
87{ 87{
88 struct cpl_l2t_write_req *req; 88 struct cpl_l2t_write_req *req;
89 struct sk_buff *tmp;
89 90
90 if (!skb) { 91 if (!skb) {
91 skb = alloc_skb(sizeof(*req), GFP_ATOMIC); 92 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
@@ -103,13 +104,11 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
103 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); 104 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
104 skb->priority = CPL_PRIORITY_CONTROL; 105 skb->priority = CPL_PRIORITY_CONTROL;
105 cxgb3_ofld_send(dev, skb); 106 cxgb3_ofld_send(dev, skb);
106 while (e->arpq_head) { 107
107 skb = e->arpq_head; 108 skb_queue_walk_safe(&e->arpq, skb, tmp) {
108 e->arpq_head = skb->next; 109 __skb_unlink(skb, &e->arpq);
109 skb->next = NULL;
110 cxgb3_ofld_send(dev, skb); 110 cxgb3_ofld_send(dev, skb);
111 } 111 }
112 e->arpq_tail = NULL;
113 e->state = L2T_STATE_VALID; 112 e->state = L2T_STATE_VALID;
114 113
115 return 0; 114 return 0;
@@ -121,12 +120,7 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
121 */ 120 */
122static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) 121static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
123{ 122{
124 skb->next = NULL; 123 __skb_queue_tail(&e->arpq, skb);
125 if (e->arpq_head)
126 e->arpq_tail->next = skb;
127 else
128 e->arpq_head = skb;
129 e->arpq_tail = skb;
130} 124}
131 125
132int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, 126int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
@@ -167,7 +161,7 @@ again:
167 break; 161 break;
168 162
169 spin_lock_bh(&e->lock); 163 spin_lock_bh(&e->lock);
170 if (e->arpq_head) 164 if (!skb_queue_empty(&e->arpq))
171 setup_l2e_send_pending(dev, skb, e); 165 setup_l2e_send_pending(dev, skb, e);
172 else /* we lost the race */ 166 else /* we lost the race */
173 __kfree_skb(skb); 167 __kfree_skb(skb);
@@ -357,14 +351,14 @@ EXPORT_SYMBOL(t3_l2t_get);
357 * XXX: maybe we should abandon the latter behavior and just require a failure 351 * XXX: maybe we should abandon the latter behavior and just require a failure
358 * handler. 352 * handler.
359 */ 353 */
360static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq) 354static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
361{ 355{
362 while (arpq) { 356 struct sk_buff *skb, *tmp;
363 struct sk_buff *skb = arpq; 357
358 skb_queue_walk_safe(arpq, skb, tmp) {
364 struct l2t_skb_cb *cb = L2T_SKB_CB(skb); 359 struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
365 360
366 arpq = skb->next; 361 __skb_unlink(skb, arpq);
367 skb->next = NULL;
368 if (cb->arp_failure_handler) 362 if (cb->arp_failure_handler)
369 cb->arp_failure_handler(dev, skb); 363 cb->arp_failure_handler(dev, skb);
370 else 364 else
@@ -378,8 +372,8 @@ static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
378 */ 372 */
379void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh) 373void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
380{ 374{
375 struct sk_buff_head arpq;
381 struct l2t_entry *e; 376 struct l2t_entry *e;
382 struct sk_buff *arpq = NULL;
383 struct l2t_data *d = L2DATA(dev); 377 struct l2t_data *d = L2DATA(dev);
384 u32 addr = *(u32 *) neigh->primary_key; 378 u32 addr = *(u32 *) neigh->primary_key;
385 int ifidx = neigh->dev->ifindex; 379 int ifidx = neigh->dev->ifindex;
@@ -395,6 +389,8 @@ void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
395 return; 389 return;
396 390
397found: 391found:
392 __skb_queue_head_init(&arpq);
393
398 read_unlock(&d->lock); 394 read_unlock(&d->lock);
399 if (atomic_read(&e->refcnt)) { 395 if (atomic_read(&e->refcnt)) {
400 if (neigh != e->neigh) 396 if (neigh != e->neigh)
@@ -402,8 +398,7 @@ found:
402 398
403 if (e->state == L2T_STATE_RESOLVING) { 399 if (e->state == L2T_STATE_RESOLVING) {
404 if (neigh->nud_state & NUD_FAILED) { 400 if (neigh->nud_state & NUD_FAILED) {
405 arpq = e->arpq_head; 401 skb_queue_splice_init(&e->arpq, &arpq);
406 e->arpq_head = e->arpq_tail = NULL;
407 } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE)) 402 } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
408 setup_l2e_send_pending(dev, NULL, e); 403 setup_l2e_send_pending(dev, NULL, e);
409 } else { 404 } else {
@@ -415,8 +410,8 @@ found:
415 } 410 }
416 spin_unlock_bh(&e->lock); 411 spin_unlock_bh(&e->lock);
417 412
418 if (arpq) 413 if (!skb_queue_empty(&arpq))
419 handle_failed_resolution(dev, arpq); 414 handle_failed_resolution(dev, &arpq);
420} 415}
421 416
422struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) 417struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index d79001336cfd..42ce65f76a87 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -64,8 +64,7 @@ struct l2t_entry {
64 struct neighbour *neigh; /* associated neighbour */ 64 struct neighbour *neigh; /* associated neighbour */
65 struct l2t_entry *first; /* start of hash chain */ 65 struct l2t_entry *first; /* start of hash chain */
66 struct l2t_entry *next; /* next l2t_entry on chain */ 66 struct l2t_entry *next; /* next l2t_entry on chain */
67 struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ 67 struct sk_buff_head arpq; /* queue of packets awaiting resolution */
68 struct sk_buff *arpq_tail;
69 spinlock_t lock; 68 spinlock_t lock;
70 atomic_t refcnt; /* entry reference count */ 69 atomic_t refcnt; /* entry reference count */
71 u8 dmac[6]; /* neighbour's MAC address */ 70 u8 dmac[6]; /* neighbour's MAC address */
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 1b0861d73ab7..89efd04be4e0 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1704,16 +1704,15 @@ int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1704 */ 1704 */
1705static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) 1705static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1706{ 1706{
1707 skb->next = skb->prev = NULL; 1707 int was_empty = skb_queue_empty(&q->rx_queue);
1708 if (q->rx_tail) 1708
1709 q->rx_tail->next = skb; 1709 __skb_queue_tail(&q->rx_queue, skb);
1710 else { 1710
1711 if (was_empty) {
1711 struct sge_qset *qs = rspq_to_qset(q); 1712 struct sge_qset *qs = rspq_to_qset(q);
1712 1713
1713 napi_schedule(&qs->napi); 1714 napi_schedule(&qs->napi);
1714 q->rx_head = skb;
1715 } 1715 }
1716 q->rx_tail = skb;
1717} 1716}
1718 1717
1719/** 1718/**
@@ -1754,26 +1753,29 @@ static int ofld_poll(struct napi_struct *napi, int budget)
1754 int work_done = 0; 1753 int work_done = 0;
1755 1754
1756 while (work_done < budget) { 1755 while (work_done < budget) {
1757 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE]; 1756 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1757 struct sk_buff_head queue;
1758 int ngathered; 1758 int ngathered;
1759 1759
1760 spin_lock_irq(&q->lock); 1760 spin_lock_irq(&q->lock);
1761 head = q->rx_head; 1761 __skb_queue_head_init(&queue);
1762 if (!head) { 1762 skb_queue_splice_init(&q->rx_queue, &queue);
1763 if (skb_queue_empty(&queue)) {
1763 napi_complete(napi); 1764 napi_complete(napi);
1764 spin_unlock_irq(&q->lock); 1765 spin_unlock_irq(&q->lock);
1765 return work_done; 1766 return work_done;
1766 } 1767 }
1767
1768 tail = q->rx_tail;
1769 q->rx_head = q->rx_tail = NULL;
1770 spin_unlock_irq(&q->lock); 1768 spin_unlock_irq(&q->lock);
1771 1769
1772 for (ngathered = 0; work_done < budget && head; work_done++) { 1770 ngathered = 0;
1773 prefetch(head->data); 1771 skb_queue_walk_safe(&queue, skb, tmp) {
1774 skbs[ngathered] = head; 1772 if (work_done >= budget)
1775 head = head->next; 1773 break;
1776 skbs[ngathered]->next = NULL; 1774 work_done++;
1775
1776 __skb_unlink(skb, &queue);
1777 prefetch(skb->data);
1778 skbs[ngathered] = skb;
1777 if (++ngathered == RX_BUNDLE_SIZE) { 1779 if (++ngathered == RX_BUNDLE_SIZE) {
1778 q->offload_bundles++; 1780 q->offload_bundles++;
1779 adapter->tdev.recv(&adapter->tdev, skbs, 1781 adapter->tdev.recv(&adapter->tdev, skbs,
@@ -1781,12 +1783,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
1781 ngathered = 0; 1783 ngathered = 0;
1782 } 1784 }
1783 } 1785 }
1784 if (head) { /* splice remaining packets back onto Rx queue */ 1786 if (!skb_queue_empty(&queue)) {
1787 /* splice remaining packets back onto Rx queue */
1785 spin_lock_irq(&q->lock); 1788 spin_lock_irq(&q->lock);
1786 tail->next = q->rx_head; 1789 skb_queue_splice(&queue, &q->rx_queue);
1787 if (!q->rx_head)
1788 q->rx_tail = tail;
1789 q->rx_head = head;
1790 spin_unlock_irq(&q->lock); 1790 spin_unlock_irq(&q->lock);
1791 } 1791 }
1792 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); 1792 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
@@ -1937,38 +1937,6 @@ static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
1937 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2); 1937 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
1938} 1938}
1939 1939
1940#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
1941 TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
1942 TCP_FLAG_SYN | TCP_FLAG_FIN)
1943#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
1944 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
1945
1946/**
1947 * lro_segment_ok - check if a TCP segment is eligible for LRO
1948 * @tcph: the TCP header of the packet
1949 *
1950 * Returns true if a TCP packet is eligible for LRO. This requires that
1951 * the packet have only the ACK flag set and no TCP options besides
1952 * time stamps.
1953 */
1954static inline int lro_segment_ok(const struct tcphdr *tcph)
1955{
1956 int optlen;
1957
1958 if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
1959 return 0;
1960
1961 optlen = (tcph->doff << 2) - sizeof(*tcph);
1962 if (optlen) {
1963 const u32 *opt = (const u32 *)(tcph + 1);
1964
1965 if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
1966 *opt != htonl(TSTAMP_WORD) || !opt[2])
1967 return 0;
1968 }
1969 return 1;
1970}
1971
1972static int t3_get_lro_header(void **eh, void **iph, void **tcph, 1940static int t3_get_lro_header(void **eh, void **iph, void **tcph,
1973 u64 *hdr_flags, void *priv) 1941 u64 *hdr_flags, void *priv)
1974{ 1942{
@@ -1981,9 +1949,6 @@ static int t3_get_lro_header(void **eh, void **iph, void **tcph,
1981 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1); 1949 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
1982 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1); 1950 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
1983 1951
1984 if (!lro_segment_ok(*tcph))
1985 return -1;
1986
1987 *hdr_flags = LRO_IPV4 | LRO_TCP; 1952 *hdr_flags = LRO_IPV4 | LRO_TCP;
1988 return 0; 1953 return 0;
1989} 1954}
@@ -2934,6 +2899,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2934 q->rspq.gen = 1; 2899 q->rspq.gen = 1;
2935 q->rspq.size = p->rspq_size; 2900 q->rspq.size = p->rspq_size;
2936 spin_lock_init(&q->rspq.lock); 2901 spin_lock_init(&q->rspq.lock);
2902 skb_queue_head_init(&q->rspq.rx_queue);
2937 2903
2938 q->txq[TXQ_ETH].stop_thres = nports * 2904 q->txq[TXQ_ETH].stop_thres = nports *
2939 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); 2905 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 453115acaad2..7d7dfa512bfa 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -191,7 +191,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
191#define DPRINTK(nlevel, klevel, fmt, args...) \ 191#define DPRINTK(nlevel, klevel, fmt, args...) \
192 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \ 192 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
193 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \ 193 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
194 __FUNCTION__ , ## args)) 194 __func__ , ## args))
195 195
196#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ 196#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
197 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ 197 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 462351ca2c81..b2c910c52df9 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -38,6 +38,7 @@
38 * 82573V Gigabit Ethernet Controller (Copper) 38 * 82573V Gigabit Ethernet Controller (Copper)
39 * 82573E Gigabit Ethernet Controller (Copper) 39 * 82573E Gigabit Ethernet Controller (Copper)
40 * 82573L Gigabit Ethernet Controller 40 * 82573L Gigabit Ethernet Controller
41 * 82574L Gigabit Network Connection
41 */ 42 */
42 43
43#include <linux/netdevice.h> 44#include <linux/netdevice.h>
@@ -54,6 +55,8 @@
54 55
55#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 56#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
56 57
58#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
59
57static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); 60static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
58static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); 61static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
59static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); 62static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
@@ -63,6 +66,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
63static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); 66static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
64static s32 e1000_setup_link_82571(struct e1000_hw *hw); 67static s32 e1000_setup_link_82571(struct e1000_hw *hw);
65static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); 68static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
69static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
70static s32 e1000_led_on_82574(struct e1000_hw *hw);
66 71
67/** 72/**
68 * e1000_init_phy_params_82571 - Init PHY func ptrs. 73 * e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -92,6 +97,9 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
92 case e1000_82573: 97 case e1000_82573:
93 phy->type = e1000_phy_m88; 98 phy->type = e1000_phy_m88;
94 break; 99 break;
100 case e1000_82574:
101 phy->type = e1000_phy_bm;
102 break;
95 default: 103 default:
96 return -E1000_ERR_PHY; 104 return -E1000_ERR_PHY;
97 break; 105 break;
@@ -111,6 +119,10 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
111 if (phy->id != M88E1111_I_PHY_ID) 119 if (phy->id != M88E1111_I_PHY_ID)
112 return -E1000_ERR_PHY; 120 return -E1000_ERR_PHY;
113 break; 121 break;
122 case e1000_82574:
123 if (phy->id != BME1000_E_PHY_ID_R2)
124 return -E1000_ERR_PHY;
125 break;
114 default: 126 default:
115 return -E1000_ERR_PHY; 127 return -E1000_ERR_PHY;
116 break; 128 break;
@@ -150,6 +162,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
150 162
151 switch (hw->mac.type) { 163 switch (hw->mac.type) {
152 case e1000_82573: 164 case e1000_82573:
165 case e1000_82574:
153 if (((eecd >> 15) & 0x3) == 0x3) { 166 if (((eecd >> 15) & 0x3) == 0x3) {
154 nvm->type = e1000_nvm_flash_hw; 167 nvm->type = e1000_nvm_flash_hw;
155 nvm->word_size = 2048; 168 nvm->word_size = 2048;
@@ -245,6 +258,17 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
245 break; 258 break;
246 } 259 }
247 260
261 switch (hw->mac.type) {
262 case e1000_82574:
263 func->check_mng_mode = e1000_check_mng_mode_82574;
264 func->led_on = e1000_led_on_82574;
265 break;
266 default:
267 func->check_mng_mode = e1000e_check_mng_mode_generic;
268 func->led_on = e1000e_led_on_generic;
269 break;
270 }
271
248 return 0; 272 return 0;
249} 273}
250 274
@@ -330,6 +354,8 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
330static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) 354static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
331{ 355{
332 struct e1000_phy_info *phy = &hw->phy; 356 struct e1000_phy_info *phy = &hw->phy;
357 s32 ret_val;
358 u16 phy_id = 0;
333 359
334 switch (hw->mac.type) { 360 switch (hw->mac.type) {
335 case e1000_82571: 361 case e1000_82571:
@@ -345,6 +371,20 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
345 case e1000_82573: 371 case e1000_82573:
346 return e1000e_get_phy_id(hw); 372 return e1000e_get_phy_id(hw);
347 break; 373 break;
374 case e1000_82574:
375 ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
376 if (ret_val)
377 return ret_val;
378
379 phy->id = (u32)(phy_id << 16);
380 udelay(20);
381 ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
382 if (ret_val)
383 return ret_val;
384
385 phy->id |= (u32)(phy_id);
386 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
387 break;
348 default: 388 default:
349 return -E1000_ERR_PHY; 389 return -E1000_ERR_PHY;
350 break; 390 break;
@@ -421,7 +461,7 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
421 if (ret_val) 461 if (ret_val)
422 return ret_val; 462 return ret_val;
423 463
424 if (hw->mac.type != e1000_82573) 464 if (hw->mac.type != e1000_82573 && hw->mac.type != e1000_82574)
425 ret_val = e1000e_acquire_nvm(hw); 465 ret_val = e1000e_acquire_nvm(hw);
426 466
427 if (ret_val) 467 if (ret_val)
@@ -461,6 +501,7 @@ static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
461 501
462 switch (hw->mac.type) { 502 switch (hw->mac.type) {
463 case e1000_82573: 503 case e1000_82573:
504 case e1000_82574:
464 ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); 505 ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
465 break; 506 break;
466 case e1000_82571: 507 case e1000_82571:
@@ -735,7 +776,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
735 * Must acquire the MDIO ownership before MAC reset. 776 * Must acquire the MDIO ownership before MAC reset.
736 * Ownership defaults to firmware after a reset. 777 * Ownership defaults to firmware after a reset.
737 */ 778 */
738 if (hw->mac.type == e1000_82573) { 779 if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
739 extcnf_ctrl = er32(EXTCNF_CTRL); 780 extcnf_ctrl = er32(EXTCNF_CTRL);
740 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; 781 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
741 782
@@ -776,7 +817,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
776 * Need to wait for Phy configuration completion before accessing 817 * Need to wait for Phy configuration completion before accessing
777 * NVM and Phy. 818 * NVM and Phy.
778 */ 819 */
779 if (hw->mac.type == e1000_82573) 820 if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574)
780 msleep(25); 821 msleep(25);
781 822
782 /* Clear any pending interrupt events. */ 823 /* Clear any pending interrupt events. */
@@ -843,7 +884,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
843 ew32(TXDCTL(0), reg_data); 884 ew32(TXDCTL(0), reg_data);
844 885
845 /* ...for both queues. */ 886 /* ...for both queues. */
846 if (mac->type != e1000_82573) { 887 if (mac->type != e1000_82573 && mac->type != e1000_82574) {
847 reg_data = er32(TXDCTL(1)); 888 reg_data = er32(TXDCTL(1));
848 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 889 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
849 E1000_TXDCTL_FULL_TX_DESC_WB | 890 E1000_TXDCTL_FULL_TX_DESC_WB |
@@ -918,19 +959,28 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
918 } 959 }
919 960
920 /* Device Control */ 961 /* Device Control */
921 if (hw->mac.type == e1000_82573) { 962 if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
922 reg = er32(CTRL); 963 reg = er32(CTRL);
923 reg &= ~(1 << 29); 964 reg &= ~(1 << 29);
924 ew32(CTRL, reg); 965 ew32(CTRL, reg);
925 } 966 }
926 967
927 /* Extended Device Control */ 968 /* Extended Device Control */
928 if (hw->mac.type == e1000_82573) { 969 if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
929 reg = er32(CTRL_EXT); 970 reg = er32(CTRL_EXT);
930 reg &= ~(1 << 23); 971 reg &= ~(1 << 23);
931 reg |= (1 << 22); 972 reg |= (1 << 22);
932 ew32(CTRL_EXT, reg); 973 ew32(CTRL_EXT, reg);
933 } 974 }
975
976 /* PCI-Ex Control Register */
977 if (hw->mac.type == e1000_82574) {
978 reg = er32(GCR);
979 reg |= (1 << 22);
980 ew32(GCR, reg);
981 }
982
983 return;
934} 984}
935 985
936/** 986/**
@@ -947,7 +997,7 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
947 u32 vfta_offset = 0; 997 u32 vfta_offset = 0;
948 u32 vfta_bit_in_reg = 0; 998 u32 vfta_bit_in_reg = 0;
949 999
950 if (hw->mac.type == e1000_82573) { 1000 if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
951 if (hw->mng_cookie.vlan_id != 0) { 1001 if (hw->mng_cookie.vlan_id != 0) {
952 /* 1002 /*
953 * The VFTA is a 4096b bit-field, each identifying 1003 * The VFTA is a 4096b bit-field, each identifying
@@ -976,6 +1026,48 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
976} 1026}
977 1027
978/** 1028/**
1029 * e1000_check_mng_mode_82574 - Check manageability is enabled
1030 * @hw: pointer to the HW structure
1031 *
1032 * Reads the NVM Initialization Control Word 2 and returns true
1033 * (>0) if any manageability is enabled, else false (0).
1034 **/
1035static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
1036{
1037 u16 data;
1038
1039 e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
1040 return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
1041}
1042
1043/**
1044 * e1000_led_on_82574 - Turn LED on
1045 * @hw: pointer to the HW structure
1046 *
1047 * Turn LED on.
1048 **/
1049static s32 e1000_led_on_82574(struct e1000_hw *hw)
1050{
1051 u32 ctrl;
1052 u32 i;
1053
1054 ctrl = hw->mac.ledctl_mode2;
1055 if (!(E1000_STATUS_LU & er32(STATUS))) {
1056 /*
1057 * If no link, then turn LED on by setting the invert bit
1058 * for each LED that's "on" (0x0E) in ledctl_mode2.
1059 */
1060 for (i = 0; i < 4; i++)
1061 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1062 E1000_LEDCTL_MODE_LED_ON)
1063 ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
1064 }
1065 ew32(LEDCTL, ctrl);
1066
1067 return 0;
1068}
1069
1070/**
979 * e1000_update_mc_addr_list_82571 - Update Multicast addresses 1071 * e1000_update_mc_addr_list_82571 - Update Multicast addresses
980 * @hw: pointer to the HW structure 1072 * @hw: pointer to the HW structure
981 * @mc_addr_list: array of multicast addresses to program 1073 * @mc_addr_list: array of multicast addresses to program
@@ -1018,7 +1110,8 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
1018 * the default flow control setting, so we explicitly 1110 * the default flow control setting, so we explicitly
1019 * set it to full. 1111 * set it to full.
1020 */ 1112 */
1021 if (hw->mac.type == e1000_82573) 1113 if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
1114 hw->fc.type == e1000_fc_default)
1022 hw->fc.type = e1000_fc_full; 1115 hw->fc.type = e1000_fc_full;
1023 1116
1024 return e1000e_setup_link(hw); 1117 return e1000e_setup_link(hw);
@@ -1045,6 +1138,7 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
1045 1138
1046 switch (hw->phy.type) { 1139 switch (hw->phy.type) {
1047 case e1000_phy_m88: 1140 case e1000_phy_m88:
1141 case e1000_phy_bm:
1048 ret_val = e1000e_copper_link_setup_m88(hw); 1142 ret_val = e1000e_copper_link_setup_m88(hw);
1049 break; 1143 break;
1050 case e1000_phy_igp_2: 1144 case e1000_phy_igp_2:
@@ -1114,11 +1208,10 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
1114 return ret_val; 1208 return ret_val;
1115 } 1209 }
1116 1210
1117 if (hw->mac.type == e1000_82573 && 1211 if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
1118 *data == ID_LED_RESERVED_F746) 1212 *data == ID_LED_RESERVED_F746)
1119 *data = ID_LED_DEFAULT_82573; 1213 *data = ID_LED_DEFAULT_82573;
1120 else if (*data == ID_LED_RESERVED_0000 || 1214 else if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1121 *data == ID_LED_RESERVED_FFFF)
1122 *data = ID_LED_DEFAULT; 1215 *data = ID_LED_DEFAULT;
1123 1216
1124 return 0; 1217 return 0;
@@ -1265,13 +1358,13 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
1265} 1358}
1266 1359
1267static struct e1000_mac_operations e82571_mac_ops = { 1360static struct e1000_mac_operations e82571_mac_ops = {
1268 .mng_mode_enab = E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, 1361 /* .check_mng_mode: mac type dependent */
1269 /* .check_for_link: media type dependent */ 1362 /* .check_for_link: media type dependent */
1270 .cleanup_led = e1000e_cleanup_led_generic, 1363 .cleanup_led = e1000e_cleanup_led_generic,
1271 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, 1364 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
1272 .get_bus_info = e1000e_get_bus_info_pcie, 1365 .get_bus_info = e1000e_get_bus_info_pcie,
1273 /* .get_link_up_info: media type dependent */ 1366 /* .get_link_up_info: media type dependent */
1274 .led_on = e1000e_led_on_generic, 1367 /* .led_on: mac type dependent */
1275 .led_off = e1000e_led_off_generic, 1368 .led_off = e1000e_led_off_generic,
1276 .update_mc_addr_list = e1000_update_mc_addr_list_82571, 1369 .update_mc_addr_list = e1000_update_mc_addr_list_82571,
1277 .reset_hw = e1000_reset_hw_82571, 1370 .reset_hw = e1000_reset_hw_82571,
@@ -1312,6 +1405,22 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
1312 .write_phy_reg = e1000e_write_phy_reg_m88, 1405 .write_phy_reg = e1000e_write_phy_reg_m88,
1313}; 1406};
1314 1407
1408static struct e1000_phy_operations e82_phy_ops_bm = {
1409 .acquire_phy = e1000_get_hw_semaphore_82571,
1410 .check_reset_block = e1000e_check_reset_block_generic,
1411 .commit_phy = e1000e_phy_sw_reset,
1412 .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
1413 .get_cfg_done = e1000e_get_cfg_done,
1414 .get_cable_length = e1000e_get_cable_length_m88,
1415 .get_phy_info = e1000e_get_phy_info_m88,
1416 .read_phy_reg = e1000e_read_phy_reg_bm2,
1417 .release_phy = e1000_put_hw_semaphore_82571,
1418 .reset_phy = e1000e_phy_hw_reset_generic,
1419 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
1420 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
1421 .write_phy_reg = e1000e_write_phy_reg_bm2,
1422};
1423
1315static struct e1000_nvm_operations e82571_nvm_ops = { 1424static struct e1000_nvm_operations e82571_nvm_ops = {
1316 .acquire_nvm = e1000_acquire_nvm_82571, 1425 .acquire_nvm = e1000_acquire_nvm_82571,
1317 .read_nvm = e1000e_read_nvm_eerd, 1426 .read_nvm = e1000e_read_nvm_eerd,
@@ -1375,3 +1484,21 @@ struct e1000_info e1000_82573_info = {
1375 .nvm_ops = &e82571_nvm_ops, 1484 .nvm_ops = &e82571_nvm_ops,
1376}; 1485};
1377 1486
1487struct e1000_info e1000_82574_info = {
1488 .mac = e1000_82574,
1489 .flags = FLAG_HAS_HW_VLAN_FILTER
1490 | FLAG_HAS_MSIX
1491 | FLAG_HAS_JUMBO_FRAMES
1492 | FLAG_HAS_WOL
1493 | FLAG_APME_IN_CTRL3
1494 | FLAG_RX_CSUM_ENABLED
1495 | FLAG_HAS_SMART_POWER_DOWN
1496 | FLAG_HAS_AMT
1497 | FLAG_HAS_CTRLEXT_ON_LOAD,
1498 .pba = 20,
1499 .get_variants = e1000_get_variants_82571,
1500 .mac_ops = &e82571_mac_ops,
1501 .phy_ops = &e82_phy_ops_bm,
1502 .nvm_ops = &e82571_nvm_ops,
1503};
1504
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 14b0e6cd3b8d..48f79ecb82a0 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -71,9 +71,11 @@
71#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ 71#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
72#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 72#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
73#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 73#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
74#define E1000_CTRL_EXT_EIAME 0x01000000
74#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ 75#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
75#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 76#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
76#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 77#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
78#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
77 79
78/* Receive Descriptor bit definitions */ 80/* Receive Descriptor bit definitions */
79#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 81#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
@@ -299,6 +301,7 @@
299#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ 301#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
300 302
301/* Header split receive */ 303/* Header split receive */
304#define E1000_RFCTL_ACK_DIS 0x00001000
302#define E1000_RFCTL_EXTEN 0x00008000 305#define E1000_RFCTL_EXTEN 0x00008000
303#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 306#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
304#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 307#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
@@ -363,6 +366,11 @@
363#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ 366#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
364#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ 367#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
365#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 368#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
369#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
370#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
371#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
372#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
373#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
366 374
367/* 375/*
368 * This defines the bits that are set in the Interrupt Mask 376 * This defines the bits that are set in the Interrupt Mask
@@ -386,6 +394,11 @@
386#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ 394#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
387#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ 395#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
388#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ 396#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
397#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
398#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
399#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
400#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */
401#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */
389 402
390/* Interrupt Cause Set */ 403/* Interrupt Cause Set */
391#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 404#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
@@ -505,6 +518,7 @@
505#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ 518#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
506 519
507/* Autoneg Expansion Register */ 520/* Autoneg Expansion Register */
521#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
508 522
509/* 1000BASE-T Control Register */ 523/* 1000BASE-T Control Register */
510#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ 524#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
@@ -540,6 +554,7 @@
540#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ 554#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
541#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ 555#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
542#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ 556#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
557#define E1000_EECD_PRES 0x00000100 /* NVM Present */
543#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ 558#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
544/* NVM Addressing bits based on type (0-small, 1-large) */ 559/* NVM Addressing bits based on type (0-small, 1-large) */
545#define E1000_EECD_ADDR_BITS 0x00000400 560#define E1000_EECD_ADDR_BITS 0x00000400
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index ac4e506b4f88..0a1916b0419d 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -62,6 +62,11 @@ struct e1000_info;
62 e_printk(KERN_NOTICE, adapter, format, ## arg) 62 e_printk(KERN_NOTICE, adapter, format, ## arg)
63 63
64 64
65/* Interrupt modes, as used by the IntMode paramter */
66#define E1000E_INT_MODE_LEGACY 0
67#define E1000E_INT_MODE_MSI 1
68#define E1000E_INT_MODE_MSIX 2
69
65/* Tx/Rx descriptor defines */ 70/* Tx/Rx descriptor defines */
66#define E1000_DEFAULT_TXD 256 71#define E1000_DEFAULT_TXD 256
67#define E1000_MAX_TXD 4096 72#define E1000_MAX_TXD 4096
@@ -95,9 +100,11 @@ enum e1000_boards {
95 board_82571, 100 board_82571,
96 board_82572, 101 board_82572,
97 board_82573, 102 board_82573,
103 board_82574,
98 board_80003es2lan, 104 board_80003es2lan,
99 board_ich8lan, 105 board_ich8lan,
100 board_ich9lan, 106 board_ich9lan,
107 board_ich10lan,
101}; 108};
102 109
103struct e1000_queue_stats { 110struct e1000_queue_stats {
@@ -146,6 +153,12 @@ struct e1000_ring {
146 /* array of buffer information structs */ 153 /* array of buffer information structs */
147 struct e1000_buffer *buffer_info; 154 struct e1000_buffer *buffer_info;
148 155
156 char name[IFNAMSIZ + 5];
157 u32 ims_val;
158 u32 itr_val;
159 u16 itr_register;
160 int set_itr;
161
149 struct sk_buff *rx_skb_top; 162 struct sk_buff *rx_skb_top;
150 163
151 struct e1000_queue_stats stats; 164 struct e1000_queue_stats stats;
@@ -274,6 +287,9 @@ struct e1000_adapter {
274 u32 test_icr; 287 u32 test_icr;
275 288
276 u32 msg_enable; 289 u32 msg_enable;
290 struct msix_entry *msix_entries;
291 int int_mode;
292 u32 eiac_mask;
277 293
278 u32 eeprom_wol; 294 u32 eeprom_wol;
279 u32 wol; 295 u32 wol;
@@ -306,6 +322,7 @@ struct e1000_info {
306#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) 322#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
307#define FLAG_HAS_JUMBO_FRAMES (1 << 7) 323#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
308#define FLAG_IS_ICH (1 << 9) 324#define FLAG_IS_ICH (1 << 9)
325#define FLAG_HAS_MSIX (1 << 10)
309#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) 326#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
310#define FLAG_IS_QUAD_PORT_A (1 << 12) 327#define FLAG_IS_QUAD_PORT_A (1 << 12)
311#define FLAG_IS_QUAD_PORT (1 << 13) 328#define FLAG_IS_QUAD_PORT (1 << 13)
@@ -364,6 +381,8 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
364extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); 381extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
365extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); 382extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
366extern void e1000e_update_stats(struct e1000_adapter *adapter); 383extern void e1000e_update_stats(struct e1000_adapter *adapter);
384extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
385extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
367 386
368extern unsigned int copybreak; 387extern unsigned int copybreak;
369 388
@@ -372,8 +391,10 @@ extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
372extern struct e1000_info e1000_82571_info; 391extern struct e1000_info e1000_82571_info;
373extern struct e1000_info e1000_82572_info; 392extern struct e1000_info e1000_82572_info;
374extern struct e1000_info e1000_82573_info; 393extern struct e1000_info e1000_82573_info;
394extern struct e1000_info e1000_82574_info;
375extern struct e1000_info e1000_ich8_info; 395extern struct e1000_info e1000_ich8_info;
376extern struct e1000_info e1000_ich9_info; 396extern struct e1000_info e1000_ich9_info;
397extern struct e1000_info e1000_ich10_info;
377extern struct e1000_info e1000_es2_info; 398extern struct e1000_info e1000_es2_info;
378 399
379extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num); 400extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
@@ -446,10 +467,13 @@ extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
446extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); 467extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
447extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); 468extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
448extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); 469extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
470extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
449extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); 471extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
450extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); 472extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
451extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); 473extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
452extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); 474extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
475extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
476extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
453extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); 477extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
454extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); 478extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
455extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); 479extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
@@ -520,7 +544,12 @@ static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
520 return hw->phy.ops.get_phy_info(hw); 544 return hw->phy.ops.get_phy_info(hw);
521} 545}
522 546
523extern bool e1000e_check_mng_mode(struct e1000_hw *hw); 547static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
548{
549 return hw->mac.ops.check_mng_mode(hw);
550}
551
552extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
524extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); 553extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
525extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); 554extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
526 555
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index dc552d7d6fac..da9c09c248ed 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1247,7 +1247,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
1247} 1247}
1248 1248
1249static struct e1000_mac_operations es2_mac_ops = { 1249static struct e1000_mac_operations es2_mac_ops = {
1250 .mng_mode_enab = E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, 1250 .check_mng_mode = e1000e_check_mng_mode_generic,
1251 /* check_for_link dependent on media type */ 1251 /* check_for_link dependent on media type */
1252 .cleanup_led = e1000e_cleanup_led_generic, 1252 .cleanup_led = e1000e_cleanup_led_generic,
1253 .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, 1253 .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index e21c9e0f3738..52b762eb1745 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -568,6 +568,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
568 * and flush shadow RAM for 82573 controllers 568 * and flush shadow RAM for 82573 controllers
569 */ 569 */
570 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || 570 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
571 (hw->mac.type == e1000_82574) ||
571 (hw->mac.type == e1000_82573))) 572 (hw->mac.type == e1000_82573)))
572 e1000e_update_nvm_checksum(hw); 573 e1000e_update_nvm_checksum(hw);
573 574
@@ -779,8 +780,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
779 toggle = 0x7FFFF3FF; 780 toggle = 0x7FFFF3FF;
780 break; 781 break;
781 case e1000_82573: 782 case e1000_82573:
783 case e1000_82574:
782 case e1000_ich8lan: 784 case e1000_ich8lan:
783 case e1000_ich9lan: 785 case e1000_ich9lan:
786 case e1000_ich10lan:
784 toggle = 0x7FFFF033; 787 toggle = 0x7FFFF033;
785 break; 788 break;
786 default: 789 default:
@@ -833,7 +836,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
833 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); 836 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
834 for (i = 0; i < mac->rar_entry_count; i++) 837 for (i = 0; i < mac->rar_entry_count; i++)
835 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), 838 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
836 0x8003FFFF, 0xFFFFFFFF); 839 ((mac->type == e1000_ich10lan) ?
840 0x8007FFFF : 0x8003FFFF),
841 0xFFFFFFFF);
837 842
838 for (i = 0; i < mac->mta_reg_count; i++) 843 for (i = 0; i < mac->mta_reg_count; i++)
839 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); 844 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
@@ -884,10 +889,18 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
884 u32 shared_int = 1; 889 u32 shared_int = 1;
885 u32 irq = adapter->pdev->irq; 890 u32 irq = adapter->pdev->irq;
886 int i; 891 int i;
892 int ret_val = 0;
893 int int_mode = E1000E_INT_MODE_LEGACY;
887 894
888 *data = 0; 895 *data = 0;
889 896
890 /* NOTE: we don't test MSI interrupts here, yet */ 897 /* NOTE: we don't test MSI/MSI-X interrupts here, yet */
898 if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
899 int_mode = adapter->int_mode;
900 e1000e_reset_interrupt_capability(adapter);
901 adapter->int_mode = E1000E_INT_MODE_LEGACY;
902 e1000e_set_interrupt_capability(adapter);
903 }
891 /* Hook up test interrupt handler just for this test */ 904 /* Hook up test interrupt handler just for this test */
892 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, 905 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
893 netdev)) { 906 netdev)) {
@@ -895,7 +908,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
895 } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, 908 } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
896 netdev->name, netdev)) { 909 netdev->name, netdev)) {
897 *data = 1; 910 *data = 1;
898 return -1; 911 ret_val = -1;
912 goto out;
899 } 913 }
900 e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); 914 e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
901 915
@@ -905,12 +919,23 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
905 919
906 /* Test each interrupt */ 920 /* Test each interrupt */
907 for (i = 0; i < 10; i++) { 921 for (i = 0; i < 10; i++) {
908 if ((adapter->flags & FLAG_IS_ICH) && (i == 8))
909 continue;
910
911 /* Interrupt to test */ 922 /* Interrupt to test */
912 mask = 1 << i; 923 mask = 1 << i;
913 924
925 if (adapter->flags & FLAG_IS_ICH) {
926 switch (mask) {
927 case E1000_ICR_RXSEQ:
928 continue;
929 case 0x00000100:
930 if (adapter->hw.mac.type == e1000_ich8lan ||
931 adapter->hw.mac.type == e1000_ich9lan)
932 continue;
933 break;
934 default:
935 break;
936 }
937 }
938
914 if (!shared_int) { 939 if (!shared_int) {
915 /* 940 /*
916 * Disable the interrupt to be reported in 941 * Disable the interrupt to be reported in
@@ -974,7 +999,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
974 /* Unhook test interrupt handler */ 999 /* Unhook test interrupt handler */
975 free_irq(irq, netdev); 1000 free_irq(irq, netdev);
976 1001
977 return *data; 1002out:
1003 if (int_mode == E1000E_INT_MODE_MSIX) {
1004 e1000e_reset_interrupt_capability(adapter);
1005 adapter->int_mode = int_mode;
1006 e1000e_set_interrupt_capability(adapter);
1007 }
1008
1009 return ret_val;
978} 1010}
979 1011
980static void e1000_free_desc_rings(struct e1000_adapter *adapter) 1012static void e1000_free_desc_rings(struct e1000_adapter *adapter)
@@ -1755,11 +1787,13 @@ static void e1000_led_blink_callback(unsigned long data)
1755static int e1000_phys_id(struct net_device *netdev, u32 data) 1787static int e1000_phys_id(struct net_device *netdev, u32 data)
1756{ 1788{
1757 struct e1000_adapter *adapter = netdev_priv(netdev); 1789 struct e1000_adapter *adapter = netdev_priv(netdev);
1790 struct e1000_hw *hw = &adapter->hw;
1758 1791
1759 if (!data) 1792 if (!data)
1760 data = INT_MAX; 1793 data = INT_MAX;
1761 1794
1762 if (adapter->hw.phy.type == e1000_phy_ife) { 1795 if ((hw->phy.type == e1000_phy_ife) ||
1796 (hw->mac.type == e1000_82574)) {
1763 if (!adapter->blink_timer.function) { 1797 if (!adapter->blink_timer.function) {
1764 init_timer(&adapter->blink_timer); 1798 init_timer(&adapter->blink_timer);
1765 adapter->blink_timer.function = 1799 adapter->blink_timer.function =
@@ -1769,16 +1803,16 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1769 mod_timer(&adapter->blink_timer, jiffies); 1803 mod_timer(&adapter->blink_timer, jiffies);
1770 msleep_interruptible(data * 1000); 1804 msleep_interruptible(data * 1000);
1771 del_timer_sync(&adapter->blink_timer); 1805 del_timer_sync(&adapter->blink_timer);
1772 e1e_wphy(&adapter->hw, 1806 if (hw->phy.type == e1000_phy_ife)
1773 IFE_PHY_SPECIAL_CONTROL_LED, 0); 1807 e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
1774 } else { 1808 } else {
1775 e1000e_blink_led(&adapter->hw); 1809 e1000e_blink_led(hw);
1776 msleep_interruptible(data * 1000); 1810 msleep_interruptible(data * 1000);
1777 } 1811 }
1778 1812
1779 adapter->hw.mac.ops.led_off(&adapter->hw); 1813 hw->mac.ops.led_off(hw);
1780 clear_bit(E1000_LED_ON, &adapter->led_status); 1814 clear_bit(E1000_LED_ON, &adapter->led_status);
1781 adapter->hw.mac.ops.cleanup_led(&adapter->hw); 1815 hw->mac.ops.cleanup_led(hw);
1782 1816
1783 return 0; 1817 return 0;
1784} 1818}
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 74f263acb172..f66ed37a7f76 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -65,7 +65,11 @@ enum e1e_registers {
65 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ 65 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
66 E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ 66 E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
67 E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ 67 E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
68 E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
68 E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ 69 E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
70 E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */
71 E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
72#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
69 E1000_RCTL = 0x00100, /* Rx Control - RW */ 73 E1000_RCTL = 0x00100, /* Rx Control - RW */
70 E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ 74 E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
71 E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */ 75 E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
@@ -332,6 +336,7 @@ enum e1e_registers {
332#define E1000_DEV_ID_82573E 0x108B 336#define E1000_DEV_ID_82573E 0x108B
333#define E1000_DEV_ID_82573E_IAMT 0x108C 337#define E1000_DEV_ID_82573E_IAMT 0x108C
334#define E1000_DEV_ID_82573L 0x109A 338#define E1000_DEV_ID_82573L 0x109A
339#define E1000_DEV_ID_82574L 0x10D3
335 340
336#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 341#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
337#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 342#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
@@ -346,6 +351,7 @@ enum e1e_registers {
346#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 351#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
347#define E1000_DEV_ID_ICH8_IGP_M 0x104D 352#define E1000_DEV_ID_ICH8_IGP_M 0x104D
348#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD 353#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
354#define E1000_DEV_ID_ICH9_BM 0x10E5
349#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 355#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
350#define E1000_DEV_ID_ICH9_IGP_M 0x10BF 356#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
351#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB 357#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
@@ -356,6 +362,10 @@ enum e1e_registers {
356#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC 362#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
357#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD 363#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
358#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE 364#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
365#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
366#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
367
368#define E1000_REVISION_4 4
359 369
360#define E1000_FUNC_1 1 370#define E1000_FUNC_1 1
361 371
@@ -363,9 +373,11 @@ enum e1000_mac_type {
363 e1000_82571, 373 e1000_82571,
364 e1000_82572, 374 e1000_82572,
365 e1000_82573, 375 e1000_82573,
376 e1000_82574,
366 e1000_80003es2lan, 377 e1000_80003es2lan,
367 e1000_ich8lan, 378 e1000_ich8lan,
368 e1000_ich9lan, 379 e1000_ich9lan,
380 e1000_ich10lan,
369}; 381};
370 382
371enum e1000_media_type { 383enum e1000_media_type {
@@ -696,8 +708,7 @@ struct e1000_host_mng_command_info {
696 708
697/* Function pointers and static data for the MAC. */ 709/* Function pointers and static data for the MAC. */
698struct e1000_mac_operations { 710struct e1000_mac_operations {
699 u32 mng_mode_enab; 711 bool (*check_mng_mode)(struct e1000_hw *);
700
701 s32 (*check_for_link)(struct e1000_hw *); 712 s32 (*check_for_link)(struct e1000_hw *);
702 s32 (*cleanup_led)(struct e1000_hw *); 713 s32 (*cleanup_led)(struct e1000_hw *);
703 void (*clear_hw_cntrs)(struct e1000_hw *); 714 void (*clear_hw_cntrs)(struct e1000_hw *);
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9e38452a738c..019b9c0bcdcb 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -43,7 +43,9 @@
43 * 82567LM-2 Gigabit Network Connection 43 * 82567LM-2 Gigabit Network Connection
44 * 82567LF-2 Gigabit Network Connection 44 * 82567LF-2 Gigabit Network Connection
45 * 82567V-2 Gigabit Network Connection 45 * 82567V-2 Gigabit Network Connection
46 * 82562GT-3 10/100 Network Connection 46 * 82567LF-3 Gigabit Network Connection
47 * 82567LM-3 Gigabit Network Connection
48 * 82567LM-4 Gigabit Network Connection
47 */ 49 */
48 50
49#include <linux/netdevice.h> 51#include <linux/netdevice.h>
@@ -157,12 +159,15 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw);
157static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); 159static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
158static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, 160static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
159 u32 offset, u8 byte); 161 u32 offset, u8 byte);
162static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
163 u8 *data);
160static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, 164static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
161 u16 *data); 165 u16 *data);
162static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 166static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
163 u8 size, u16 *data); 167 u8 size, u16 *data);
164static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); 168static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
165static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); 169static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
170static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
166 171
167static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 172static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
168{ 173{
@@ -417,6 +422,22 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
417} 422}
418 423
419/** 424/**
425 * e1000_check_mng_mode_ich8lan - Checks management mode
426 * @hw: pointer to the HW structure
427 *
428 * This checks if the adapter has manageability enabled.
429 * This is a function pointer entry point only called by read/write
430 * routines for the PHY and NVM parts.
431 **/
432static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
433{
434 u32 fwsm = er32(FWSM);
435
436 return (fwsm & E1000_FWSM_MODE_MASK) ==
437 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
438}
439
440/**
420 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked 441 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
421 * @hw: pointer to the HW structure 442 * @hw: pointer to the HW structure
422 * 443 *
@@ -897,6 +918,56 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
897} 918}
898 919
899/** 920/**
921 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
922 * @hw: pointer to the HW structure
923 * @bank: pointer to the variable that returns the active bank
924 *
925 * Reads signature byte from the NVM using the flash access registers.
926 **/
927static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
928{
929 struct e1000_nvm_info *nvm = &hw->nvm;
930 /* flash bank size is in words */
931 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
932 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
933 u8 bank_high_byte = 0;
934
935 if (hw->mac.type != e1000_ich10lan) {
936 if (er32(EECD) & E1000_EECD_SEC1VAL)
937 *bank = 1;
938 else
939 *bank = 0;
940 } else {
941 /*
942 * Make sure the signature for bank 0 is valid,
943 * if not check for bank1
944 */
945 e1000_read_flash_byte_ich8lan(hw, act_offset, &bank_high_byte);
946 if ((bank_high_byte & 0xC0) == 0x80) {
947 *bank = 0;
948 } else {
949 /*
950 * find if segment 1 is valid by verifying
951 * bit 15:14 = 10b in word 0x13
952 */
953 e1000_read_flash_byte_ich8lan(hw,
954 act_offset + bank1_offset,
955 &bank_high_byte);
956
957 /* bank1 has a valid signature equivalent to SEC1V */
958 if ((bank_high_byte & 0xC0) == 0x80) {
959 *bank = 1;
960 } else {
961 hw_dbg(hw, "ERROR: EEPROM not present\n");
962 return -E1000_ERR_NVM;
963 }
964 }
965 }
966
967 return 0;
968}
969
970/**
900 * e1000_read_nvm_ich8lan - Read word(s) from the NVM 971 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
901 * @hw: pointer to the HW structure 972 * @hw: pointer to the HW structure
902 * @offset: The offset (in bytes) of the word(s) to read. 973 * @offset: The offset (in bytes) of the word(s) to read.
@@ -912,6 +983,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
912 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 983 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
913 u32 act_offset; 984 u32 act_offset;
914 s32 ret_val; 985 s32 ret_val;
986 u32 bank = 0;
915 u16 i, word; 987 u16 i, word;
916 988
917 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 989 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
@@ -924,10 +996,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
924 if (ret_val) 996 if (ret_val)
925 return ret_val; 997 return ret_val;
926 998
927 /* Start with the bank offset, then add the relative offset. */ 999 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
928 act_offset = (er32(EECD) & E1000_EECD_SEC1VAL) 1000 if (ret_val)
929 ? nvm->flash_bank_size 1001 return ret_val;
930 : 0; 1002
1003 act_offset = (bank) ? nvm->flash_bank_size : 0;
931 act_offset += offset; 1004 act_offset += offset;
932 1005
933 for (i = 0; i < words; i++) { 1006 for (i = 0; i < words; i++) {
@@ -1075,6 +1148,29 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
1075} 1148}
1076 1149
1077/** 1150/**
1151 * e1000_read_flash_byte_ich8lan - Read byte from flash
1152 * @hw: pointer to the HW structure
1153 * @offset: The offset of the byte to read.
1154 * @data: Pointer to a byte to store the value read.
1155 *
1156 * Reads a single byte from the NVM using the flash access registers.
1157 **/
1158static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
1159 u8 *data)
1160{
1161 s32 ret_val;
1162 u16 word = 0;
1163
1164 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
1165 if (ret_val)
1166 return ret_val;
1167
1168 *data = (u8)word;
1169
1170 return 0;
1171}
1172
1173/**
1078 * e1000_read_flash_data_ich8lan - Read byte or word from NVM 1174 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
1079 * @hw: pointer to the HW structure 1175 * @hw: pointer to the HW structure
1080 * @offset: The offset (in bytes) of the byte or word to read. 1176 * @offset: The offset (in bytes) of the byte or word to read.
@@ -1205,7 +1301,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1205{ 1301{
1206 struct e1000_nvm_info *nvm = &hw->nvm; 1302 struct e1000_nvm_info *nvm = &hw->nvm;
1207 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 1303 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1208 u32 i, act_offset, new_bank_offset, old_bank_offset; 1304 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
1209 s32 ret_val; 1305 s32 ret_val;
1210 u16 data; 1306 u16 data;
1211 1307
@@ -1225,7 +1321,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1225 * write to bank 0 etc. We also need to erase the segment that 1321 * write to bank 0 etc. We also need to erase the segment that
1226 * is going to be written 1322 * is going to be written
1227 */ 1323 */
1228 if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { 1324 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1325 if (ret_val)
1326 return ret_val;
1327
1328 if (bank == 0) {
1229 new_bank_offset = nvm->flash_bank_size; 1329 new_bank_offset = nvm->flash_bank_size;
1230 old_bank_offset = 0; 1330 old_bank_offset = 0;
1231 e1000_erase_flash_bank_ich8lan(hw, 1); 1331 e1000_erase_flash_bank_ich8lan(hw, 1);
@@ -2189,13 +2289,14 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
2189 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation 2289 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
2190 * to a lower speed. 2290 * to a lower speed.
2191 * 2291 *
2192 * Should only be called for ICH9 devices. 2292 * Should only be called for ICH9 and ICH10 devices.
2193 **/ 2293 **/
2194void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 2294void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
2195{ 2295{
2196 u32 phy_ctrl; 2296 u32 phy_ctrl;
2197 2297
2198 if (hw->mac.type == e1000_ich9lan) { 2298 if ((hw->mac.type == e1000_ich10lan) ||
2299 (hw->mac.type == e1000_ich9lan)) {
2199 phy_ctrl = er32(PHY_CTRL); 2300 phy_ctrl = er32(PHY_CTRL);
2200 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | 2301 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
2201 E1000_PHY_CTRL_GBE_DISABLE; 2302 E1000_PHY_CTRL_GBE_DISABLE;
@@ -2253,6 +2354,39 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
2253} 2354}
2254 2355
2255/** 2356/**
2357 * e1000_get_cfg_done_ich8lan - Read config done bit
2358 * @hw: pointer to the HW structure
2359 *
2360 * Read the management control register for the config done bit for
2361 * completion status. NOTE: silicon which is EEPROM-less will fail trying
2362 * to read the config done bit, so an error is *ONLY* logged and returns
2363 * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
2364 * would not be able to be reset or change link.
2365 **/
2366static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
2367{
2368 u32 bank = 0;
2369
2370 e1000e_get_cfg_done(hw);
2371
2372 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
2373 if (hw->mac.type != e1000_ich10lan) {
2374 if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
2375 (hw->phy.type == e1000_phy_igp_3)) {
2376 e1000e_phy_init_script_igp3(hw);
2377 }
2378 } else {
2379 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
2380 /* Maybe we should do a basic PHY config */
2381 hw_dbg(hw, "EEPROM not present\n");
2382 return -E1000_ERR_CONFIG;
2383 }
2384 }
2385
2386 return 0;
2387}
2388
2389/**
2256 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters 2390 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
2257 * @hw: pointer to the HW structure 2391 * @hw: pointer to the HW structure
2258 * 2392 *
@@ -2282,7 +2416,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
2282} 2416}
2283 2417
2284static struct e1000_mac_operations ich8_mac_ops = { 2418static struct e1000_mac_operations ich8_mac_ops = {
2285 .mng_mode_enab = E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, 2419 .check_mng_mode = e1000_check_mng_mode_ich8lan,
2286 .check_for_link = e1000e_check_for_copper_link, 2420 .check_for_link = e1000e_check_for_copper_link,
2287 .cleanup_led = e1000_cleanup_led_ich8lan, 2421 .cleanup_led = e1000_cleanup_led_ich8lan,
2288 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, 2422 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
@@ -2302,7 +2436,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
2302 .check_reset_block = e1000_check_reset_block_ich8lan, 2436 .check_reset_block = e1000_check_reset_block_ich8lan,
2303 .commit_phy = NULL, 2437 .commit_phy = NULL,
2304 .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan, 2438 .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan,
2305 .get_cfg_done = e1000e_get_cfg_done, 2439 .get_cfg_done = e1000_get_cfg_done_ich8lan,
2306 .get_cable_length = e1000e_get_cable_length_igp_2, 2440 .get_cable_length = e1000e_get_cable_length_igp_2,
2307 .get_phy_info = e1000_get_phy_info_ich8lan, 2441 .get_phy_info = e1000_get_phy_info_ich8lan,
2308 .read_phy_reg = e1000e_read_phy_reg_igp, 2442 .read_phy_reg = e1000e_read_phy_reg_igp,
@@ -2357,3 +2491,20 @@ struct e1000_info e1000_ich9_info = {
2357 .nvm_ops = &ich8_nvm_ops, 2491 .nvm_ops = &ich8_nvm_ops,
2358}; 2492};
2359 2493
2494struct e1000_info e1000_ich10_info = {
2495 .mac = e1000_ich10lan,
2496 .flags = FLAG_HAS_JUMBO_FRAMES
2497 | FLAG_IS_ICH
2498 | FLAG_HAS_WOL
2499 | FLAG_RX_CSUM_ENABLED
2500 | FLAG_HAS_CTRLEXT_ON_LOAD
2501 | FLAG_HAS_AMT
2502 | FLAG_HAS_ERT
2503 | FLAG_HAS_FLASH
2504 | FLAG_APME_IN_WUC,
2505 .pba = 10,
2506 .get_variants = e1000_get_variants_ich8lan,
2507 .mac_ops = &ich8_mac_ops,
2508 .phy_ops = &ich8_phy_ops,
2509 .nvm_ops = &ich8_nvm_ops,
2510};
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index f1f4e9dfd0a0..c7337306ffa7 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -2222,17 +2222,18 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2222} 2222}
2223 2223
2224/** 2224/**
2225 * e1000e_check_mng_mode - check management mode 2225 * e1000e_check_mng_mode_generic - check management mode
2226 * @hw: pointer to the HW structure 2226 * @hw: pointer to the HW structure
2227 * 2227 *
2228 * Reads the firmware semaphore register and returns true (>0) if 2228 * Reads the firmware semaphore register and returns true (>0) if
2229 * manageability is enabled, else false (0). 2229 * manageability is enabled, else false (0).
2230 **/ 2230 **/
2231bool e1000e_check_mng_mode(struct e1000_hw *hw) 2231bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
2232{ 2232{
2233 u32 fwsm = er32(FWSM); 2233 u32 fwsm = er32(FWSM);
2234 2234
2235 return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab; 2235 return (fwsm & E1000_FWSM_MODE_MASK) ==
2236 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
2236} 2237}
2237 2238
2238/** 2239/**
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d266510c8a94..0925204cd2d8 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -55,9 +55,11 @@ static const struct e1000_info *e1000_info_tbl[] = {
55 [board_82571] = &e1000_82571_info, 55 [board_82571] = &e1000_82571_info,
56 [board_82572] = &e1000_82572_info, 56 [board_82572] = &e1000_82572_info,
57 [board_82573] = &e1000_82573_info, 57 [board_82573] = &e1000_82573_info,
58 [board_82574] = &e1000_82574_info,
58 [board_80003es2lan] = &e1000_es2_info, 59 [board_80003es2lan] = &e1000_es2_info,
59 [board_ich8lan] = &e1000_ich8_info, 60 [board_ich8lan] = &e1000_ich8_info,
60 [board_ich9lan] = &e1000_ich9_info, 61 [board_ich9lan] = &e1000_ich9_info,
62 [board_ich10lan] = &e1000_ich10_info,
61}; 63};
62 64
63#ifdef DEBUG 65#ifdef DEBUG
@@ -1179,8 +1181,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
1179 struct net_device *netdev = data; 1181 struct net_device *netdev = data;
1180 struct e1000_adapter *adapter = netdev_priv(netdev); 1182 struct e1000_adapter *adapter = netdev_priv(netdev);
1181 struct e1000_hw *hw = &adapter->hw; 1183 struct e1000_hw *hw = &adapter->hw;
1182
1183 u32 rctl, icr = er32(ICR); 1184 u32 rctl, icr = er32(ICR);
1185
1184 if (!icr) 1186 if (!icr)
1185 return IRQ_NONE; /* Not our interrupt */ 1187 return IRQ_NONE; /* Not our interrupt */
1186 1188
@@ -1236,6 +1238,263 @@ static irqreturn_t e1000_intr(int irq, void *data)
1236 return IRQ_HANDLED; 1238 return IRQ_HANDLED;
1237} 1239}
1238 1240
1241static irqreturn_t e1000_msix_other(int irq, void *data)
1242{
1243 struct net_device *netdev = data;
1244 struct e1000_adapter *adapter = netdev_priv(netdev);
1245 struct e1000_hw *hw = &adapter->hw;
1246 u32 icr = er32(ICR);
1247
1248 if (!(icr & E1000_ICR_INT_ASSERTED)) {
1249 ew32(IMS, E1000_IMS_OTHER);
1250 return IRQ_NONE;
1251 }
1252
1253 if (icr & adapter->eiac_mask)
1254 ew32(ICS, (icr & adapter->eiac_mask));
1255
1256 if (icr & E1000_ICR_OTHER) {
1257 if (!(icr & E1000_ICR_LSC))
1258 goto no_link_interrupt;
1259 hw->mac.get_link_status = 1;
1260 /* guard against interrupt when we're going down */
1261 if (!test_bit(__E1000_DOWN, &adapter->state))
1262 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1263 }
1264
1265no_link_interrupt:
1266 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
1267
1268 return IRQ_HANDLED;
1269}
1270
1271
1272static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1273{
1274 struct net_device *netdev = data;
1275 struct e1000_adapter *adapter = netdev_priv(netdev);
1276 struct e1000_hw *hw = &adapter->hw;
1277 struct e1000_ring *tx_ring = adapter->tx_ring;
1278
1279
1280 adapter->total_tx_bytes = 0;
1281 adapter->total_tx_packets = 0;
1282
1283 if (!e1000_clean_tx_irq(adapter))
1284 /* Ring was not completely cleaned, so fire another interrupt */
1285 ew32(ICS, tx_ring->ims_val);
1286
1287 return IRQ_HANDLED;
1288}
1289
1290static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1291{
1292 struct net_device *netdev = data;
1293 struct e1000_adapter *adapter = netdev_priv(netdev);
1294
1295 /* Write the ITR value calculated at the end of the
1296 * previous interrupt.
1297 */
1298 if (adapter->rx_ring->set_itr) {
1299 writel(1000000000 / (adapter->rx_ring->itr_val * 256),
1300 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
1301 adapter->rx_ring->set_itr = 0;
1302 }
1303
1304 if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
1305 adapter->total_rx_bytes = 0;
1306 adapter->total_rx_packets = 0;
1307 __netif_rx_schedule(netdev, &adapter->napi);
1308 }
1309 return IRQ_HANDLED;
1310}
1311
1312/**
1313 * e1000_configure_msix - Configure MSI-X hardware
1314 *
1315 * e1000_configure_msix sets up the hardware to properly
1316 * generate MSI-X interrupts.
1317 **/
1318static void e1000_configure_msix(struct e1000_adapter *adapter)
1319{
1320 struct e1000_hw *hw = &adapter->hw;
1321 struct e1000_ring *rx_ring = adapter->rx_ring;
1322 struct e1000_ring *tx_ring = adapter->tx_ring;
1323 int vector = 0;
1324 u32 ctrl_ext, ivar = 0;
1325
1326 adapter->eiac_mask = 0;
1327
1328 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1329 if (hw->mac.type == e1000_82574) {
1330 u32 rfctl = er32(RFCTL);
1331 rfctl |= E1000_RFCTL_ACK_DIS;
1332 ew32(RFCTL, rfctl);
1333 }
1334
1335#define E1000_IVAR_INT_ALLOC_VALID 0x8
1336 /* Configure Rx vector */
1337 rx_ring->ims_val = E1000_IMS_RXQ0;
1338 adapter->eiac_mask |= rx_ring->ims_val;
1339 if (rx_ring->itr_val)
1340 writel(1000000000 / (rx_ring->itr_val * 256),
1341 hw->hw_addr + rx_ring->itr_register);
1342 else
1343 writel(1, hw->hw_addr + rx_ring->itr_register);
1344 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1345
1346 /* Configure Tx vector */
1347 tx_ring->ims_val = E1000_IMS_TXQ0;
1348 vector++;
1349 if (tx_ring->itr_val)
1350 writel(1000000000 / (tx_ring->itr_val * 256),
1351 hw->hw_addr + tx_ring->itr_register);
1352 else
1353 writel(1, hw->hw_addr + tx_ring->itr_register);
1354 adapter->eiac_mask |= tx_ring->ims_val;
1355 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1356
1357 /* set vector for Other Causes, e.g. link changes */
1358 vector++;
1359 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1360 if (rx_ring->itr_val)
1361 writel(1000000000 / (rx_ring->itr_val * 256),
1362 hw->hw_addr + E1000_EITR_82574(vector));
1363 else
1364 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1365
1366 /* Cause Tx interrupts on every write back */
1367 ivar |= (1 << 31);
1368
1369 ew32(IVAR, ivar);
1370
1371 /* enable MSI-X PBA support */
1372 ctrl_ext = er32(CTRL_EXT);
1373 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
1374
1375 /* Auto-Mask Other interrupts upon ICR read */
1376#define E1000_EIAC_MASK_82574 0x01F00000
1377 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
1378 ctrl_ext |= E1000_CTRL_EXT_EIAME;
1379 ew32(CTRL_EXT, ctrl_ext);
1380 e1e_flush();
1381}
1382
1383void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1384{
1385 if (adapter->msix_entries) {
1386 pci_disable_msix(adapter->pdev);
1387 kfree(adapter->msix_entries);
1388 adapter->msix_entries = NULL;
1389 } else if (adapter->flags & FLAG_MSI_ENABLED) {
1390 pci_disable_msi(adapter->pdev);
1391 adapter->flags &= ~FLAG_MSI_ENABLED;
1392 }
1393
1394 return;
1395}
1396
1397/**
1398 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
1399 *
1400 * Attempt to configure interrupts using the best available
1401 * capabilities of the hardware and kernel.
1402 **/
1403void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1404{
1405 int err;
1406 int numvecs, i;
1407
1408
1409 switch (adapter->int_mode) {
1410 case E1000E_INT_MODE_MSIX:
1411 if (adapter->flags & FLAG_HAS_MSIX) {
1412 numvecs = 3; /* RxQ0, TxQ0 and other */
1413 adapter->msix_entries = kcalloc(numvecs,
1414 sizeof(struct msix_entry),
1415 GFP_KERNEL);
1416 if (adapter->msix_entries) {
1417 for (i = 0; i < numvecs; i++)
1418 adapter->msix_entries[i].entry = i;
1419
1420 err = pci_enable_msix(adapter->pdev,
1421 adapter->msix_entries,
1422 numvecs);
1423 if (err == 0)
1424 return;
1425 }
1426 /* MSI-X failed, so fall through and try MSI */
1427 e_err("Failed to initialize MSI-X interrupts. "
1428 "Falling back to MSI interrupts.\n");
1429 e1000e_reset_interrupt_capability(adapter);
1430 }
1431 adapter->int_mode = E1000E_INT_MODE_MSI;
1432 /* Fall through */
1433 case E1000E_INT_MODE_MSI:
1434 if (!pci_enable_msi(adapter->pdev)) {
1435 adapter->flags |= FLAG_MSI_ENABLED;
1436 } else {
1437 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1438 e_err("Failed to initialize MSI interrupts. Falling "
1439 "back to legacy interrupts.\n");
1440 }
1441 /* Fall through */
1442 case E1000E_INT_MODE_LEGACY:
1443 /* Don't do anything; this is the system default */
1444 break;
1445 }
1446
1447 return;
1448}
1449
1450/**
1451 * e1000_request_msix - Initialize MSI-X interrupts
1452 *
1453 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
1454 * kernel.
1455 **/
1456static int e1000_request_msix(struct e1000_adapter *adapter)
1457{
1458 struct net_device *netdev = adapter->netdev;
1459 int err = 0, vector = 0;
1460
1461 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1462 sprintf(adapter->rx_ring->name, "%s-rx0", netdev->name);
1463 else
1464 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1465 err = request_irq(adapter->msix_entries[vector].vector,
1466 &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
1467 netdev);
1468 if (err)
1469 goto out;
1470 adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
1471 adapter->rx_ring->itr_val = adapter->itr;
1472 vector++;
1473
1474 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1475 sprintf(adapter->tx_ring->name, "%s-tx0", netdev->name);
1476 else
1477 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1478 err = request_irq(adapter->msix_entries[vector].vector,
1479 &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
1480 netdev);
1481 if (err)
1482 goto out;
1483 adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
1484 adapter->tx_ring->itr_val = adapter->itr;
1485 vector++;
1486
1487 err = request_irq(adapter->msix_entries[vector].vector,
1488 &e1000_msix_other, 0, netdev->name, netdev);
1489 if (err)
1490 goto out;
1491
1492 e1000_configure_msix(adapter);
1493 return 0;
1494out:
1495 return err;
1496}
1497
1239/** 1498/**
1240 * e1000_request_irq - initialize interrupts 1499 * e1000_request_irq - initialize interrupts
1241 * 1500 *
@@ -1245,28 +1504,32 @@ static irqreturn_t e1000_intr(int irq, void *data)
1245static int e1000_request_irq(struct e1000_adapter *adapter) 1504static int e1000_request_irq(struct e1000_adapter *adapter)
1246{ 1505{
1247 struct net_device *netdev = adapter->netdev; 1506 struct net_device *netdev = adapter->netdev;
1248 int irq_flags = IRQF_SHARED;
1249 int err; 1507 int err;
1250 1508
1251 if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) { 1509 if (adapter->msix_entries) {
1252 err = pci_enable_msi(adapter->pdev); 1510 err = e1000_request_msix(adapter);
1253 if (!err) { 1511 if (!err)
1254 adapter->flags |= FLAG_MSI_ENABLED; 1512 return err;
1255 irq_flags = 0; 1513 /* fall back to MSI */
1256 } 1514 e1000e_reset_interrupt_capability(adapter);
1515 adapter->int_mode = E1000E_INT_MODE_MSI;
1516 e1000e_set_interrupt_capability(adapter);
1517 }
1518 if (adapter->flags & FLAG_MSI_ENABLED) {
1519 err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
1520 netdev->name, netdev);
1521 if (!err)
1522 return err;
1523
1524 /* fall back to legacy interrupt */
1525 e1000e_reset_interrupt_capability(adapter);
1526 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1257 } 1527 }
1258 1528
1259 err = request_irq(adapter->pdev->irq, 1529 err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
1260 ((adapter->flags & FLAG_MSI_ENABLED) ? 1530 netdev->name, netdev);
1261 &e1000_intr_msi : &e1000_intr), 1531 if (err)
1262 irq_flags, netdev->name, netdev);
1263 if (err) {
1264 if (adapter->flags & FLAG_MSI_ENABLED) {
1265 pci_disable_msi(adapter->pdev);
1266 adapter->flags &= ~FLAG_MSI_ENABLED;
1267 }
1268 e_err("Unable to allocate interrupt, Error: %d\n", err); 1532 e_err("Unable to allocate interrupt, Error: %d\n", err);
1269 }
1270 1533
1271 return err; 1534 return err;
1272} 1535}
@@ -1275,11 +1538,21 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
1275{ 1538{
1276 struct net_device *netdev = adapter->netdev; 1539 struct net_device *netdev = adapter->netdev;
1277 1540
1278 free_irq(adapter->pdev->irq, netdev); 1541 if (adapter->msix_entries) {
1279 if (adapter->flags & FLAG_MSI_ENABLED) { 1542 int vector = 0;
1280 pci_disable_msi(adapter->pdev); 1543
1281 adapter->flags &= ~FLAG_MSI_ENABLED; 1544 free_irq(adapter->msix_entries[vector].vector, netdev);
1545 vector++;
1546
1547 free_irq(adapter->msix_entries[vector].vector, netdev);
1548 vector++;
1549
1550 /* Other Causes interrupt vector */
1551 free_irq(adapter->msix_entries[vector].vector, netdev);
1552 return;
1282 } 1553 }
1554
1555 free_irq(adapter->pdev->irq, netdev);
1283} 1556}
1284 1557
1285/** 1558/**
@@ -1290,6 +1563,8 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
1290 struct e1000_hw *hw = &adapter->hw; 1563 struct e1000_hw *hw = &adapter->hw;
1291 1564
1292 ew32(IMC, ~0); 1565 ew32(IMC, ~0);
1566 if (adapter->msix_entries)
1567 ew32(EIAC_82574, 0);
1293 e1e_flush(); 1568 e1e_flush();
1294 synchronize_irq(adapter->pdev->irq); 1569 synchronize_irq(adapter->pdev->irq);
1295} 1570}
@@ -1301,7 +1576,12 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
1301{ 1576{
1302 struct e1000_hw *hw = &adapter->hw; 1577 struct e1000_hw *hw = &adapter->hw;
1303 1578
1304 ew32(IMS, IMS_ENABLE_MASK); 1579 if (adapter->msix_entries) {
1580 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
1581 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
1582 } else {
1583 ew32(IMS, IMS_ENABLE_MASK);
1584 }
1305 e1e_flush(); 1585 e1e_flush();
1306} 1586}
1307 1587
@@ -1551,9 +1831,8 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
1551 * traffic pattern. Constants in this function were computed 1831 * traffic pattern. Constants in this function were computed
1552 * based on theoretical maximum wire speed and thresholds were set based 1832 * based on theoretical maximum wire speed and thresholds were set based
1553 * on testing data as well as attempting to minimize response time 1833 * on testing data as well as attempting to minimize response time
1554 * while increasing bulk throughput. 1834 * while increasing bulk throughput. This functionality is controlled
1555 * this functionality is controlled by the InterruptThrottleRate module 1835 * by the InterruptThrottleRate module parameter.
1556 * parameter (see e1000_param.c)
1557 **/ 1836 **/
1558static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 1837static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
1559 u16 itr_setting, int packets, 1838 u16 itr_setting, int packets,
@@ -1661,11 +1940,37 @@ set_itr_now:
1661 min(adapter->itr + (new_itr >> 2), new_itr) : 1940 min(adapter->itr + (new_itr >> 2), new_itr) :
1662 new_itr; 1941 new_itr;
1663 adapter->itr = new_itr; 1942 adapter->itr = new_itr;
1664 ew32(ITR, 1000000000 / (new_itr * 256)); 1943 adapter->rx_ring->itr_val = new_itr;
1944 if (adapter->msix_entries)
1945 adapter->rx_ring->set_itr = 1;
1946 else
1947 ew32(ITR, 1000000000 / (new_itr * 256));
1665 } 1948 }
1666} 1949}
1667 1950
1668/** 1951/**
1952 * e1000_alloc_queues - Allocate memory for all rings
1953 * @adapter: board private structure to initialize
1954 **/
1955static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1956{
1957 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
1958 if (!adapter->tx_ring)
1959 goto err;
1960
1961 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
1962 if (!adapter->rx_ring)
1963 goto err;
1964
1965 return 0;
1966err:
1967 e_err("Unable to allocate memory for queues\n");
1968 kfree(adapter->rx_ring);
1969 kfree(adapter->tx_ring);
1970 return -ENOMEM;
1971}
1972
1973/**
1669 * e1000_clean - NAPI Rx polling callback 1974 * e1000_clean - NAPI Rx polling callback
1670 * @napi: struct associated with this polling callback 1975 * @napi: struct associated with this polling callback
1671 * @budget: amount of packets driver is allowed to process this poll 1976 * @budget: amount of packets driver is allowed to process this poll
@@ -1673,12 +1978,17 @@ set_itr_now:
1673static int e1000_clean(struct napi_struct *napi, int budget) 1978static int e1000_clean(struct napi_struct *napi, int budget)
1674{ 1979{
1675 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 1980 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
1981 struct e1000_hw *hw = &adapter->hw;
1676 struct net_device *poll_dev = adapter->netdev; 1982 struct net_device *poll_dev = adapter->netdev;
1677 int tx_cleaned = 0, work_done = 0; 1983 int tx_cleaned = 0, work_done = 0;
1678 1984
1679 /* Must NOT use netdev_priv macro here. */ 1985 /* Must NOT use netdev_priv macro here. */
1680 adapter = poll_dev->priv; 1986 adapter = poll_dev->priv;
1681 1987
1988 if (adapter->msix_entries &&
1989 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
1990 goto clean_rx;
1991
1682 /* 1992 /*
1683 * e1000_clean is called per-cpu. This lock protects 1993 * e1000_clean is called per-cpu. This lock protects
1684 * tx_ring from being cleaned by multiple cpus 1994 * tx_ring from being cleaned by multiple cpus
@@ -1690,6 +2000,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
1690 spin_unlock(&adapter->tx_queue_lock); 2000 spin_unlock(&adapter->tx_queue_lock);
1691 } 2001 }
1692 2002
2003clean_rx:
1693 adapter->clean_rx(adapter, &work_done, budget); 2004 adapter->clean_rx(adapter, &work_done, budget);
1694 2005
1695 if (tx_cleaned) 2006 if (tx_cleaned)
@@ -1700,7 +2011,10 @@ static int e1000_clean(struct napi_struct *napi, int budget)
1700 if (adapter->itr_setting & 3) 2011 if (adapter->itr_setting & 3)
1701 e1000_set_itr(adapter); 2012 e1000_set_itr(adapter);
1702 netif_rx_complete(poll_dev, napi); 2013 netif_rx_complete(poll_dev, napi);
1703 e1000_irq_enable(adapter); 2014 if (adapter->msix_entries)
2015 ew32(IMS, adapter->rx_ring->ims_val);
2016 else
2017 e1000_irq_enable(adapter);
1704 } 2018 }
1705 2019
1706 return work_done; 2020 return work_done;
@@ -2496,6 +2810,8 @@ int e1000e_up(struct e1000_adapter *adapter)
2496 clear_bit(__E1000_DOWN, &adapter->state); 2810 clear_bit(__E1000_DOWN, &adapter->state);
2497 2811
2498 napi_enable(&adapter->napi); 2812 napi_enable(&adapter->napi);
2813 if (adapter->msix_entries)
2814 e1000_configure_msix(adapter);
2499 e1000_irq_enable(adapter); 2815 e1000_irq_enable(adapter);
2500 2816
2501 /* fire a link change interrupt to start the watchdog */ 2817 /* fire a link change interrupt to start the watchdog */
@@ -2579,13 +2895,10 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2579 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 2895 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2580 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 2896 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2581 2897
2582 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 2898 e1000e_set_interrupt_capability(adapter);
2583 if (!adapter->tx_ring)
2584 goto err;
2585 2899
2586 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 2900 if (e1000_alloc_queues(adapter))
2587 if (!adapter->rx_ring) 2901 return -ENOMEM;
2588 goto err;
2589 2902
2590 spin_lock_init(&adapter->tx_queue_lock); 2903 spin_lock_init(&adapter->tx_queue_lock);
2591 2904
@@ -2596,12 +2909,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2596 2909
2597 set_bit(__E1000_DOWN, &adapter->state); 2910 set_bit(__E1000_DOWN, &adapter->state);
2598 return 0; 2911 return 0;
2599
2600err:
2601 e_err("Unable to allocate memory for queues\n");
2602 kfree(adapter->rx_ring);
2603 kfree(adapter->tx_ring);
2604 return -ENOMEM;
2605} 2912}
2606 2913
2607/** 2914/**
@@ -2643,6 +2950,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
2643 2950
2644 /* free the real vector and request a test handler */ 2951 /* free the real vector and request a test handler */
2645 e1000_free_irq(adapter); 2952 e1000_free_irq(adapter);
2953 e1000e_reset_interrupt_capability(adapter);
2646 2954
2647 /* Assume that the test fails, if it succeeds then the test 2955 /* Assume that the test fails, if it succeeds then the test
2648 * MSI irq handler will unset this flag */ 2956 * MSI irq handler will unset this flag */
@@ -2673,6 +2981,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
2673 rmb(); 2981 rmb();
2674 2982
2675 if (adapter->flags & FLAG_MSI_TEST_FAILED) { 2983 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
2984 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2676 err = -EIO; 2985 err = -EIO;
2677 e_info("MSI interrupt test failed!\n"); 2986 e_info("MSI interrupt test failed!\n");
2678 } 2987 }
@@ -2686,7 +2995,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
2686 /* okay so the test worked, restore settings */ 2995 /* okay so the test worked, restore settings */
2687 e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); 2996 e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
2688msi_test_failed: 2997msi_test_failed:
2689 /* restore the original vector, even if it failed */ 2998 e1000e_set_interrupt_capability(adapter);
2690 e1000_request_irq(adapter); 2999 e1000_request_irq(adapter);
2691 return err; 3000 return err;
2692} 3001}
@@ -2796,7 +3105,7 @@ static int e1000_open(struct net_device *netdev)
2796 * ignore e1000e MSI messages, which means we need to test our MSI 3105 * ignore e1000e MSI messages, which means we need to test our MSI
2797 * interrupt now 3106 * interrupt now
2798 */ 3107 */
2799 { 3108 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
2800 err = e1000_test_msi(adapter); 3109 err = e1000_test_msi(adapter);
2801 if (err) { 3110 if (err) {
2802 e_err("Interrupt allocation failed\n"); 3111 e_err("Interrupt allocation failed\n");
@@ -2988,7 +3297,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2988 3297
2989 adapter->stats.algnerrc += er32(ALGNERRC); 3298 adapter->stats.algnerrc += er32(ALGNERRC);
2990 adapter->stats.rxerrc += er32(RXERRC); 3299 adapter->stats.rxerrc += er32(RXERRC);
2991 adapter->stats.tncrs += er32(TNCRS); 3300 if (hw->mac.type != e1000_82574)
3301 adapter->stats.tncrs += er32(TNCRS);
2992 adapter->stats.cexterr += er32(CEXTERR); 3302 adapter->stats.cexterr += er32(CEXTERR);
2993 adapter->stats.tsctc += er32(TSCTC); 3303 adapter->stats.tsctc += er32(TSCTC);
2994 adapter->stats.tsctfc += er32(TSCTFC); 3304 adapter->stats.tsctfc += er32(TSCTFC);
@@ -3201,6 +3511,27 @@ static void e1000_watchdog_task(struct work_struct *work)
3201 &adapter->link_duplex); 3511 &adapter->link_duplex);
3202 e1000_print_link_info(adapter); 3512 e1000_print_link_info(adapter);
3203 /* 3513 /*
3514 * On supported PHYs, check for duplex mismatch only
3515 * if link has autonegotiated at 10/100 half
3516 */
3517 if ((hw->phy.type == e1000_phy_igp_3 ||
3518 hw->phy.type == e1000_phy_bm) &&
3519 (hw->mac.autoneg == true) &&
3520 (adapter->link_speed == SPEED_10 ||
3521 adapter->link_speed == SPEED_100) &&
3522 (adapter->link_duplex == HALF_DUPLEX)) {
3523 u16 autoneg_exp;
3524
3525 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
3526
3527 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
3528 e_info("Autonegotiated half duplex but"
3529 " link partner cannot autoneg. "
3530 " Try forcing full duplex if "
3531 "link gets many collisions.\n");
3532 }
3533
3534 /*
3204 * tweak tx_queue_len according to speed/duplex 3535 * tweak tx_queue_len according to speed/duplex
3205 * and adjust the timeout factor 3536 * and adjust the timeout factor
3206 */ 3537 */
@@ -3315,7 +3646,10 @@ link_up:
3315 } 3646 }
3316 3647
3317 /* Cause software interrupt to ensure Rx ring is cleaned */ 3648 /* Cause software interrupt to ensure Rx ring is cleaned */
3318 ew32(ICS, E1000_ICS_RXDMT0); 3649 if (adapter->msix_entries)
3650 ew32(ICS, adapter->rx_ring->ims_val);
3651 else
3652 ew32(ICS, E1000_ICS_RXDMT0);
3319 3653
3320 /* Force detection of hung controller every watchdog period */ 3654 /* Force detection of hung controller every watchdog period */
3321 adapter->detect_tx_hung = 1; 3655 adapter->detect_tx_hung = 1;
@@ -4032,6 +4366,7 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4032 e1000e_down(adapter); 4366 e1000e_down(adapter);
4033 e1000_free_irq(adapter); 4367 e1000_free_irq(adapter);
4034 } 4368 }
4369 e1000e_reset_interrupt_capability(adapter);
4035 4370
4036 retval = pci_save_state(pdev); 4371 retval = pci_save_state(pdev);
4037 if (retval) 4372 if (retval)
@@ -4158,6 +4493,7 @@ static int e1000_resume(struct pci_dev *pdev)
4158 pci_enable_wake(pdev, PCI_D3hot, 0); 4493 pci_enable_wake(pdev, PCI_D3hot, 0);
4159 pci_enable_wake(pdev, PCI_D3cold, 0); 4494 pci_enable_wake(pdev, PCI_D3cold, 0);
4160 4495
4496 e1000e_set_interrupt_capability(adapter);
4161 if (netif_running(netdev)) { 4497 if (netif_running(netdev)) {
4162 err = e1000_request_irq(adapter); 4498 err = e1000_request_irq(adapter);
4163 if (err) 4499 if (err)
@@ -4467,6 +4803,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4467 4803
4468 adapter->bd_number = cards_found++; 4804 adapter->bd_number = cards_found++;
4469 4805
4806 e1000e_check_options(adapter);
4807
4470 /* setup adapter struct */ 4808 /* setup adapter struct */
4471 err = e1000_sw_init(adapter); 4809 err = e1000_sw_init(adapter);
4472 if (err) 4810 if (err)
@@ -4573,8 +4911,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4573 INIT_WORK(&adapter->reset_task, e1000_reset_task); 4911 INIT_WORK(&adapter->reset_task, e1000_reset_task);
4574 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 4912 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
4575 4913
4576 e1000e_check_options(adapter);
4577
4578 /* Initialize link parameters. User can change them with ethtool */ 4914 /* Initialize link parameters. User can change them with ethtool */
4579 adapter->hw.mac.autoneg = 1; 4915 adapter->hw.mac.autoneg = 1;
4580 adapter->fc_autoneg = 1; 4916 adapter->fc_autoneg = 1;
@@ -4704,6 +5040,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
4704 if (!e1000_check_reset_block(&adapter->hw)) 5040 if (!e1000_check_reset_block(&adapter->hw))
4705 e1000_phy_hw_reset(&adapter->hw); 5041 e1000_phy_hw_reset(&adapter->hw);
4706 5042
5043 e1000e_reset_interrupt_capability(adapter);
4707 kfree(adapter->tx_ring); 5044 kfree(adapter->tx_ring);
4708 kfree(adapter->rx_ring); 5045 kfree(adapter->rx_ring);
4709 5046
@@ -4745,6 +5082,8 @@ static struct pci_device_id e1000_pci_tbl[] = {
4745 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, 5082 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
4746 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, 5083 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
4747 5084
5085 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
5086
4748 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), 5087 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
4749 board_80003es2lan }, 5088 board_80003es2lan },
4750 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), 5089 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
@@ -4767,6 +5106,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
4767 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 5106 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
4768 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, 5107 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
4769 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, 5108 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
5109 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
4770 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, 5110 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
4771 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, 5111 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
4772 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, 5112 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
@@ -4775,6 +5115,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
4775 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, 5115 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
4776 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, 5116 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
4777 5117
5118 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
5119 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
5120
4778 { } /* terminate list */ 5121 { } /* terminate list */
4779}; 5122};
4780MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 5123MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index ed912e023a72..f46db6cda487 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -114,6 +114,15 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
114#define DEFAULT_ITR 3 114#define DEFAULT_ITR 3
115#define MAX_ITR 100000 115#define MAX_ITR 100000
116#define MIN_ITR 100 116#define MIN_ITR 100
117/* IntMode (Interrupt Mode)
118 *
119 * Valid Range: 0 - 2
120 *
121 * Default Value: 2 (MSI-X)
122 */
123E1000_PARAM(IntMode, "Interrupt Mode");
124#define MAX_INTMODE 2
125#define MIN_INTMODE 0
117 126
118/* 127/*
119 * Enable Smart Power Down of the PHY 128 * Enable Smart Power Down of the PHY
@@ -352,6 +361,24 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
352 adapter->itr = 20000; 361 adapter->itr = 20000;
353 } 362 }
354 } 363 }
364 { /* Interrupt Mode */
365 struct e1000_option opt = {
366 .type = range_option,
367 .name = "Interrupt Mode",
368 .err = "defaulting to 2 (MSI-X)",
369 .def = E1000E_INT_MODE_MSIX,
370 .arg = { .r = { .min = MIN_INTMODE,
371 .max = MAX_INTMODE } }
372 };
373
374 if (num_IntMode > bd) {
375 unsigned int int_mode = IntMode[bd];
376 e1000_validate_option(&int_mode, &opt, adapter);
377 adapter->int_mode = int_mode;
378 } else {
379 adapter->int_mode = opt.def;
380 }
381 }
355 { /* Smart Power Down */ 382 { /* Smart Power Down */
356 const struct e1000_option opt = { 383 const struct e1000_option opt = {
357 .type = enable_option, 384 .type = enable_option,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index b133dcf0e950..6cd333ae61d0 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -476,7 +476,9 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
476 if (ret_val) 476 if (ret_val)
477 return ret_val; 477 return ret_val;
478 478
479 if ((phy->type == e1000_phy_m88) && (phy->revision < 4)) { 479 if ((phy->type == e1000_phy_m88) &&
480 (phy->revision < E1000_REVISION_4) &&
481 (phy->id != BME1000_E_PHY_ID_R2)) {
480 /* 482 /*
481 * Force TX_CLK in the Extended PHY Specific Control Register 483 * Force TX_CLK in the Extended PHY Specific Control Register
482 * to 25MHz clock. 484 * to 25MHz clock.
@@ -504,6 +506,18 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
504 return ret_val; 506 return ret_val;
505 } 507 }
506 508
509 if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
510 /* Set PHY page 0, register 29 to 0x0003 */
511 ret_val = e1e_wphy(hw, 29, 0x0003);
512 if (ret_val)
513 return ret_val;
514
515 /* Set PHY page 0, register 30 to 0x0000 */
516 ret_val = e1e_wphy(hw, 30, 0x0000);
517 if (ret_val)
518 return ret_val;
519 }
520
507 /* Commit the changes. */ 521 /* Commit the changes. */
508 ret_val = e1000e_commit_phy(hw); 522 ret_val = e1000e_commit_phy(hw);
509 if (ret_val) 523 if (ret_val)
@@ -1720,6 +1734,91 @@ s32 e1000e_get_cfg_done(struct e1000_hw *hw)
1720 return 0; 1734 return 0;
1721} 1735}
1722 1736
1737/**
1738 * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
1739 * @hw: pointer to the HW structure
1740 *
1741 * Initializes a Intel Gigabit PHY3 when an EEPROM is not present.
1742 **/
1743s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
1744{
1745 hw_dbg(hw, "Running IGP 3 PHY init script\n");
1746
1747 /* PHY init IGP 3 */
1748 /* Enable rise/fall, 10-mode work in class-A */
1749 e1e_wphy(hw, 0x2F5B, 0x9018);
1750 /* Remove all caps from Replica path filter */
1751 e1e_wphy(hw, 0x2F52, 0x0000);
1752 /* Bias trimming for ADC, AFE and Driver (Default) */
1753 e1e_wphy(hw, 0x2FB1, 0x8B24);
1754 /* Increase Hybrid poly bias */
1755 e1e_wphy(hw, 0x2FB2, 0xF8F0);
1756 /* Add 4% to Tx amplitude in Gig mode */
1757 e1e_wphy(hw, 0x2010, 0x10B0);
1758 /* Disable trimming (TTT) */
1759 e1e_wphy(hw, 0x2011, 0x0000);
1760 /* Poly DC correction to 94.6% + 2% for all channels */
1761 e1e_wphy(hw, 0x20DD, 0x249A);
1762 /* ABS DC correction to 95.9% */
1763 e1e_wphy(hw, 0x20DE, 0x00D3);
1764 /* BG temp curve trim */
1765 e1e_wphy(hw, 0x28B4, 0x04CE);
1766 /* Increasing ADC OPAMP stage 1 currents to max */
1767 e1e_wphy(hw, 0x2F70, 0x29E4);
1768 /* Force 1000 ( required for enabling PHY regs configuration) */
1769 e1e_wphy(hw, 0x0000, 0x0140);
1770 /* Set upd_freq to 6 */
1771 e1e_wphy(hw, 0x1F30, 0x1606);
1772 /* Disable NPDFE */
1773 e1e_wphy(hw, 0x1F31, 0xB814);
1774 /* Disable adaptive fixed FFE (Default) */
1775 e1e_wphy(hw, 0x1F35, 0x002A);
1776 /* Enable FFE hysteresis */
1777 e1e_wphy(hw, 0x1F3E, 0x0067);
1778 /* Fixed FFE for short cable lengths */
1779 e1e_wphy(hw, 0x1F54, 0x0065);
1780 /* Fixed FFE for medium cable lengths */
1781 e1e_wphy(hw, 0x1F55, 0x002A);
1782 /* Fixed FFE for long cable lengths */
1783 e1e_wphy(hw, 0x1F56, 0x002A);
1784 /* Enable Adaptive Clip Threshold */
1785 e1e_wphy(hw, 0x1F72, 0x3FB0);
1786 /* AHT reset limit to 1 */
1787 e1e_wphy(hw, 0x1F76, 0xC0FF);
1788 /* Set AHT master delay to 127 msec */
1789 e1e_wphy(hw, 0x1F77, 0x1DEC);
1790 /* Set scan bits for AHT */
1791 e1e_wphy(hw, 0x1F78, 0xF9EF);
1792 /* Set AHT Preset bits */
1793 e1e_wphy(hw, 0x1F79, 0x0210);
1794 /* Change integ_factor of channel A to 3 */
1795 e1e_wphy(hw, 0x1895, 0x0003);
1796 /* Change prop_factor of channels BCD to 8 */
1797 e1e_wphy(hw, 0x1796, 0x0008);
1798 /* Change cg_icount + enable integbp for channels BCD */
1799 e1e_wphy(hw, 0x1798, 0xD008);
1800 /*
1801 * Change cg_icount + enable integbp + change prop_factor_master
1802 * to 8 for channel A
1803 */
1804 e1e_wphy(hw, 0x1898, 0xD918);
1805 /* Disable AHT in Slave mode on channel A */
1806 e1e_wphy(hw, 0x187A, 0x0800);
1807 /*
1808 * Enable LPLU and disable AN to 1000 in non-D0a states,
1809 * Enable SPD+B2B
1810 */
1811 e1e_wphy(hw, 0x0019, 0x008D);
1812 /* Enable restart AN on an1000_dis change */
1813 e1e_wphy(hw, 0x001B, 0x2080);
1814 /* Enable wh_fifo read clock in 10/100 modes */
1815 e1e_wphy(hw, 0x0014, 0x0045);
1816 /* Restart AN, Speed selection is 1000 */
1817 e1e_wphy(hw, 0x0000, 0x1340);
1818
1819 return 0;
1820}
1821
1723/* Internal function pointers */ 1822/* Internal function pointers */
1724 1823
1725/** 1824/**
@@ -1969,6 +2068,99 @@ out:
1969} 2068}
1970 2069
1971/** 2070/**
2071 * e1000e_read_phy_reg_bm2 - Read BM PHY register
2072 * @hw: pointer to the HW structure
2073 * @offset: register offset to be read
2074 * @data: pointer to the read data
2075 *
2076 * Acquires semaphore, if necessary, then reads the PHY register at offset
2077 * and stores the retrieved information in data. Releases any acquired
2078 * semaphores before exiting.
2079 **/
2080s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2081{
2082 s32 ret_val;
2083 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2084
2085 /* Page 800 works differently than the rest so it has its own func */
2086 if (page == BM_WUC_PAGE) {
2087 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
2088 true);
2089 return ret_val;
2090 }
2091
2092 ret_val = hw->phy.ops.acquire_phy(hw);
2093 if (ret_val)
2094 return ret_val;
2095
2096 hw->phy.addr = 1;
2097
2098 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2099
2100 /* Page is shifted left, PHY expects (page x 32) */
2101 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2102 page);
2103
2104 if (ret_val) {
2105 hw->phy.ops.release_phy(hw);
2106 return ret_val;
2107 }
2108 }
2109
2110 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2111 data);
2112 hw->phy.ops.release_phy(hw);
2113
2114 return ret_val;
2115}
2116
2117/**
2118 * e1000e_write_phy_reg_bm2 - Write BM PHY register
2119 * @hw: pointer to the HW structure
2120 * @offset: register offset to write to
2121 * @data: data to write at register offset
2122 *
2123 * Acquires semaphore, if necessary, then writes the data to PHY register
2124 * at the offset. Releases any acquired semaphores before exiting.
2125 **/
2126s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2127{
2128 s32 ret_val;
2129 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2130
2131 /* Page 800 works differently than the rest so it has its own func */
2132 if (page == BM_WUC_PAGE) {
2133 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
2134 false);
2135 return ret_val;
2136 }
2137
2138 ret_val = hw->phy.ops.acquire_phy(hw);
2139 if (ret_val)
2140 return ret_val;
2141
2142 hw->phy.addr = 1;
2143
2144 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2145 /* Page is shifted left, PHY expects (page x 32) */
2146 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2147 page);
2148
2149 if (ret_val) {
2150 hw->phy.ops.release_phy(hw);
2151 return ret_val;
2152 }
2153 }
2154
2155 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2156 data);
2157
2158 hw->phy.ops.release_phy(hw);
2159
2160 return ret_val;
2161}
2162
2163/**
1972 * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register 2164 * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register
1973 * @hw: pointer to the HW structure 2165 * @hw: pointer to the HW structure
1974 * @offset: register offset to be read or written 2166 * @offset: register offset to be read or written
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index e01926b7b5b7..5524271eedca 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,13 +40,13 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0092" 43#define DRV_VERSION "EHEA_0093"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
47#define DLPAR_MEM_ADD 2 47#define DLPAR_MEM_ADD 2
48#define DLPAR_MEM_REM 4 48#define DLPAR_MEM_REM 4
49#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD) 49#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)
50 50
51#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \ 51#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
52 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) 52 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b70c5314f537..c765ec609462 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -219,9 +219,11 @@ static void ehea_update_firmware_handles(void)
219 } 219 }
220 220
221out_update: 221out_update:
222 mutex_lock(&ehea_fw_handles.lock);
222 kfree(ehea_fw_handles.arr); 223 kfree(ehea_fw_handles.arr);
223 ehea_fw_handles.arr = arr; 224 ehea_fw_handles.arr = arr;
224 ehea_fw_handles.num_entries = i; 225 ehea_fw_handles.num_entries = i;
226 mutex_unlock(&ehea_fw_handles.lock);
225} 227}
226 228
227static void ehea_update_bcmc_registrations(void) 229static void ehea_update_bcmc_registrations(void)
@@ -293,9 +295,11 @@ static void ehea_update_bcmc_registrations(void)
293 } 295 }
294 296
295out_update: 297out_update:
298 spin_lock(&ehea_bcmc_regs.lock);
296 kfree(ehea_bcmc_regs.arr); 299 kfree(ehea_bcmc_regs.arr);
297 ehea_bcmc_regs.arr = arr; 300 ehea_bcmc_regs.arr = arr;
298 ehea_bcmc_regs.num_entries = i; 301 ehea_bcmc_regs.num_entries = i;
302 spin_unlock(&ehea_bcmc_regs.lock);
299} 303}
300 304
301static struct net_device_stats *ehea_get_stats(struct net_device *dev) 305static struct net_device_stats *ehea_get_stats(struct net_device *dev)
@@ -1770,8 +1774,6 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1770 1774
1771 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); 1775 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1772 1776
1773 spin_lock(&ehea_bcmc_regs.lock);
1774
1775 /* Deregister old MAC in pHYP */ 1777 /* Deregister old MAC in pHYP */
1776 if (port->state == EHEA_PORT_UP) { 1778 if (port->state == EHEA_PORT_UP) {
1777 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 1779 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
@@ -1792,7 +1794,6 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1792 1794
1793out_upregs: 1795out_upregs:
1794 ehea_update_bcmc_registrations(); 1796 ehea_update_bcmc_registrations();
1795 spin_unlock(&ehea_bcmc_regs.lock);
1796out_free: 1797out_free:
1797 kfree(cb0); 1798 kfree(cb0);
1798out: 1799out:
@@ -1954,8 +1955,6 @@ static void ehea_set_multicast_list(struct net_device *dev)
1954 } 1955 }
1955 ehea_promiscuous(dev, 0); 1956 ehea_promiscuous(dev, 0);
1956 1957
1957 spin_lock(&ehea_bcmc_regs.lock);
1958
1959 if (dev->flags & IFF_ALLMULTI) { 1958 if (dev->flags & IFF_ALLMULTI) {
1960 ehea_allmulti(dev, 1); 1959 ehea_allmulti(dev, 1);
1961 goto out; 1960 goto out;
@@ -1985,7 +1984,6 @@ static void ehea_set_multicast_list(struct net_device *dev)
1985 } 1984 }
1986out: 1985out:
1987 ehea_update_bcmc_registrations(); 1986 ehea_update_bcmc_registrations();
1988 spin_unlock(&ehea_bcmc_regs.lock);
1989 return; 1987 return;
1990} 1988}
1991 1989
@@ -2466,8 +2464,6 @@ static int ehea_up(struct net_device *dev)
2466 if (port->state == EHEA_PORT_UP) 2464 if (port->state == EHEA_PORT_UP)
2467 return 0; 2465 return 0;
2468 2466
2469 mutex_lock(&ehea_fw_handles.lock);
2470
2471 ret = ehea_port_res_setup(port, port->num_def_qps, 2467 ret = ehea_port_res_setup(port, port->num_def_qps,
2472 port->num_add_tx_qps); 2468 port->num_add_tx_qps);
2473 if (ret) { 2469 if (ret) {
@@ -2504,8 +2500,6 @@ static int ehea_up(struct net_device *dev)
2504 } 2500 }
2505 } 2501 }
2506 2502
2507 spin_lock(&ehea_bcmc_regs.lock);
2508
2509 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); 2503 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2510 if (ret) { 2504 if (ret) {
2511 ret = -EIO; 2505 ret = -EIO;
@@ -2527,10 +2521,8 @@ out:
2527 ehea_info("Failed starting %s. ret=%i", dev->name, ret); 2521 ehea_info("Failed starting %s. ret=%i", dev->name, ret);
2528 2522
2529 ehea_update_bcmc_registrations(); 2523 ehea_update_bcmc_registrations();
2530 spin_unlock(&ehea_bcmc_regs.lock);
2531 2524
2532 ehea_update_firmware_handles(); 2525 ehea_update_firmware_handles();
2533 mutex_unlock(&ehea_fw_handles.lock);
2534 2526
2535 return ret; 2527 return ret;
2536} 2528}
@@ -2580,9 +2572,6 @@ static int ehea_down(struct net_device *dev)
2580 if (port->state == EHEA_PORT_DOWN) 2572 if (port->state == EHEA_PORT_DOWN)
2581 return 0; 2573 return 0;
2582 2574
2583 mutex_lock(&ehea_fw_handles.lock);
2584
2585 spin_lock(&ehea_bcmc_regs.lock);
2586 ehea_drop_multicast_list(dev); 2575 ehea_drop_multicast_list(dev);
2587 ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 2576 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2588 2577
@@ -2591,7 +2580,6 @@ static int ehea_down(struct net_device *dev)
2591 port->state = EHEA_PORT_DOWN; 2580 port->state = EHEA_PORT_DOWN;
2592 2581
2593 ehea_update_bcmc_registrations(); 2582 ehea_update_bcmc_registrations();
2594 spin_unlock(&ehea_bcmc_regs.lock);
2595 2583
2596 ret = ehea_clean_all_portres(port); 2584 ret = ehea_clean_all_portres(port);
2597 if (ret) 2585 if (ret)
@@ -2599,7 +2587,6 @@ static int ehea_down(struct net_device *dev)
2599 dev->name, ret); 2587 dev->name, ret);
2600 2588
2601 ehea_update_firmware_handles(); 2589 ehea_update_firmware_handles();
2602 mutex_unlock(&ehea_fw_handles.lock);
2603 2590
2604 return ret; 2591 return ret;
2605} 2592}
@@ -3378,7 +3365,6 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
3378 ehea_error("Invalid ibmebus device probed"); 3365 ehea_error("Invalid ibmebus device probed");
3379 return -EINVAL; 3366 return -EINVAL;
3380 } 3367 }
3381 mutex_lock(&ehea_fw_handles.lock);
3382 3368
3383 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3369 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3384 if (!adapter) { 3370 if (!adapter) {
@@ -3462,7 +3448,6 @@ out_free_ad:
3462 3448
3463out: 3449out:
3464 ehea_update_firmware_handles(); 3450 ehea_update_firmware_handles();
3465 mutex_unlock(&ehea_fw_handles.lock);
3466 return ret; 3451 return ret;
3467} 3452}
3468 3453
@@ -3481,8 +3466,6 @@ static int __devexit ehea_remove(struct of_device *dev)
3481 3466
3482 flush_scheduled_work(); 3467 flush_scheduled_work();
3483 3468
3484 mutex_lock(&ehea_fw_handles.lock);
3485
3486 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); 3469 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3487 tasklet_kill(&adapter->neq_tasklet); 3470 tasklet_kill(&adapter->neq_tasklet);
3488 3471
@@ -3492,7 +3475,6 @@ static int __devexit ehea_remove(struct of_device *dev)
3492 kfree(adapter); 3475 kfree(adapter);
3493 3476
3494 ehea_update_firmware_handles(); 3477 ehea_update_firmware_handles();
3495 mutex_unlock(&ehea_fw_handles.lock);
3496 3478
3497 return 0; 3479 return 0;
3498} 3480}
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 156eb6320b4e..2a33a613d9e6 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -535,7 +535,7 @@ u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
535 cb_logaddr, /* R5 */ 535 cb_logaddr, /* R5 */
536 0, 0, 0, 0, 0); /* R6-R10 */ 536 0, 0, 0, 0, 0); /* R6-R10 */
537#ifdef DEBUG 537#ifdef DEBUG
538 ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea"); 538 ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
539#endif 539#endif
540 return hret; 540 return hret;
541} 541}
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 140f05baafd8..db8a9257e680 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -595,7 +595,8 @@ static int ehea_create_busmap_callback(unsigned long pfn,
595 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE); 595 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
596 mr_len = *(unsigned long *)arg; 596 mr_len = *(unsigned long *)arg;
597 597
598 ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); 598 if (!ehea_bmap)
599 ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
599 if (!ehea_bmap) 600 if (!ehea_bmap)
600 return -ENOMEM; 601 return -ENOMEM;
601 602
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index aa0bf6e1c694..e1b441effbbe 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -110,7 +110,7 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
110 } 110 }
111 if (ret && netif_msg_drv(priv)) 111 if (ret && netif_msg_drv(priv))
112 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 112 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
113 __FUNCTION__, ret); 113 __func__, ret);
114 114
115 return ret; 115 return ret;
116} 116}
@@ -131,7 +131,7 @@ static int spi_write_buf(struct enc28j60_net *priv, int len,
131 ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1); 131 ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
132 if (ret && netif_msg_drv(priv)) 132 if (ret && netif_msg_drv(priv))
133 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 133 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
134 __FUNCTION__, ret); 134 __func__, ret);
135 } 135 }
136 return ret; 136 return ret;
137} 137}
@@ -156,7 +156,7 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
156 ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen); 156 ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
157 if (ret) 157 if (ret)
158 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 158 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
159 __FUNCTION__, ret); 159 __func__, ret);
160 else 160 else
161 val = rx_buf[slen - 1]; 161 val = rx_buf[slen - 1];
162 162
@@ -176,14 +176,14 @@ static int spi_write_op(struct enc28j60_net *priv, u8 op,
176 ret = spi_write(priv->spi, priv->spi_transfer_buf, 2); 176 ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
177 if (ret && netif_msg_drv(priv)) 177 if (ret && netif_msg_drv(priv))
178 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 178 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
179 __FUNCTION__, ret); 179 __func__, ret);
180 return ret; 180 return ret;
181} 181}
182 182
183static void enc28j60_soft_reset(struct enc28j60_net *priv) 183static void enc28j60_soft_reset(struct enc28j60_net *priv)
184{ 184{
185 if (netif_msg_hw(priv)) 185 if (netif_msg_hw(priv))
186 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 186 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
187 187
188 spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET); 188 spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
189 /* Errata workaround #1, CLKRDY check is unreliable, 189 /* Errata workaround #1, CLKRDY check is unreliable,
@@ -357,7 +357,7 @@ static void enc28j60_mem_read(struct enc28j60_net *priv,
357 reg = nolock_regw_read(priv, ERDPTL); 357 reg = nolock_regw_read(priv, ERDPTL);
358 if (reg != addr) 358 if (reg != addr)
359 printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT " 359 printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT "
360 "(0x%04x - 0x%04x)\n", __FUNCTION__, reg, addr); 360 "(0x%04x - 0x%04x)\n", __func__, reg, addr);
361 } 361 }
362#endif 362#endif
363 spi_read_buf(priv, len, data); 363 spi_read_buf(priv, len, data);
@@ -380,7 +380,7 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
380 if (reg != TXSTART_INIT) 380 if (reg != TXSTART_INIT)
381 printk(KERN_DEBUG DRV_NAME 381 printk(KERN_DEBUG DRV_NAME
382 ": %s() ERWPT:0x%04x != 0x%04x\n", 382 ": %s() ERWPT:0x%04x != 0x%04x\n",
383 __FUNCTION__, reg, TXSTART_INIT); 383 __func__, reg, TXSTART_INIT);
384 } 384 }
385#endif 385#endif
386 /* Set the TXND pointer to correspond to the packet size given */ 386 /* Set the TXND pointer to correspond to the packet size given */
@@ -390,13 +390,13 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
390 if (netif_msg_hw(priv)) 390 if (netif_msg_hw(priv))
391 printk(KERN_DEBUG DRV_NAME 391 printk(KERN_DEBUG DRV_NAME
392 ": %s() after control byte ERWPT:0x%04x\n", 392 ": %s() after control byte ERWPT:0x%04x\n",
393 __FUNCTION__, nolock_regw_read(priv, EWRPTL)); 393 __func__, nolock_regw_read(priv, EWRPTL));
394 /* copy the packet into the transmit buffer */ 394 /* copy the packet into the transmit buffer */
395 spi_write_buf(priv, len, data); 395 spi_write_buf(priv, len, data);
396 if (netif_msg_hw(priv)) 396 if (netif_msg_hw(priv))
397 printk(KERN_DEBUG DRV_NAME 397 printk(KERN_DEBUG DRV_NAME
398 ": %s() after write packet ERWPT:0x%04x, len=%d\n", 398 ": %s() after write packet ERWPT:0x%04x, len=%d\n",
399 __FUNCTION__, nolock_regw_read(priv, EWRPTL), len); 399 __func__, nolock_regw_read(priv, EWRPTL), len);
400 mutex_unlock(&priv->lock); 400 mutex_unlock(&priv->lock);
401} 401}
402 402
@@ -495,7 +495,7 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev)
495 if (netif_msg_drv(priv)) 495 if (netif_msg_drv(priv))
496 printk(KERN_DEBUG DRV_NAME 496 printk(KERN_DEBUG DRV_NAME
497 ": %s() Hardware must be disabled to set " 497 ": %s() Hardware must be disabled to set "
498 "Mac address\n", __FUNCTION__); 498 "Mac address\n", __func__);
499 ret = -EBUSY; 499 ret = -EBUSY;
500 } 500 }
501 mutex_unlock(&priv->lock); 501 mutex_unlock(&priv->lock);
@@ -575,7 +575,7 @@ static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
575 if (start > 0x1FFF || end > 0x1FFF || start > end) { 575 if (start > 0x1FFF || end > 0x1FFF || start > end) {
576 if (netif_msg_drv(priv)) 576 if (netif_msg_drv(priv))
577 printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO " 577 printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO "
578 "bad parameters!\n", __FUNCTION__, start, end); 578 "bad parameters!\n", __func__, start, end);
579 return; 579 return;
580 } 580 }
581 /* set receive buffer start + end */ 581 /* set receive buffer start + end */
@@ -591,7 +591,7 @@ static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
591 if (start > 0x1FFF || end > 0x1FFF || start > end) { 591 if (start > 0x1FFF || end > 0x1FFF || start > end) {
592 if (netif_msg_drv(priv)) 592 if (netif_msg_drv(priv))
593 printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO " 593 printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO "
594 "bad parameters!\n", __FUNCTION__, start, end); 594 "bad parameters!\n", __func__, start, end);
595 return; 595 return;
596 } 596 }
597 /* set transmit buffer start + end */ 597 /* set transmit buffer start + end */
@@ -630,7 +630,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
630 u8 reg; 630 u8 reg;
631 631
632 if (netif_msg_drv(priv)) 632 if (netif_msg_drv(priv))
633 printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __FUNCTION__, 633 printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__,
634 priv->full_duplex ? "FullDuplex" : "HalfDuplex"); 634 priv->full_duplex ? "FullDuplex" : "HalfDuplex");
635 635
636 mutex_lock(&priv->lock); 636 mutex_lock(&priv->lock);
@@ -661,7 +661,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
661 if (reg == 0x00 || reg == 0xff) { 661 if (reg == 0x00 || reg == 0xff) {
662 if (netif_msg_drv(priv)) 662 if (netif_msg_drv(priv))
663 printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n", 663 printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n",
664 __FUNCTION__, reg); 664 __func__, reg);
665 return 0; 665 return 0;
666 } 666 }
667 667
@@ -724,7 +724,7 @@ static void enc28j60_hw_enable(struct enc28j60_net *priv)
724 /* enable interrupts */ 724 /* enable interrupts */
725 if (netif_msg_hw(priv)) 725 if (netif_msg_hw(priv))
726 printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n", 726 printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n",
727 __FUNCTION__); 727 __func__);
728 728
729 enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE); 729 enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);
730 730
@@ -888,7 +888,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
888 if (netif_msg_rx_err(priv)) 888 if (netif_msg_rx_err(priv))
889 dev_err(&ndev->dev, 889 dev_err(&ndev->dev,
890 "%s() Invalid packet address!! 0x%04x\n", 890 "%s() Invalid packet address!! 0x%04x\n",
891 __FUNCTION__, priv->next_pk_ptr); 891 __func__, priv->next_pk_ptr);
892 /* packet address corrupted: reset RX logic */ 892 /* packet address corrupted: reset RX logic */
893 mutex_lock(&priv->lock); 893 mutex_lock(&priv->lock);
894 nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); 894 nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
@@ -917,7 +917,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
917 rxstat |= rsv[4]; 917 rxstat |= rsv[4];
918 918
919 if (netif_msg_rx_status(priv)) 919 if (netif_msg_rx_status(priv))
920 enc28j60_dump_rsv(priv, __FUNCTION__, next_packet, len, rxstat); 920 enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat);
921 921
922 if (!RSV_GETBIT(rxstat, RSV_RXOK)) { 922 if (!RSV_GETBIT(rxstat, RSV_RXOK)) {
923 if (netif_msg_rx_err(priv)) 923 if (netif_msg_rx_err(priv))
@@ -941,7 +941,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
941 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv), 941 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
942 len, skb_put(skb, len)); 942 len, skb_put(skb, len));
943 if (netif_msg_pktdata(priv)) 943 if (netif_msg_pktdata(priv))
944 dump_packet(__FUNCTION__, skb->len, skb->data); 944 dump_packet(__func__, skb->len, skb->data);
945 skb->protocol = eth_type_trans(skb, ndev); 945 skb->protocol = eth_type_trans(skb, ndev);
946 /* update statistics */ 946 /* update statistics */
947 ndev->stats.rx_packets++; 947 ndev->stats.rx_packets++;
@@ -958,7 +958,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
958 erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT); 958 erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
959 if (netif_msg_hw(priv)) 959 if (netif_msg_hw(priv))
960 printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n", 960 printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n",
961 __FUNCTION__, erxrdpt); 961 __func__, erxrdpt);
962 962
963 mutex_lock(&priv->lock); 963 mutex_lock(&priv->lock);
964 nolock_regw_write(priv, ERXRDPTL, erxrdpt); 964 nolock_regw_write(priv, ERXRDPTL, erxrdpt);
@@ -968,7 +968,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
968 reg = nolock_regw_read(priv, ERXRDPTL); 968 reg = nolock_regw_read(priv, ERXRDPTL);
969 if (reg != erxrdpt) 969 if (reg != erxrdpt)
970 printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify " 970 printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify "
971 "error (0x%04x - 0x%04x)\n", __FUNCTION__, 971 "error (0x%04x - 0x%04x)\n", __func__,
972 reg, erxrdpt); 972 reg, erxrdpt);
973 } 973 }
974#endif 974#endif
@@ -1006,7 +1006,7 @@ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
1006 mutex_unlock(&priv->lock); 1006 mutex_unlock(&priv->lock);
1007 if (netif_msg_rx_status(priv)) 1007 if (netif_msg_rx_status(priv))
1008 printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n", 1008 printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n",
1009 __FUNCTION__, free_space); 1009 __func__, free_space);
1010 return free_space; 1010 return free_space;
1011} 1011}
1012 1012
@@ -1022,7 +1022,7 @@ static void enc28j60_check_link_status(struct net_device *ndev)
1022 reg = enc28j60_phy_read(priv, PHSTAT2); 1022 reg = enc28j60_phy_read(priv, PHSTAT2);
1023 if (netif_msg_hw(priv)) 1023 if (netif_msg_hw(priv))
1024 printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, " 1024 printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, "
1025 "PHSTAT2: %04x\n", __FUNCTION__, 1025 "PHSTAT2: %04x\n", __func__,
1026 enc28j60_phy_read(priv, PHSTAT1), reg); 1026 enc28j60_phy_read(priv, PHSTAT1), reg);
1027 duplex = reg & PHSTAT2_DPXSTAT; 1027 duplex = reg & PHSTAT2_DPXSTAT;
1028 1028
@@ -1095,7 +1095,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
1095 int intflags, loop; 1095 int intflags, loop;
1096 1096
1097 if (netif_msg_intr(priv)) 1097 if (netif_msg_intr(priv))
1098 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1098 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
1099 /* disable further interrupts */ 1099 /* disable further interrupts */
1100 locked_reg_bfclr(priv, EIE, EIE_INTIE); 1100 locked_reg_bfclr(priv, EIE, EIE_INTIE);
1101 1101
@@ -1198,7 +1198,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
1198 /* re-enable interrupts */ 1198 /* re-enable interrupts */
1199 locked_reg_bfset(priv, EIE, EIE_INTIE); 1199 locked_reg_bfset(priv, EIE, EIE_INTIE);
1200 if (netif_msg_intr(priv)) 1200 if (netif_msg_intr(priv))
1201 printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __FUNCTION__); 1201 printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__);
1202} 1202}
1203 1203
1204/* 1204/*
@@ -1213,7 +1213,7 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv)
1213 ": Tx Packet Len:%d\n", priv->tx_skb->len); 1213 ": Tx Packet Len:%d\n", priv->tx_skb->len);
1214 1214
1215 if (netif_msg_pktdata(priv)) 1215 if (netif_msg_pktdata(priv))
1216 dump_packet(__FUNCTION__, 1216 dump_packet(__func__,
1217 priv->tx_skb->len, priv->tx_skb->data); 1217 priv->tx_skb->len, priv->tx_skb->data);
1218 enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data); 1218 enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data);
1219 1219
@@ -1254,7 +1254,7 @@ static int enc28j60_send_packet(struct sk_buff *skb, struct net_device *dev)
1254 struct enc28j60_net *priv = netdev_priv(dev); 1254 struct enc28j60_net *priv = netdev_priv(dev);
1255 1255
1256 if (netif_msg_tx_queued(priv)) 1256 if (netif_msg_tx_queued(priv))
1257 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1257 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
1258 1258
1259 /* If some error occurs while trying to transmit this 1259 /* If some error occurs while trying to transmit this
1260 * packet, you should return '1' from this function. 1260 * packet, you should return '1' from this function.
@@ -1325,7 +1325,7 @@ static int enc28j60_net_open(struct net_device *dev)
1325 struct enc28j60_net *priv = netdev_priv(dev); 1325 struct enc28j60_net *priv = netdev_priv(dev);
1326 1326
1327 if (netif_msg_drv(priv)) 1327 if (netif_msg_drv(priv))
1328 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1328 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
1329 1329
1330 if (!is_valid_ether_addr(dev->dev_addr)) { 1330 if (!is_valid_ether_addr(dev->dev_addr)) {
1331 if (netif_msg_ifup(priv)) { 1331 if (netif_msg_ifup(priv)) {
@@ -1363,7 +1363,7 @@ static int enc28j60_net_close(struct net_device *dev)
1363 struct enc28j60_net *priv = netdev_priv(dev); 1363 struct enc28j60_net *priv = netdev_priv(dev);
1364 1364
1365 if (netif_msg_drv(priv)) 1365 if (netif_msg_drv(priv))
1366 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1366 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
1367 1367
1368 enc28j60_hw_disable(priv); 1368 enc28j60_hw_disable(priv);
1369 enc28j60_lowpower(priv, true); 1369 enc28j60_lowpower(priv, true);
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
new file mode 100644
index 000000000000..391c3bce5b79
--- /dev/null
+++ b/drivers/net/enic/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_ENIC) := enic.o
2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o vnic_dev.o vnic_rq.o
5
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h
new file mode 100644
index 000000000000..c036a8bfd043
--- /dev/null
+++ b/drivers/net/enic/cq_desc.h
@@ -0,0 +1,79 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _CQ_DESC_H_
21#define _CQ_DESC_H_
22
23/*
24 * Completion queue descriptor types
25 */
26enum cq_desc_types {
27 CQ_DESC_TYPE_WQ_ENET = 0,
28 CQ_DESC_TYPE_DESC_COPY = 1,
29 CQ_DESC_TYPE_WQ_EXCH = 2,
30 CQ_DESC_TYPE_RQ_ENET = 3,
31 CQ_DESC_TYPE_RQ_FCP = 4,
32};
33
34/* Completion queue descriptor: 16B
35 *
36 * All completion queues have this basic layout. The
37 * type_specfic area is unique for each completion
38 * queue type.
39 */
40struct cq_desc {
41 __le16 completed_index;
42 __le16 q_number;
43 u8 type_specfic[11];
44 u8 type_color;
45};
46
47#define CQ_DESC_TYPE_BITS 7
48#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
49#define CQ_DESC_COLOR_MASK 1
50#define CQ_DESC_Q_NUM_BITS 10
51#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
52#define CQ_DESC_COMP_NDX_BITS 12
53#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
54
55static inline void cq_desc_dec(const struct cq_desc *desc_arg,
56 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
57{
58 const struct cq_desc *desc = desc_arg;
59 const u8 type_color = desc->type_color;
60
61 *color = (type_color >> CQ_DESC_TYPE_BITS) & CQ_DESC_COLOR_MASK;
62
63 /*
64 * Make sure color bit is read from desc *before* other fields
65 * are read from desc. Hardware guarantees color bit is last
66 * bit (byte) written. Adding the rmb() prevents the compiler
67 * and/or CPU from reordering the reads which would potentially
68 * result in reading stale values.
69 */
70
71 rmb();
72
73 *type = type_color & CQ_DESC_TYPE_MASK;
74 *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
75 *completed_index = le16_to_cpu(desc->completed_index) &
76 CQ_DESC_COMP_NDX_MASK;
77}
78
79#endif /* _CQ_DESC_H_ */
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
new file mode 100644
index 000000000000..03dce9ed612c
--- /dev/null
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -0,0 +1,169 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _CQ_ENET_DESC_H_
21#define _CQ_ENET_DESC_H_
22
23#include "cq_desc.h"
24
/* Ethernet completion queue descriptor: 16B
 * Same layout as the generic struct cq_desc; the type-specific area is
 * unused (reserved) for WQ completions.
 */
struct cq_enet_wq_desc {
	__le16 completed_index;
	__le16 q_number;
	u8 reserved[11];
	u8 type_color;
};
32
33static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
34 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
35{
36 cq_desc_dec((struct cq_desc *)desc, type,
37 color, q_number, completed_index);
38}
39
/* Completion queue descriptor: Ethernet receive queue, 16B
 * Several fields carry packed flags; see the CQ_ENET_RQ_DESC_* masks
 * below and cq_enet_rq_desc_dec() for how each is unpacked.
 */
struct cq_enet_rq_desc {
	__le16 completed_index_flags;		/* index + SOP/EOP/FCoE/port flags */
	__le16 q_number_rss_type_flags;		/* q number + RSS type + csum flag */
	__le32 rss_hash;			/* RSS hash of the frame */
	__le16 bytes_written_flags;		/* byte count + trunc/VLAN flags */
	__le16 vlan;				/* stripped VLAN tag, if any */
	__le16 checksum_fcoe;			/* csum, or FCoE SOF/EOF delimiters */
	u8 flags;				/* per-protocol status flags */
	u8 type_color;				/* generic type/color byte */
};
51
/* Flags packed into the upper bits of completed_index_flags */
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT          (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE                  (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP                   (0x1 << 14)
#define CQ_ENET_RQ_DESC_FLAGS_SOP                   (0x1 << 15)

/* RSS type sits above the queue number in q_number_rss_type_flags */
#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS               4
#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
	((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE               0
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4               1
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4           2
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6               3
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6           4
#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX            5
#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX        6

/* Flag in q_number_rss_type_flags: HW did not compute a checksum */
#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC         (0x1 << 14)

/* Byte count and flags packed into bytes_written_flags */
#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS          14
#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
	((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED             (0x1 << 14)
#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED         (0x1 << 15)

/* FCoE SOF/EOF delimiters packed into checksum_fcoe (FCoE frames only) */
#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS               4
#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
	((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS               8
#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
	((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT              8

/* Per-protocol status bits in the 8-bit flags field.  Note some bit
 * positions are shared between the Ethernet and FCoE interpretations.
 */
#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK       (0x1 << 0)
#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK              (0x1 << 0)
#define CQ_ENET_RQ_DESC_FLAGS_UDP                   (0x1 << 1)
#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR              (0x1 << 1)
#define CQ_ENET_RQ_DESC_FLAGS_TCP                   (0x1 << 2)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK          (0x1 << 3)
#define CQ_ENET_RQ_DESC_FLAGS_IPV6                  (0x1 << 4)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4                  (0x1 << 5)
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT         (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK                (0x1 << 7)
94
95static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
96 u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
97 u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
98 u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
99 u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
100 u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
101 u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
102 u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
103{
104 u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
105 u16 q_number_rss_type_flags =
106 le16_to_cpu(desc->q_number_rss_type_flags);
107 u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
108
109 cq_desc_dec((struct cq_desc *)desc, type,
110 color, q_number, completed_index);
111
112 *ingress_port = (completed_index_flags &
113 CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
114 *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
115 1 : 0;
116 *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
117 1 : 0;
118 *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
119 1 : 0;
120
121 *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
122 CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
123 *csum_not_calc = (q_number_rss_type_flags &
124 CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
125
126 *rss_hash = le32_to_cpu(desc->rss_hash);
127
128 *bytes_written = bytes_written_flags &
129 CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
130 *packet_error = (bytes_written_flags &
131 CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
132 *vlan_stripped = (bytes_written_flags &
133 CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
134
135 *vlan = le16_to_cpu(desc->vlan);
136
137 if (*fcoe) {
138 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
139 CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
140 *fcoe_fc_crc_ok = (desc->flags &
141 CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
142 *fcoe_enc_error = (desc->flags &
143 CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
144 *fcoe_eof = (u8)((desc->checksum_fcoe >>
145 CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
146 CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
147 *checksum = 0;
148 } else {
149 *fcoe_sof = 0;
150 *fcoe_fc_crc_ok = 0;
151 *fcoe_enc_error = 0;
152 *fcoe_eof = 0;
153 *checksum = le16_to_cpu(desc->checksum_fcoe);
154 }
155
156 *tcp_udp_csum_ok =
157 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
158 *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
159 *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
160 *ipv4_csum_ok =
161 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
162 *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
163 *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
164 *ipv4_fragment =
165 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
166 *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
167}
168
169#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
new file mode 100644
index 000000000000..fb83c926da58
--- /dev/null
+++ b/drivers/net/enic/enic.h
@@ -0,0 +1,115 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _ENIC_H_
21#define _ENIC_H_
22
23#include <linux/inet_lro.h>
24
25#include "vnic_enet.h"
26#include "vnic_dev.h"
27#include "vnic_wq.h"
28#include "vnic_rq.h"
29#include "vnic_cq.h"
30#include "vnic_intr.h"
31#include "vnic_stats.h"
32#include "vnic_rss.h"
33
/* Driver identity strings (reported via ethtool and module info) */
#define DRV_NAME		"enic"
#define DRV_DESCRIPTION		"Cisco 10G Ethernet Driver"
#define DRV_VERSION		"0.0.1.18163.472"
#define DRV_COPYRIGHT		"Copyright 2008 Cisco Systems, Inc"
#define PFX			DRV_NAME ": "	/* printk message prefix */

/* LRO resource limits per interface */
#define ENIC_LRO_MAX_DESC	8
#define ENIC_LRO_MAX_AGGR	64
42
/* Completion queue slots: one CQ for receive, one for transmit */
enum enic_cq_index {
	ENIC_CQ_RQ,
	ENIC_CQ_WQ,
	ENIC_CQ_MAX,
};

/* Interrupt sources when running in legacy INTx mode (bit positions
 * tested against the legacy PBA word, see ENIC_TEST_INTR)
 */
enum enic_intx_intr_index {
	ENIC_INTX_WQ_RQ,
	ENIC_INTX_ERR,
	ENIC_INTX_NOTIFY,
	ENIC_INTX_MAX,
};

/* Vector assignment when running in MSI-X mode */
enum enic_msix_intr_index {
	ENIC_MSIX_RQ,
	ENIC_MSIX_WQ,
	ENIC_MSIX_ERR,
	ENIC_MSIX_NOTIFY,
	ENIC_MSIX_MAX,
};

/* Book-keeping for one requested MSI-X vector */
struct enic_msix_entry {
	int requested;			/* nonzero once request_irq succeeded */
	char devname[IFNAMSIZ];		/* name passed to request_irq */
	irqreturn_t (*isr)(int, void *);	/* handler for this vector */
	void *devid;			/* cookie passed back to the handler */
};
70
/* Per-instance private data structure
 *
 * Laid out so that the transmit, receive, interrupt and completion
 * resources each start on their own cache line (____cacheline_aligned)
 * to limit false sharing between the hot paths.
 */
struct enic {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar0;
	struct vnic_dev *vdev;
	struct net_device_stats net_stats;
	struct timer_list notify_timer;		/* periodic notify polling */
	struct work_struct reset;		/* deferred WQ/RQ error recovery */
	struct msix_entry msix_entry[ENIC_MSIX_MAX];
	struct enic_msix_entry msix[ENIC_MSIX_MAX];
	u32 msg_enable;
	spinlock_t devcmd_lock;			/* serializes vnic_dev_* devcmds */
	u8 mac_addr[ETH_ALEN];
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];	/* last-programmed mcast list */
	unsigned int mc_count;
	int csum_rx_enabled;
	u32 port_mtu;				/* switch-port MTU last seen */

	/* work queue cache line section */
	____cacheline_aligned struct vnic_wq wq[1];
	spinlock_t wq_lock[1];			/* protects per-WQ posting/cleanup */
	unsigned int wq_count;
	struct vlan_group *vlan_group;

	/* receive queue cache line section */
	____cacheline_aligned struct vnic_rq rq[1];
	unsigned int rq_count;
	int (*rq_alloc_buf)(struct vnic_rq *rq);
	struct napi_struct napi;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];

	/* interrupt resource cache line section */
	____cacheline_aligned struct vnic_intr intr[ENIC_MSIX_MAX];
	unsigned int intr_count;
	u32 __iomem *legacy_pba; /* memory-mapped */

	/* completion queue cache line section */
	____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
	unsigned int cq_count;
};
114
115#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
new file mode 100644
index 000000000000..4cf5ec76c993
--- /dev/null
+++ b/drivers/net/enic/enic_main.c
@@ -0,0 +1,1949 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/errno.h>
24#include <linux/types.h>
25#include <linux/init.h>
26#include <linux/workqueue.h>
27#include <linux/pci.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/if_ether.h>
31#include <linux/if_vlan.h>
32#include <linux/ethtool.h>
33#include <linux/in.h>
34#include <linux/ip.h>
35#include <linux/ipv6.h>
36#include <linux/tcp.h>
37
38#include "cq_enet_desc.h"
39#include "vnic_dev.h"
40#include "vnic_intr.h"
41#include "vnic_stats.h"
42#include "enic_res.h"
43#include "enic.h"
44
45#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
46#define ENIC_JUMBO_FIRST_BUF_SIZE 256
47
/* Supported devices: Cisco PCI device 0x0043 */
static struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, 0x0043) },
	{ 0, }	/* end of table (required zero terminator) */
};
53
54MODULE_DESCRIPTION(DRV_DESCRIPTION);
55MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
56MODULE_LICENSE("GPL");
57MODULE_VERSION(DRV_VERSION);
58MODULE_DEVICE_TABLE(pci, enic_id_table);
59
/* Describes one ethtool statistic: its display name and its index
 * into the hardware stats area viewed as an array of u64s.
 */
struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

/* offsetof() is divided by 8 because enic_get_ethtool_stats() indexes
 * the vnic_tx_stats/vnic_rx_stats areas as u64 arrays.
 */
#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
110
111static int enic_get_settings(struct net_device *netdev,
112 struct ethtool_cmd *ecmd)
113{
114 struct enic *enic = netdev_priv(netdev);
115
116 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
117 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
118 ecmd->port = PORT_FIBRE;
119 ecmd->transceiver = XCVR_EXTERNAL;
120
121 if (netif_carrier_ok(netdev)) {
122 ecmd->speed = vnic_dev_port_speed(enic->vdev);
123 ecmd->duplex = DUPLEX_FULL;
124 } else {
125 ecmd->speed = -1;
126 ecmd->duplex = -1;
127 }
128
129 ecmd->autoneg = AUTONEG_DISABLE;
130
131 return 0;
132}
133
134static void enic_get_drvinfo(struct net_device *netdev,
135 struct ethtool_drvinfo *drvinfo)
136{
137 struct enic *enic = netdev_priv(netdev);
138 struct vnic_devcmd_fw_info *fw_info;
139
140 spin_lock(&enic->devcmd_lock);
141 vnic_dev_fw_info(enic->vdev, &fw_info);
142 spin_unlock(&enic->devcmd_lock);
143
144 strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
145 strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
146 strncpy(drvinfo->fw_version, fw_info->fw_version,
147 sizeof(drvinfo->fw_version));
148 strncpy(drvinfo->bus_info, pci_name(enic->pdev),
149 sizeof(drvinfo->bus_info));
150}
151
152static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
153{
154 unsigned int i;
155
156 switch (stringset) {
157 case ETH_SS_STATS:
158 for (i = 0; i < enic_n_tx_stats; i++) {
159 memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
160 data += ETH_GSTRING_LEN;
161 }
162 for (i = 0; i < enic_n_rx_stats; i++) {
163 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
164 data += ETH_GSTRING_LEN;
165 }
166 break;
167 }
168}
169
170static int enic_get_stats_count(struct net_device *netdev)
171{
172 return enic_n_tx_stats + enic_n_rx_stats;
173}
174
/* ethtool get_ethtool_stats: snapshot the device stats and copy out
 * the counters listed in enic_tx_stats[]/enic_rx_stats[].  Each
 * .offset indexes the stats area viewed as a u64 array (matching the
 * "/ 8" in the ENIC_*_STAT macros).
 */
static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	/* devcmd access is serialized by devcmd_lock */
	spin_lock(&enic->devcmd_lock);
	vnic_dev_stats_dump(enic->vdev, &vstats);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}
191
/* ethtool: report whether RX checksum offload is currently enabled */
static u32 enic_get_rx_csum(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->csum_rx_enabled;
}
197
198static int enic_set_rx_csum(struct net_device *netdev, u32 data)
199{
200 struct enic *enic = netdev_priv(netdev);
201
202 enic->csum_rx_enabled =
203 (data && ENIC_SETTING(enic, RXCSUM)) ? 1 : 0;
204
205 return 0;
206}
207
208static int enic_set_tx_csum(struct net_device *netdev, u32 data)
209{
210 struct enic *enic = netdev_priv(netdev);
211
212 if (data && ENIC_SETTING(enic, TXCSUM))
213 netdev->features |= NETIF_F_HW_CSUM;
214 else
215 netdev->features &= ~NETIF_F_HW_CSUM;
216
217 return 0;
218}
219
220static int enic_set_tso(struct net_device *netdev, u32 data)
221{
222 struct enic *enic = netdev_priv(netdev);
223
224 if (data && ENIC_SETTING(enic, TSO))
225 netdev->features |=
226 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
227 else
228 netdev->features &=
229 ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
230
231 return 0;
232}
233
/* ethtool: get the driver message-level bitmap */
static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

/* ethtool: set the driver message-level bitmap */
static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}
245
/* ethtool entry points; generic ethtool_op_* helpers are used where no
 * device-specific behavior is needed.
 */
static struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_stats_count = enic_get_stats_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_rx_csum = enic_get_rx_csum,
	.set_rx_csum = enic_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = enic_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = enic_set_tso,
};
264
/* Release one transmit buffer: undo the DMA mapping (SOP buffers were
 * mapped with pci_map_single, continuation buffers with pci_map_page)
 * and free the skb on the buffer that owns it.
 */
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	/* only the EOP buffer carries the skb pointer */
	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

/* vnic_wq_service callback: free each completed transmit buffer */
static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}
285
/* CQ service callback for transmit completions: free completed
 * buffers and wake the TX queue once enough descriptors are available
 * for a maximally-fragmented skb.  wq_lock serializes against the
 * transmit path (enic_hard_start_xmit).
 */
static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1)
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}
305
306static void enic_log_q_error(struct enic *enic)
307{
308 unsigned int i;
309 u32 error_status;
310
311 for (i = 0; i < enic->wq_count; i++) {
312 error_status = vnic_wq_error_status(&enic->wq[i]);
313 if (error_status)
314 printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
315 enic->netdev->name, i, error_status);
316 }
317
318 for (i = 0; i < enic->rq_count; i++) {
319 error_status = vnic_rq_error_status(&enic->rq[i]);
320 if (error_status)
321 printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
322 enic->netdev->name, i, error_status);
323 }
324}
325
326static void enic_link_check(struct enic *enic)
327{
328 int link_status = vnic_dev_link_status(enic->vdev);
329 int carrier_ok = netif_carrier_ok(enic->netdev);
330
331 if (link_status && !carrier_ok) {
332 printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
333 netif_carrier_on(enic->netdev);
334 } else if (!link_status && carrier_ok) {
335 printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
336 netif_carrier_off(enic->netdev);
337 }
338}
339
340static void enic_mtu_check(struct enic *enic)
341{
342 u32 mtu = vnic_dev_mtu(enic->vdev);
343
344 if (mtu != enic->port_mtu) {
345 if (mtu < enic->netdev->mtu)
346 printk(KERN_WARNING PFX
347 "%s: interface MTU (%d) set higher "
348 "than switch port MTU (%d)\n",
349 enic->netdev->name, enic->netdev->mtu, mtu);
350 enic->port_mtu = mtu;
351 }
352}
353
354static void enic_msglvl_check(struct enic *enic)
355{
356 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
357
358 if (msg_enable != enic->msg_enable) {
359 printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
360 enic->netdev->name, enic->msg_enable, msg_enable);
361 enic->msg_enable = msg_enable;
362 }
363}
364
/* Process a device notify event: re-read msg level, port MTU and link
 * state (called from the notify interrupt/timer paths).
 */
static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}
371
/* Test whether interrupt source i is asserted in the legacy PBA word.
 * Fix: both arguments are fully parenthesized so callers may safely
 * pass compound expressions (the old form `pba & (1 << i)` mis-binds
 * for e.g. a bitwise-OR pba argument).
 */
#define ENIC_TEST_INTR(pba, i) ((pba) & (1 << (i)))
373
/* Legacy INTx interrupt handler.  The line may be shared, so the PBA
 * (pending-bits array) register is read to determine whether the
 * device actually asserted, and which sources fired.  The interrupt
 * is masked for the duration; it is unmasked here unless NAPI was
 * scheduled (enic_poll unmasks later) or error recovery was queued.
 */
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY))
		enic_notify_check(enic);

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		/* defer WQ/RQ servicing to NAPI context */
		if (netif_rx_schedule_prep(netdev, &enic->napi))
			__netif_rx_schedule(netdev, &enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}
407
/* MSI interrupt handler: defer all servicing to NAPI. */
static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it. The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated. Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	netif_rx_schedule(enic->netdev, &enic->napi);

	return IRQ_HANDLED;
}
432
/* MSI-X receive-queue vector: just hand off to NAPI */
static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct enic *enic = data;

	/* schedule NAPI polling for RQ cleanup */
	netif_rx_schedule(enic->netdev, &enic->napi);

	return IRQ_HANDLED;
}
442
443static irqreturn_t enic_isr_msix_wq(int irq, void *data)
444{
445 struct enic *enic = data;
446 unsigned int wq_work_to_do = -1; /* no limit */
447 unsigned int wq_work_done;
448
449 wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
450 wq_work_to_do, enic_wq_service, NULL);
451
452 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
453 wq_work_done,
454 1 /* unmask intr */,
455 1 /* reset intr timer */);
456
457 return IRQ_HANDLED;
458}
459
/* MSI-X error vector: log queue error status and kick off recovery */
static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

/* MSI-X notify vector: process the device notification and re-arm */
static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;

	enic_notify_check(enic);
	vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]);

	return IRQ_HANDLED;
}
481
/* Queue the paged fragments of an skb as continuation descriptors.
 * len_left is the number of bytes remaining after the linear head;
 * it reaches zero exactly on the last fragment, which is marked EOP.
 */
static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0));	/* EOP? */
	}
}
499
500static inline void enic_queue_wq_skb_vlan(struct enic *enic,
501 struct vnic_wq *wq, struct sk_buff *skb,
502 int vlan_tag_insert, unsigned int vlan_tag)
503{
504 unsigned int head_len = skb_headlen(skb);
505 unsigned int len_left = skb->len - head_len;
506 int eop = (len_left == 0);
507
508 /* Queue the main skb fragment */
509 enic_queue_wq_desc(wq, skb,
510 pci_map_single(enic->pdev, skb->data,
511 head_len, PCI_DMA_TODEVICE),
512 head_len,
513 vlan_tag_insert, vlan_tag,
514 eop);
515
516 if (!eop)
517 enic_queue_wq_skb_cont(enic, wq, skb, len_left);
518}
519
520static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
521 struct vnic_wq *wq, struct sk_buff *skb,
522 int vlan_tag_insert, unsigned int vlan_tag)
523{
524 unsigned int head_len = skb_headlen(skb);
525 unsigned int len_left = skb->len - head_len;
526 unsigned int hdr_len = skb_transport_offset(skb);
527 unsigned int csum_offset = hdr_len + skb->csum_offset;
528 int eop = (len_left == 0);
529
530 /* Queue the main skb fragment */
531 enic_queue_wq_desc_csum_l4(wq, skb,
532 pci_map_single(enic->pdev, skb->data,
533 head_len, PCI_DMA_TODEVICE),
534 head_len,
535 csum_offset,
536 hdr_len,
537 vlan_tag_insert, vlan_tag,
538 eop);
539
540 if (!eop)
541 enic_queue_wq_skb_cont(enic, wq, skb, len_left);
542}
543
/* Queue an skb for TCP segmentation offload.  hdr_len covers through
 * the end of the TCP header so hardware knows where payload starts.
 */
static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == __constant_htons(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue the main skb fragment */
	enic_queue_wq_desc_tso(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		mss, hdr_len,
		vlan_tag_insert, vlan_tag,
		eop);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
}
579
/* Top-level transmit enqueue: pick up a VLAN tag from the stacked
 * VLAN driver if present, then dispatch by offload type — TSO when
 * gso_size is set, L4 checksum offload for CHECKSUM_PARTIAL skbs,
 * plain queueing otherwise.
 */
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;

	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
}
603
/* netif_tx_lock held, process context with BHs disabled.
 *
 * Transmit entry point.  wq_lock serializes descriptor posting against
 * the completion path (enic_wq_service), which may wake the queue.
 * The queue is stopped whenever fewer than MAX_SKB_FRAGS + 1
 * descriptors remain, so the next maximally-fragmented skb always fits.
 */
static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	/* Nothing to send; drop silently */
	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. In the off chance it's going to take
	 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		/* linearize failed (allocation); drop the skb */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	/* stop early so the next skb is guaranteed to fit */
	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
		netif_stop_queue(netdev);

	netdev->trans_start = jiffies;

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
650
651/* dev_base_lock rwlock held, nominally process context */
652static struct net_device_stats *enic_get_stats(struct net_device *netdev)
653{
654 struct enic *enic = netdev_priv(netdev);
655 struct vnic_stats *stats;
656
657 spin_lock(&enic->devcmd_lock);
658 vnic_dev_stats_dump(enic->vdev, &stats);
659 spin_unlock(&enic->devcmd_lock);
660
661 enic->net_stats.tx_packets = stats->tx.tx_frames_ok;
662 enic->net_stats.tx_bytes = stats->tx.tx_bytes_ok;
663 enic->net_stats.tx_errors = stats->tx.tx_errors;
664 enic->net_stats.tx_dropped = stats->tx.tx_drops;
665
666 enic->net_stats.rx_packets = stats->rx.rx_frames_ok;
667 enic->net_stats.rx_bytes = stats->rx.rx_bytes_ok;
668 enic->net_stats.rx_errors = stats->rx.rx_errors;
669 enic->net_stats.multicast = stats->rx.rx_multicast_frames_ok;
670 enic->net_stats.rx_crc_errors = stats->rx.rx_crc_errors;
671 enic->net_stats.rx_dropped = stats->rx.rx_no_bufs;
672
673 return &enic->net_stats;
674}
675
/* Forget the cached multicast list so the next
 * enic_set_multicast_list() re-adds every address.
 */
static void enic_reset_mcaddrs(struct enic *enic)
{
	enic->mc_count = 0;
}

/* Set the interface MAC address after validating it.  Note: only the
 * netdev copy is updated here; programming the address into the
 * device is handled elsewhere.
 */
static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}
690
/* netif_tx_lock held, BHs disabled.
 *
 * Program the packet filter and reconcile the hardware multicast list
 * with the netdev's.  The previously-programmed addresses are kept in
 * enic->mc_addr so only the delta is sent to the device; both diff
 * loops are O(n^2) but n is capped at ENIC_MULTICAST_PERFECT_FILTERS.
 */
static void enic_set_multicast_list(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct dev_mc_list *list = netdev->mc_list;
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
	/* fall back to allmulti when the list exceeds the perfect filters */
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
	    (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int mc_count = netdev->mc_count;
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;

	spin_lock(&enic->devcmd_lock);

	vnic_dev_packet_filter(enic->vdev, directed,
		multicast, broadcast, promisc, allmulti);

	/* Is there an easier way? Trying to minimize to
	 * calls to add/del multicast addrs. We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	/* snapshot the current netdev list */
	for (i = 0; list && i < mc_count; i++) {
		memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
		list = list->next;
	}

	/* delete addresses that were programmed but are no longer wanted */
	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_del_multicast_addr(enic, enic->mc_addr[i]);
	}

	/* add addresses that are wanted but not yet programmed */
	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_add_multicast_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;

	spin_unlock(&enic->devcmd_lock);
}
753
754/* rtnl lock is held */
755static void enic_vlan_rx_register(struct net_device *netdev,
756 struct vlan_group *vlan_group)
757{
758 struct enic *enic = netdev_priv(netdev);
759 enic->vlan_group = vlan_group;
760}
761
/* rtnl lock is held */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	/* devcmd_lock serializes the add-vlan devcmd with other issuers */
	spin_lock(&enic->devcmd_lock);
	enic_add_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
771
/* rtnl lock is held */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	/* devcmd_lock serializes the del-vlan devcmd with other issuers */
	spin_lock(&enic->devcmd_lock);
	enic_del_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
781
782/* netif_tx_lock held, BHs disabled */
783static void enic_tx_timeout(struct net_device *netdev)
784{
785 struct enic *enic = netdev_priv(netdev);
786 schedule_work(&enic->reset);
787}
788
789static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
790{
791 struct enic *enic = vnic_dev_priv(rq->vdev);
792
793 if (!buf->os_buf)
794 return;
795
796 pci_unmap_single(enic->pdev, buf->dma_addr,
797 buf->len, PCI_DMA_FROMDEVICE);
798 dev_kfree_skb_any(buf->os_buf);
799}
800
801static inline struct sk_buff *enic_rq_alloc_skb(unsigned int size)
802{
803 struct sk_buff *skb;
804
805 skb = dev_alloc_skb(size + NET_IP_ALIGN);
806
807 if (skb)
808 skb_reserve(skb, NET_IP_ALIGN);
809
810 return skb;
811}
812
813static int enic_rq_alloc_buf(struct vnic_rq *rq)
814{
815 struct enic *enic = vnic_dev_priv(rq->vdev);
816 struct sk_buff *skb;
817 unsigned int len = enic->netdev->mtu + ETH_HLEN;
818 unsigned int os_buf_index = 0;
819 dma_addr_t dma_addr;
820
821 skb = enic_rq_alloc_skb(len);
822 if (!skb)
823 return -ENOMEM;
824
825 dma_addr = pci_map_single(enic->pdev, skb->data,
826 len, PCI_DMA_FROMDEVICE);
827
828 enic_queue_rq_desc(rq, skb, os_buf_index,
829 dma_addr, len);
830
831 return 0;
832}
833
/* LRO callback: locate the IP and TCP headers in @skb so the LRO
 * engine can aggregate.  @priv is the hw completion descriptor for
 * this packet.  Returns 0 (and fills *iphdr, *tcph, *hdr_flags) for
 * LRO-eligible packets -- unfragmented IPv4 TCP with complete
 * headers -- or -1 to skip aggregation.
 */
static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
	void **tcph, u64 *hdr_flags, void *priv)
{
	struct cq_enet_rq_desc *cq_desc = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	/* Unpack the hw completion descriptor; only the protocol
	 * flags are consulted below.
	 */
	cq_enet_rq_desc_dec(cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	/* Only unfragmented IPv4 TCP qualifies for LRO */
	if (!(ipv4 && tcp && !ipv4_fragment))
		return -1;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*tcph = tcp_hdr(skb);
	*iphdr = iph;

	return 0;
}
878
/* Per-completion RQ callback (napi context): unmap the rx buffer and
 * either drop it (packet error / overflow) or hand the skb up the
 * stack, through LRO and/or VLAN acceleration as configured.
 */
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	/* Unpack the hw completion descriptor for this buffer */
	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		/* Rate-limited log for bad-FCS frames; other packet
		 * errors are dropped silently
		 */
		if (bytes_written > 0 && !fcs_ok) {
			if (net_ratelimit())
				printk(KERN_ERR PFX
					"%s: packet error: bad FCS\n",
					enic->netdev->name);
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, enic->netdev);

		if (enic->csum_rx_enabled && !csum_not_calc) {
			/* NOTE(review): the hw 16-bit sum is stored via
			 * htons() with CHECKSUM_COMPLETE -- confirm this
			 * byte order matches what the stack expects in
			 * skb->csum.
			 */
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = enic->netdev;
		enic->netdev->last_rx = jiffies;

		if (enic->vlan_group && vlan_stripped) {

			if (ENIC_SETTING(enic, LRO))
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);

		} else {

			if (ENIC_SETTING(enic, LRO))
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);

		}

	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}
969
/* CQ service callback: hand each RQ completion to
 * enic_rq_indicate_buf() for the queue it completed on.
 * Always returns 0 (keep servicing).
 */
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}
981
982static void enic_rq_drop_buf(struct vnic_rq *rq,
983 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
984 int skipped, void *opaque)
985{
986 struct enic *enic = vnic_dev_priv(rq->vdev);
987 struct sk_buff *skb = buf->os_buf;
988
989 if (skipped)
990 return;
991
992 pci_unmap_single(enic->pdev, buf->dma_addr,
993 buf->len, PCI_DMA_FROMDEVICE);
994
995 dev_kfree_skb_any(skb);
996}
997
/* CQ service callback used during shutdown: completions are consumed
 * and their buffers dropped (see enic_rq_drop_buf).  Always returns 0.
 */
static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_drop_buf, opaque);

	return 0;
}
1009
1010static int enic_poll(struct napi_struct *napi, int budget)
1011{
1012 struct enic *enic = container_of(napi, struct enic, napi);
1013 struct net_device *netdev = enic->netdev;
1014 unsigned int rq_work_to_do = budget;
1015 unsigned int wq_work_to_do = -1; /* no limit */
1016 unsigned int work_done, rq_work_done, wq_work_done;
1017
1018 /* Service RQ (first) and WQ
1019 */
1020
1021 rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1022 rq_work_to_do, enic_rq_service, NULL);
1023
1024 wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
1025 wq_work_to_do, enic_wq_service, NULL);
1026
1027 /* Accumulate intr event credits for this polling
1028 * cycle. An intr event is the completion of a
1029 * a WQ or RQ packet.
1030 */
1031
1032 work_done = rq_work_done + wq_work_done;
1033
1034 if (work_done > 0)
1035 vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
1036 work_done,
1037 0 /* don't unmask intr */,
1038 0 /* don't reset intr timer */);
1039
1040 if (rq_work_done > 0) {
1041
1042 /* Replenish RQ
1043 */
1044
1045 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1046
1047 } else {
1048
1049 /* If no work done, flush all LROs and exit polling
1050 */
1051
1052 if (ENIC_SETTING(enic, LRO))
1053 lro_flush_all(&enic->lro_mgr);
1054
1055 netif_rx_complete(netdev, napi);
1056 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
1057 }
1058
1059 return rq_work_done;
1060}
1061
/* NAPI poll used in MSI-X mode.  Only the RQ is serviced here; WQ
 * completions are handled by their own MSI-X vector/handler.
 */
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int work_to_do = budget;
	unsigned int work_done;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		work_to_do, enic_rq_service, NULL);

	if (work_done > 0) {

		/* Replenish RQ
		 */

		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

		/* Accumulate intr event credits for this polling
		 * cycle.  An intr event is the completion of a
		 * WQ or RQ packet.
		 */

		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);
	} else {

		/* If no work done, flush all LROs and exit polling
		 */

		if (ENIC_SETTING(enic, LRO))
			lro_flush_all(&enic->lro_mgr);

		netif_rx_complete(netdev, napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return work_done;
}
1105
1106static void enic_notify_timer(unsigned long data)
1107{
1108 struct enic *enic = (struct enic *)data;
1109
1110 enic_notify_check(enic);
1111
1112 mod_timer(&enic->notify_timer, round_jiffies(ENIC_NOTIFY_TIMER_PERIOD));
1113}
1114
1115static void enic_free_intr(struct enic *enic)
1116{
1117 struct net_device *netdev = enic->netdev;
1118 unsigned int i;
1119
1120 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1121 case VNIC_DEV_INTR_MODE_INTX:
1122 case VNIC_DEV_INTR_MODE_MSI:
1123 free_irq(enic->pdev->irq, netdev);
1124 break;
1125 case VNIC_DEV_INTR_MODE_MSIX:
1126 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1127 if (enic->msix[i].requested)
1128 free_irq(enic->msix_entry[i].vector,
1129 enic->msix[i].devid);
1130 break;
1131 default:
1132 break;
1133 }
1134}
1135
/* Request irq(s) according to the intr mode chosen earlier by
 * enic_set_intr_mode().  For MSI-X, one named vector each is set up
 * for rx, tx, errors, and notifications; on a request failure, any
 * vectors already acquired are released via enic_free_intr().
 * Returns 0 or the request_irq() error.
 */
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		/* Legacy INTx is shareable; dev_id is the netdev */
		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		/* MSI is exclusive; dev_id is the enic itself */
		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		/* Vector names are "<ifname>-rx" etc., with the ifname
		 * truncated to keep within IFNAMSIZ-sized devname
		 */
		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
			"%.11s-rx", netdev->name);
		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
		enic->msix[ENIC_MSIX_RQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
			"%.11s-tx", netdev->name);
		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
		enic->msix[ENIC_MSIX_WQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
			"%.11s-err", netdev->name);
		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
		enic->msix[ENIC_MSIX_ERR].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
			"%.11s-notify", netdev->name);
		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				/* Undo the vectors requested so far */
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
1198
1199static int enic_notify_set(struct enic *enic)
1200{
1201 int err;
1202
1203 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1204 case VNIC_DEV_INTR_MODE_INTX:
1205 err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
1206 break;
1207 case VNIC_DEV_INTR_MODE_MSIX:
1208 err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
1209 break;
1210 default:
1211 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1212 break;
1213 }
1214
1215 return err;
1216}
1217
1218static void enic_notify_timer_start(struct enic *enic)
1219{
1220 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1221 case VNIC_DEV_INTR_MODE_MSI:
1222 mod_timer(&enic->notify_timer, jiffies);
1223 break;
1224 default:
1225 /* Using intr for notification for INTx/MSI-X */
1226 break;
1227 };
1228}
1229
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	/* Post receive buffers to every RQ before enabling anything */
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		if (err) {
			/* NOTE(review): buffers already posted to
			 * earlier RQs are not freed on this error path
			 * -- confirm they are reclaimed elsewhere
			 * (e.g. by vnic_rq_clean on teardown).
			 */
			printk(KERN_ERR PFX
				"%s: Unable to alloc receive buffers.\n",
				netdev->name);
			return err;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	/* Program the unicast station addr and multicast filters */
	enic_add_station_addr(enic);
	enic_set_multicast_list(netdev);

	netif_wake_queue(netdev);
	napi_enable(&enic->napi);
	vnic_dev_enable(enic->vdev);

	/* Unmask interrupts only after everything above is live */
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	/* Kicks the notify-poll timer when in MSI mode */
	enic_notify_timer_start(enic);

	return 0;
}
1266
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	del_timer_sync(&enic->notify_timer);

	/* Quiesce: device first, then napi, then the tx queue */
	vnic_dev_disable(enic->vdev);
	napi_disable(&enic->napi);
	netif_stop_queue(netdev);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_mask(&enic->intr[i]);

	/* Disable queues; a failure means the hw didn't quiesce */
	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	/* Drain pending completions, dropping leftover rx buffers */
	(void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		-1, enic_rq_service_drop, NULL);
	(void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		-1, enic_wq_service, NULL);

	/* Free still-posted buffers and reset rings to initial state */
	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
1310
1311static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1312{
1313 struct enic *enic = netdev_priv(netdev);
1314 int running = netif_running(netdev);
1315
1316 if (running)
1317 enic_stop(netdev);
1318
1319 if (new_mtu < ENIC_MIN_MTU)
1320 new_mtu = ENIC_MIN_MTU;
1321 if (new_mtu > ENIC_MAX_MTU)
1322 new_mtu = ENIC_MAX_MTU;
1323
1324 netdev->mtu = new_mtu;
1325
1326 if (netdev->mtu > enic->port_mtu)
1327 printk(KERN_WARNING PFX
1328 "%s: interface MTU (%d) set higher "
1329 "than port MTU (%d)\n",
1330 netdev->name, netdev->mtu, enic->port_mtu);
1331
1332 if (running)
1333 enic_open(netdev);
1334
1335 return 0;
1336}
1337
1338#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll/netconsole path: invoke the isr(s) for the active intr mode
 * directly, since real interrupts may be disabled.
 */
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		/* NOTE(review): pdev->irq is passed here but the MSI-X
		 * handlers take their context from the second arg --
		 * confirm the irq number is unused by those handlers.
		 */
		enic_isr_msix_rq(enic->pdev->irq, enic);
		enic_isr_msix_wq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
1359#endif
1360
/* Kick off an async devcmd via @start(arg), then poll @finished until
 * it reports completion, sleeping ~100ms between polls for at most
 * ~2 seconds.  Returns 0 on completion, a devcmd error, or
 * -ETIMEDOUT.  Sleeps, so process context only (enforced by BUG_ON).
 */
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}
1395
1396static int enic_dev_open(struct enic *enic)
1397{
1398 int err;
1399
1400 err = enic_dev_wait(enic->vdev, vnic_dev_open,
1401 vnic_dev_open_done, 0);
1402 if (err)
1403 printk(KERN_ERR PFX
1404 "vNIC device open failed, err %d.\n", err);
1405
1406 return err;
1407}
1408
1409static int enic_dev_soft_reset(struct enic *enic)
1410{
1411 int err;
1412
1413 err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
1414 vnic_dev_soft_reset_done, 0);
1415 if (err)
1416 printk(KERN_ERR PFX
1417 "vNIC soft reset failed, err %d.\n", err);
1418
1419 return err;
1420}
1421
/* Worker that recovers the device after a hang (scheduled from
 * enic_tx_timeout).  Runs in process context and performs a full
 * stop / soft-reset / re-init / open cycle under rtnl.
 */
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	/* devcmd_lock serializes the hang-notify with other devcmds */
	spin_lock(&enic->devcmd_lock);
	vnic_dev_hang_notify(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	/* Multicast filters are gone after reset; forget our copy so
	 * enic_open()'s enic_set_multicast_list() re-adds them all
	 */
	enic_reset_mcaddrs(enic);
	enic_init_vnic_resources(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}
1443
/* Choose and configure the interrupt mode, best-first: MSI-X, then
 * MSI, then INTx.  config.intr_mode constrains the choice (0 allows
 * all, 1 disallows MSI-X, 2 also disallows MSI), as do the resource
 * counts read from the device.  On success the rq/wq/cq/intr counts
 * are trimmed to exactly what the chosen mode uses.  Returns 0, or
 * -EINVAL when no mode fits.
 */
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = ARRAY_SIZE(enic->rq);
	unsigned int m = ARRAY_SIZE(enic->wq);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
1531
1532static void enic_clear_intr_mode(struct enic *enic)
1533{
1534 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1535 case VNIC_DEV_INTR_MODE_MSIX:
1536 pci_disable_msix(enic->pdev);
1537 break;
1538 case VNIC_DEV_INTR_MODE_MSI:
1539 pci_disable_msi(enic->pdev);
1540 break;
1541 default:
1542 break;
1543 }
1544
1545 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
1546}
1547
/* Unmap BAR0 if it was mapped; safe on a partially-probed device */
static void enic_iounmap(struct enic *enic)
{
	if (enic->bar0.vaddr)
		iounmap(enic->bar0.vaddr);
}
1553
1554static int __devinit enic_probe(struct pci_dev *pdev,
1555 const struct pci_device_id *ent)
1556{
1557 struct net_device *netdev;
1558 struct enic *enic;
1559 int using_dac = 0;
1560 unsigned int i;
1561 int err;
1562
1563 const u8 rss_default_cpu = 0;
1564 const u8 rss_hash_type = 0;
1565 const u8 rss_hash_bits = 0;
1566 const u8 rss_base_cpu = 0;
1567 const u8 rss_enable = 0;
1568 const u8 tso_ipid_split_en = 0;
1569 const u8 ig_vlan_strip_en = 1;
1570
1571 /* Allocate net device structure and initialize. Private
1572 * instance data is initialized to zero.
1573 */
1574
1575 netdev = alloc_etherdev(sizeof(struct enic));
1576 if (!netdev) {
1577 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
1578 return -ENOMEM;
1579 }
1580
1581 /* Set the netdev name early so intr vectors are properly
1582 * named and any error msgs can include netdev->name
1583 */
1584
1585 rtnl_lock();
1586 err = dev_alloc_name(netdev, netdev->name);
1587 rtnl_unlock();
1588 if (err < 0) {
1589 printk(KERN_ERR PFX "Unable to allocate netdev name.\n");
1590 goto err_out_free_netdev;
1591 }
1592
1593 pci_set_drvdata(pdev, netdev);
1594
1595 SET_NETDEV_DEV(netdev, &pdev->dev);
1596
1597 enic = netdev_priv(netdev);
1598 enic->netdev = netdev;
1599 enic->pdev = pdev;
1600
1601 /* Setup PCI resources
1602 */
1603
1604 err = pci_enable_device(pdev);
1605 if (err) {
1606 printk(KERN_ERR PFX
1607 "%s: Cannot enable PCI device, aborting.\n",
1608 netdev->name);
1609 goto err_out_free_netdev;
1610 }
1611
1612 err = pci_request_regions(pdev, DRV_NAME);
1613 if (err) {
1614 printk(KERN_ERR PFX
1615 "%s: Cannot request PCI regions, aborting.\n",
1616 netdev->name);
1617 goto err_out_disable_device;
1618 }
1619
1620 pci_set_master(pdev);
1621
1622 /* Query PCI controller on system for DMA addressing
1623 * limitation for the device. Try 40-bit first, and
1624 * fail to 32-bit.
1625 */
1626
1627 err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
1628 if (err) {
1629 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1630 if (err) {
1631 printk(KERN_ERR PFX
1632 "%s: No usable DMA configuration, aborting.\n",
1633 netdev->name);
1634 goto err_out_release_regions;
1635 }
1636 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1637 if (err) {
1638 printk(KERN_ERR PFX
1639 "%s: Unable to obtain 32-bit DMA "
1640 "for consistent allocations, aborting.\n",
1641 netdev->name);
1642 goto err_out_release_regions;
1643 }
1644 } else {
1645 err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
1646 if (err) {
1647 printk(KERN_ERR PFX
1648 "%s: Unable to obtain 40-bit DMA "
1649 "for consistent allocations, aborting.\n",
1650 netdev->name);
1651 goto err_out_release_regions;
1652 }
1653 using_dac = 1;
1654 }
1655
1656 /* Map vNIC resources from BAR0
1657 */
1658
1659 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1660 printk(KERN_ERR PFX
1661 "%s: BAR0 not memory-map'able, aborting.\n",
1662 netdev->name);
1663 err = -ENODEV;
1664 goto err_out_release_regions;
1665 }
1666
1667 enic->bar0.vaddr = pci_iomap(pdev, 0, enic->bar0.len);
1668 enic->bar0.bus_addr = pci_resource_start(pdev, 0);
1669 enic->bar0.len = pci_resource_len(pdev, 0);
1670
1671 if (!enic->bar0.vaddr) {
1672 printk(KERN_ERR PFX
1673 "%s: Cannot memory-map BAR0 res hdr, aborting.\n",
1674 netdev->name);
1675 err = -ENODEV;
1676 goto err_out_release_regions;
1677 }
1678
1679 /* Register vNIC device
1680 */
1681
1682 enic->vdev = vnic_dev_register(NULL, enic, pdev, &enic->bar0);
1683 if (!enic->vdev) {
1684 printk(KERN_ERR PFX
1685 "%s: vNIC registration failed, aborting.\n",
1686 netdev->name);
1687 err = -ENODEV;
1688 goto err_out_iounmap;
1689 }
1690
1691 /* Issue device open to get device in known state
1692 */
1693
1694 err = enic_dev_open(enic);
1695 if (err) {
1696 printk(KERN_ERR PFX
1697 "%s: vNIC dev open failed, aborting.\n",
1698 netdev->name);
1699 goto err_out_vnic_unregister;
1700 }
1701
1702 /* Issue device init to initialize the vnic-to-switch link.
1703 * We'll start with carrier off and wait for link UP
1704 * notification later to turn on carrier. We don't need
1705 * to wait here for the vnic-to-switch link initialization
1706 * to complete; link UP notification is the indication that
1707 * the process is complete.
1708 */
1709
1710 netif_carrier_off(netdev);
1711
1712 err = vnic_dev_init(enic->vdev, 0);
1713 if (err) {
1714 printk(KERN_ERR PFX
1715 "%s: vNIC dev init failed, aborting.\n",
1716 netdev->name);
1717 goto err_out_dev_close;
1718 }
1719
1720 /* Get vNIC configuration
1721 */
1722
1723 err = enic_get_vnic_config(enic);
1724 if (err) {
1725 printk(KERN_ERR PFX
1726 "%s: Get vNIC configuration failed, aborting.\n",
1727 netdev->name);
1728 goto err_out_dev_close;
1729 }
1730
1731 /* Get available resource counts
1732 */
1733
1734 enic_get_res_counts(enic);
1735
1736 /* Set interrupt mode based on resource counts and system
1737 * capabilities
1738 */
1739
1740 err = enic_set_intr_mode(enic);
1741 if (err) {
1742 printk(KERN_ERR PFX
1743 "%s: Failed to set intr mode, aborting.\n",
1744 netdev->name);
1745 goto err_out_dev_close;
1746 }
1747
1748 /* Request interrupt vector(s)
1749 */
1750
1751 err = enic_request_intr(enic);
1752 if (err) {
1753 printk(KERN_ERR PFX "%s: Unable to request irq.\n",
1754 netdev->name);
1755 goto err_out_dev_close;
1756 }
1757
1758 /* Allocate and configure vNIC resources
1759 */
1760
1761 err = enic_alloc_vnic_resources(enic);
1762 if (err) {
1763 printk(KERN_ERR PFX
1764 "%s: Failed to alloc vNIC resources, aborting.\n",
1765 netdev->name);
1766 goto err_out_free_vnic_resources;
1767 }
1768
1769 enic_init_vnic_resources(enic);
1770
1771 /* Enable VLAN tag stripping. RSS not enabled (yet).
1772 */
1773
1774 err = enic_set_nic_cfg(enic,
1775 rss_default_cpu, rss_hash_type,
1776 rss_hash_bits, rss_base_cpu,
1777 rss_enable, tso_ipid_split_en,
1778 ig_vlan_strip_en);
1779 if (err) {
1780 printk(KERN_ERR PFX
1781 "%s: Failed to config nic, aborting.\n",
1782 netdev->name);
1783 goto err_out_free_vnic_resources;
1784 }
1785
1786 /* Setup notification buffer area
1787 */
1788
1789 err = enic_notify_set(enic);
1790 if (err) {
1791 printk(KERN_ERR PFX
1792 "%s: Failed to alloc notify buffer, aborting.\n",
1793 netdev->name);
1794 goto err_out_free_vnic_resources;
1795 }
1796
1797 /* Setup notification timer, HW reset task, and locks
1798 */
1799
1800 init_timer(&enic->notify_timer);
1801 enic->notify_timer.function = enic_notify_timer;
1802 enic->notify_timer.data = (unsigned long)enic;
1803
1804 INIT_WORK(&enic->reset, enic_reset);
1805
1806 for (i = 0; i < enic->wq_count; i++)
1807 spin_lock_init(&enic->wq_lock[i]);
1808
1809 spin_lock_init(&enic->devcmd_lock);
1810
1811 /* Register net device
1812 */
1813
1814 enic->port_mtu = enic->config.mtu;
1815 (void)enic_change_mtu(netdev, enic->port_mtu);
1816
1817 err = enic_set_mac_addr(netdev, enic->mac_addr);
1818 if (err) {
1819 printk(KERN_ERR PFX
1820 "%s: Invalid MAC address, aborting.\n",
1821 netdev->name);
1822 goto err_out_notify_unset;
1823 }
1824
1825 netdev->open = enic_open;
1826 netdev->stop = enic_stop;
1827 netdev->hard_start_xmit = enic_hard_start_xmit;
1828 netdev->get_stats = enic_get_stats;
1829 netdev->set_multicast_list = enic_set_multicast_list;
1830 netdev->change_mtu = enic_change_mtu;
1831 netdev->vlan_rx_register = enic_vlan_rx_register;
1832 netdev->vlan_rx_add_vid = enic_vlan_rx_add_vid;
1833 netdev->vlan_rx_kill_vid = enic_vlan_rx_kill_vid;
1834 netdev->tx_timeout = enic_tx_timeout;
1835 netdev->watchdog_timeo = 2 * HZ;
1836 netdev->ethtool_ops = &enic_ethtool_ops;
1837#ifdef CONFIG_NET_POLL_CONTROLLER
1838 netdev->poll_controller = enic_poll_controller;
1839#endif
1840
1841 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1842 default:
1843 netif_napi_add(netdev, &enic->napi, enic_poll, 64);
1844 break;
1845 case VNIC_DEV_INTR_MODE_MSIX:
1846 netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
1847 break;
1848 }
1849
1850 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1851 if (ENIC_SETTING(enic, TXCSUM))
1852 netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
1853 if (ENIC_SETTING(enic, TSO))
1854 netdev->features |= NETIF_F_TSO |
1855 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
1856 if (using_dac)
1857 netdev->features |= NETIF_F_HIGHDMA;
1858
1859
1860 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
1861
1862 if (ENIC_SETTING(enic, LRO)) {
1863 enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
1864 enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
1865 enic->lro_mgr.lro_arr = enic->lro_desc;
1866 enic->lro_mgr.get_skb_header = enic_get_skb_header;
1867 enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1868 enic->lro_mgr.dev = netdev;
1869 enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
1870 enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1871 }
1872
1873 err = register_netdev(netdev);
1874 if (err) {
1875 printk(KERN_ERR PFX
1876 "%s: Cannot register net device, aborting.\n",
1877 netdev->name);
1878 goto err_out_notify_unset;
1879 }
1880
1881 return 0;
1882
1883err_out_notify_unset:
1884 vnic_dev_notify_unset(enic->vdev);
1885err_out_free_vnic_resources:
1886 enic_free_vnic_resources(enic);
1887 enic_free_intr(enic);
1888err_out_dev_close:
1889 vnic_dev_close(enic->vdev);
1890err_out_vnic_unregister:
1891 enic_clear_intr_mode(enic);
1892 vnic_dev_unregister(enic->vdev);
1893err_out_iounmap:
1894 enic_iounmap(enic);
1895err_out_release_regions:
1896 pci_release_regions(pdev);
1897err_out_disable_device:
1898 pci_disable_device(pdev);
1899err_out_free_netdev:
1900 pci_set_drvdata(pdev, NULL);
1901 free_netdev(netdev);
1902
1903 return err;
1904}
1905
/* PCI remove: tear everything down in reverse order of enic_probe() */
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		/* Make sure a queued enic_reset work isn't still running */
		flush_scheduled_work();
		unregister_netdev(netdev);
		vnic_dev_notify_unset(enic->vdev);
		enic_free_vnic_resources(enic);
		enic_free_intr(enic);
		vnic_dev_close(enic->vdev);
		enic_clear_intr_mode(enic);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}
1928
/* PCI driver glue: binds enic_probe/enic_remove to the device table */
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};
1935
/* Module entry: announce the driver and register it with the PCI core */
static int __init enic_init_module(void)
{
	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}
1942
/* Module exit: unregister from the PCI core (triggers enic_remove) */
static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}
1947
1948module_init(enic_init_module);
1949module_exit(enic_cleanup_module);
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
new file mode 100644
index 000000000000..95184b9108ef
--- /dev/null
+++ b/drivers/net/enic/enic_res.c
@@ -0,0 +1,370 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/netdevice.h>
25
26#include "wq_enet_desc.h"
27#include "rq_enet_desc.h"
28#include "cq_enet_desc.h"
29#include "vnic_resource.h"
30#include "vnic_enet.h"
31#include "vnic_dev.h"
32#include "vnic_wq.h"
33#include "vnic_rq.h"
34#include "vnic_cq.h"
35#include "vnic_intr.h"
36#include "vnic_stats.h"
37#include "vnic_nic.h"
38#include "vnic_rss.h"
39#include "enic_res.h"
40#include "enic.h"
41
/* Read the vNIC ethernet configuration from firmware into enic->config,
 * clamp values to ranges this driver supports, and log a summary.
 * Returns 0 on success or a negative devcmd error.
 */
int enic_get_vnic_config(struct enic *enic)
{
	struct vnic_enet_config *c = &enic->config;
	int err;

	err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err);
		return err;
	}

/* Fetch one vnic_enet_config field via CMD_DEV_SPEC; on failure this
 * macro returns from the enclosing function with the devcmd error. */
#define GET_CONFIG(m) \
	do { \
		err = vnic_dev_spec(enic->vdev, \
			offsetof(struct vnic_enet_config, m), \
			sizeof(c->m), &c->m); \
		if (err) { \
			printk(KERN_ERR PFX \
				"Error getting %s, %d\n", #m, err); \
			return err; \
		} \
	} while (0)

	GET_CONFIG(flags);
	GET_CONFIG(wq_desc_count);
	GET_CONFIG(rq_desc_count);
	GET_CONFIG(mtu);
	GET_CONFIG(intr_timer);
	GET_CONFIG(intr_timer_type);
	GET_CONFIG(intr_mode);

	/* Clamp descriptor counts to [ENIC_MIN_*_DESCS, ENIC_MAX_*_DESCS] */
	c->wq_desc_count =
		min_t(u32, ENIC_MAX_WQ_DESCS,
		max_t(u32, ENIC_MIN_WQ_DESCS,
		c->wq_desc_count));
	c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */

	c->rq_desc_count =
		min_t(u32, ENIC_MAX_RQ_DESCS,
		max_t(u32, ENIC_MIN_RQ_DESCS,
		c->rq_desc_count));
	c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */

	/* A zero MTU from firmware means "unset": default to 1500, then clamp */
	if (c->mtu == 0)
		c->mtu = 1500;
	c->mtu = min_t(u16, ENIC_MAX_MTU,
		max_t(u16, ENIC_MIN_MTU,
		c->mtu));

	c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);

	printk(KERN_INFO PFX "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
		"wq/rq %d/%d\n",
		enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
		enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
		c->wq_desc_count, c->rq_desc_count);
	printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
		"intr timer %d\n",
		c->mtu, ENIC_SETTING(enic, TXCSUM),
		ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
		ENIC_SETTING(enic, LRO), c->intr_timer);

	return 0;
}
106
/* Register the vNIC's own (station) MAC address with the device. */
void enic_add_station_addr(struct enic *enic)
{
	vnic_dev_add_addr(enic->vdev, enic->mac_addr);
}
111
/* Add a multicast MAC address to the device's filter. */
void enic_add_multicast_addr(struct enic *enic, u8 *addr)
{
	vnic_dev_add_addr(enic->vdev, addr);
}
116
/* Remove a multicast MAC address from the device's filter. */
void enic_del_multicast_addr(struct enic *enic, u8 *addr)
{
	vnic_dev_del_addr(enic->vdev, addr);
}
121
122void enic_add_vlan(struct enic *enic, u16 vlanid)
123{
124 u64 a0 = vlanid, a1 = 0;
125 int wait = 1000;
126 int err;
127
128 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
129 if (err)
130 printk(KERN_ERR PFX "Can't add vlan id, %d\n", err);
131}
132
133void enic_del_vlan(struct enic *enic, u16 vlanid)
134{
135 u64 a0 = vlanid, a1 = 0;
136 int wait = 1000;
137 int err;
138
139 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
140 if (err)
141 printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err);
142}
143
144int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
145 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
146 u8 ig_vlan_strip_en)
147{
148 u64 a0, a1;
149 u32 nic_cfg;
150 int wait = 1000;
151
152 vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
153 rss_hash_type, rss_hash_bits, rss_base_cpu,
154 rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
155
156 a0 = nic_cfg;
157 a1 = 0;
158
159 return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
160}
161
/* Release every WQ, RQ, CQ and interrupt resource previously set up by
 * enic_alloc_vnic_resources(). */
void enic_free_vnic_resources(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_free(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_free(&enic->rq[i]);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_free(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_free(&enic->intr[i]);
}
175
/* Query how many WQ/RQ/CQ/interrupt resources the device exposes and
 * cache the counts on the enic; logs the result. */
void enic_get_res_counts(struct enic *enic)
{
	enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
	enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
	enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
	enic->intr_count = vnic_dev_get_res_count(enic->vdev,
		RES_TYPE_INTR_CTRL);

	printk(KERN_INFO PFX "vNIC resources avail: "
		"wq %d rq %d cq %d intr %d\n",
		enic->wq_count, enic->rq_count,
		enic->cq_count, enic->intr_count);
}
189
/* Program the queue/interrupt wiring into the device for the current
 * interrupt mode and clear the LIF stats.  Must run after
 * enic_alloc_vnic_resources() since it writes each queue's control
 * registers.
 */
void enic_init_vnic_resources(struct enic *enic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int interrupt_offset;
	unsigned int error_interrupt_enable;
	unsigned int error_interrupt_offset;
	unsigned int cq_index;
	unsigned int i;

	intr_mode = vnic_dev_get_intr_mode(enic->vdev);

	/* Init RQ/WQ resources.
	 *
	 * RQ[0 - n-1] point to CQ[0 - n-1]
	 * WQ[0 - m-1] point to CQ[n - n+m-1]
	 *
	 * Error interrupt is not enabled for MSI.
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSIX:
		/* second-to-last interrupt vector carries error events */
		error_interrupt_enable = 1;
		error_interrupt_offset = enic->intr_count - 2;
		break;
	default:
		error_interrupt_enable = 0;
		error_interrupt_offset = 0;
		break;
	}

	for (i = 0; i < enic->rq_count; i++) {
		cq_index = i;
		vnic_rq_init(&enic->rq[i],
			cq_index,
			error_interrupt_enable,
			error_interrupt_offset);
	}

	for (i = 0; i < enic->wq_count; i++) {
		cq_index = enic->rq_count + i;
		vnic_wq_init(&enic->wq[i],
			cq_index,
			error_interrupt_enable,
			error_interrupt_offset);
	}

	/* Init CQ resources
	 *
	 * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
	 * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
	 */

	for (i = 0; i < enic->cq_count; i++) {

		switch (intr_mode) {
		case VNIC_DEV_INTR_MODE_MSIX:
			interrupt_offset = i;
			break;
		default:
			interrupt_offset = 0;
			break;
		}

		vnic_cq_init(&enic->cq[i],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			1 /* interrupt_enable */,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			interrupt_offset,
			0 /* cq_message_addr */);
	}

	/* Init INTR resources
	 *
	 * mask_on_assertion is not used for INTx due to the level-
	 * triggered nature of INTx
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_MSI:
	case VNIC_DEV_INTR_MODE_MSIX:
		mask_on_assertion = 1;
		break;
	default:
		mask_on_assertion = 0;
		break;
	}

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_init(&enic->intr[i],
			enic->config.intr_timer,
			enic->config.intr_timer_type,
			mask_on_assertion);
	}

	/* Clear LIF stats
	 */

	vnic_dev_stats_clear(enic->vdev);
}
296
/* Allocate descriptor rings for every WQ/RQ/CQ, allocate interrupt
 * resources, and hook the legacy PBA (required only for INTx).  On any
 * failure all partially-allocated resources are freed and a negative
 * errno is returned.
 */
int enic_alloc_vnic_resources(struct enic *enic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int i;
	int err;

	intr_mode = vnic_dev_get_intr_mode(enic->vdev);

	printk(KERN_INFO PFX "vNIC resources used:  "
		"wq %d rq %d cq %d intr %d intr mode %s\n",
		enic->wq_count, enic->rq_count,
		enic->cq_count, enic->intr_count,
		intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
		intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
		intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
		"unknown"
		);

	/* Allocate queue resources
	 */

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
			enic->config.wq_desc_count,
			sizeof(struct wq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
			enic->config.rq_desc_count,
			sizeof(struct rq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* First rq_count CQs serve RQs, the remainder serve WQs, matching
	 * the wiring set up later by enic_init_vnic_resources(). */
	for (i = 0; i < enic->cq_count; i++) {
		if (i < enic->rq_count)
			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
				enic->config.rq_desc_count,
				sizeof(struct cq_enet_rq_desc));
		else
			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
				enic->config.wq_desc_count,
				sizeof(struct cq_enet_wq_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err)
			goto err_out_cleanup;
	}

	/* Hook remaining resource
	 */

	enic->legacy_pba = vnic_dev_get_res(enic->vdev,
		RES_TYPE_INTR_PBA_LEGACY, 0);
	if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
		printk(KERN_ERR PFX "Failed to hook legacy pba resource\n");
		err = -ENODEV;
		goto err_out_cleanup;
	}

	return 0;

err_out_cleanup:
	enic_free_vnic_resources(enic);

	return err;
}
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
new file mode 100644
index 000000000000..68534a29b7ac
--- /dev/null
+++ b/drivers/net/enic/enic_res.h
@@ -0,0 +1,151 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _ENIC_RES_H_
21#define _ENIC_RES_H_
22
23#include "wq_enet_desc.h"
24#include "rq_enet_desc.h"
25#include "vnic_wq.h"
26#include "vnic_rq.h"
27
28#define ENIC_MIN_WQ_DESCS 64
29#define ENIC_MAX_WQ_DESCS 4096
30#define ENIC_MIN_RQ_DESCS 64
31#define ENIC_MAX_RQ_DESCS 4096
32
33#define ENIC_MIN_MTU 576 /* minimum for IPv4 */
34#define ENIC_MAX_MTU 9000
35
36#define ENIC_MULTICAST_PERFECT_FILTERS 32
37
38#define ENIC_NON_TSO_MAX_DESC 16
39
40#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
41
/* Encode one WQ (transmit) descriptor and post it.  The wmb() orders
 * the descriptor write ahead of vnic_wq_post() — presumably the post
 * updates the index the hardware fetches from; confirm in vnic_wq.h. */
static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len,
	unsigned int mss_or_csum_offset, unsigned int hdr_len,
	int vlan_tag_insert, unsigned int vlan_tag,
	int offload_mode, int cq_entry, int sop, int eop)
{
	struct wq_enet_desc *desc = vnic_wq_next_desc(wq);

	wq_enet_desc_enc(desc,
		(u64)dma_addr | VNIC_PADDR_TARGET,
		(u16)len,
		(u16)mss_or_csum_offset,
		(u16)hdr_len, (u8)offload_mode,
		(u8)eop, (u8)cq_entry,
		0, /* fcoe_encap */
		(u8)vlan_tag_insert,
		(u16)vlan_tag,
		0 /* loopback */);

	wmb();

	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
}
65
/* Post a continuation (non-SOP) fragment of a multi-descriptor packet. */
static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop)
{
	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
		0, 0, 0, 0, 0,
		eop, 0 /* !SOP */, eop);
}
73
/* Post a SOP descriptor with no checksum offload requested
 * (mss_or_csum_offset = 0 under WQ_ENET_OFFLOAD_MODE_CSUM). */
static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
	dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
	unsigned int vlan_tag, int eop)
{
	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
		0, 0, vlan_tag_insert, vlan_tag,
		WQ_ENET_OFFLOAD_MODE_CSUM,
		eop, 1 /* SOP */, eop);
}
83
/* Post a SOP descriptor requesting IP and/or TCP/UDP checksum offload;
 * bit 0 of the csum field selects IP csum, bit 1 selects L4 csum. */
static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len,
	int ip_csum, int tcpudp_csum, int vlan_tag_insert,
	unsigned int vlan_tag, int eop)
{
	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
		(ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
		0, vlan_tag_insert, vlan_tag,
		WQ_ENET_OFFLOAD_MODE_CSUM,
		eop, 1 /* SOP */, eop);
}
95
/* Post a SOP descriptor with an explicit L4 checksum offset/header
 * length (WQ_ENET_OFFLOAD_MODE_CSUM_L4). */
static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len,
	unsigned int csum_offset, unsigned int hdr_len,
	int vlan_tag_insert, unsigned int vlan_tag, int eop)
{
	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
		csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
		WQ_ENET_OFFLOAD_MODE_CSUM_L4,
		eop, 1 /* SOP */, eop);
}
106
/* Post a SOP descriptor requesting TCP segmentation offload with the
 * given MSS and header length. */
static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr, unsigned int len,
	unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
	unsigned int vlan_tag, int eop)
{
	enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
		mss, hdr_len, vlan_tag_insert, vlan_tag,
		WQ_ENET_OFFLOAD_MODE_TSO,
		eop, 1 /* SOP */, eop);
}
117
/* Encode one RQ (receive buffer) descriptor and post it.  A non-zero
 * os_buf_index marks the buffer as a continuation (NOT_SOP).  The wmb()
 * orders the descriptor write ahead of vnic_rq_post(). */
static inline void enic_queue_rq_desc(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len)
{
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
	u8 type = os_buf_index ?
		RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;

	rq_enet_desc_enc(desc,
		(u64)dma_addr | VNIC_PADDR_TARGET,
		type, (u16)len);

	wmb();

	vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
}
134
135struct enic;
136
137int enic_get_vnic_config(struct enic *);
138void enic_add_station_addr(struct enic *enic);
139void enic_add_multicast_addr(struct enic *enic, u8 *addr);
140void enic_del_multicast_addr(struct enic *enic, u8 *addr);
141void enic_add_vlan(struct enic *enic, u16 vlanid);
142void enic_del_vlan(struct enic *enic, u16 vlanid);
143int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
144 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
145 u8 ig_vlan_strip_en);
146void enic_get_res_counts(struct enic *enic);
147void enic_init_vnic_resources(struct enic *enic);
148int enic_alloc_vnic_resources(struct enic *);
149void enic_free_vnic_resources(struct enic *);
150
151#endif /* _ENIC_RES_H_ */
diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/enic/rq_enet_desc.h
new file mode 100644
index 000000000000..a06e649010ce
--- /dev/null
+++ b/drivers/net/enic/rq_enet_desc.h
@@ -0,0 +1,60 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _RQ_ENET_DESC_H_
21#define _RQ_ENET_DESC_H_
22
/* Ethernet receive queue descriptor: 16B */
struct rq_enet_desc {
	__le64 address;		/* buffer DMA address */
	__le16 length_type;	/* 14-bit length | 2-bit type (encoding below) */
	u8 reserved[6];
};

/* values carried in the 2-bit type field of length_type */
enum rq_enet_type_types {
	RQ_ENET_TYPE_ONLY_SOP = 0,
	RQ_ENET_TYPE_NOT_SOP = 1,
	RQ_ENET_TYPE_RESV2 = 2,
	RQ_ENET_TYPE_RESV3 = 3,
};

#define RQ_ENET_ADDR_BITS		64
#define RQ_ENET_LEN_BITS		14
#define RQ_ENET_LEN_MASK		((1 << RQ_ENET_LEN_BITS) - 1)
#define RQ_ENET_TYPE_BITS		2
#define RQ_ENET_TYPE_MASK		((1 << RQ_ENET_TYPE_BITS) - 1)
42
43static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
44 u64 address, u8 type, u16 length)
45{
46 desc->address = cpu_to_le64(address);
47 desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
48 ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
49}
50
51static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
52 u64 *address, u8 *type, u16 *length)
53{
54 *address = le64_to_cpu(desc->address);
55 *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
56 *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
57 RQ_ENET_TYPE_MASK);
58}
59
60#endif /* _RQ_ENET_DESC_H_ */
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c
new file mode 100644
index 000000000000..020ae6c3f3d9
--- /dev/null
+++ b/drivers/net/enic/vnic_cq.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26#include "vnic_cq.h"
27
/* Free the CQ's descriptor ring and drop the mapped control-register
 * pointer. */
void vnic_cq_free(struct vnic_cq *cq)
{
	vnic_dev_free_desc_ring(cq->vdev, &cq->ring);

	cq->ctrl = NULL;
}
34
35int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
36 unsigned int desc_count, unsigned int desc_size)
37{
38 int err;
39
40 cq->index = index;
41 cq->vdev = vdev;
42
43 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
44 if (!cq->ctrl) {
45 printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
46 return -EINVAL;
47 }
48
49 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
50 if (err)
51 return err;
52
53 return 0;
54}
55
/* Program the CQ's control registers: ring base/size, color and
 * interrupt behavior, and initial head/tail positions. */
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
	unsigned int cq_tail_color, unsigned int interrupt_enable,
	unsigned int cq_entry_enable, unsigned int cq_message_enable,
	unsigned int interrupt_offset, u64 cq_message_addr)
{
	u64 paddr;

	paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &cq->ctrl->ring_base);
	iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
	iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
	iowrite32(color_enable, &cq->ctrl->color_enable);
	iowrite32(cq_head, &cq->ctrl->cq_head);
	iowrite32(cq_tail, &cq->ctrl->cq_tail);
	iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
	iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
	iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
	iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
	iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
	writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
}
78
/* Reset the CQ to its post-init state: software scan position back to
 * entry 0 / color 0, hardware head/tail registers back to the values
 * vnic_cq_init() uses, and the ring memory zeroed. */
void vnic_cq_clean(struct vnic_cq *cq)
{
	cq->to_clean = 0;
	cq->last_color = 0;

	iowrite32(0, &cq->ctrl->cq_head);
	iowrite32(0, &cq->ctrl->cq_tail);
	iowrite32(1, &cq->ctrl->cq_tail_color);

	vnic_dev_clear_desc_ring(&cq->ring);
}
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h
new file mode 100644
index 000000000000..114763cbc2f8
--- /dev/null
+++ b/drivers/net/enic/vnic_cq.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_CQ_H_
21#define _VNIC_CQ_H_
22
23#include "cq_desc.h"
24#include "vnic_dev.h"
25
/* Completion queue control */
/* Mirrors the device's per-CQ register block (accessed through an
 * __iomem pointer); the pad fields keep each register on a 64-bit
 * stride, with byte offsets noted alongside. */
struct vnic_cq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 flow_control_enable;	/* 0x10 */
	u32 pad1;
	u32 color_enable;		/* 0x18 */
	u32 pad2;
	u32 cq_head;			/* 0x20 */
	u32 pad3;
	u32 cq_tail;			/* 0x28 */
	u32 pad4;
	u32 cq_tail_color;		/* 0x30 */
	u32 pad5;
	u32 interrupt_enable;		/* 0x38 */
	u32 pad6;
	u32 cq_entry_enable;		/* 0x40 */
	u32 pad7;
	u32 cq_message_enable;		/* 0x48 */
	u32 pad8;
	u32 interrupt_offset;		/* 0x50 */
	u32 pad9;
	u64 cq_message_addr;		/* 0x58 */
	u32 pad10;
};
52
/* Per-completion-queue driver state. */
struct vnic_cq {
	unsigned int index;			/* CQ number on the device */
	struct vnic_dev *vdev;
	struct vnic_cq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;		/* descriptor ring memory */
	unsigned int to_clean;			/* next entry to service */
	unsigned int last_color;		/* expected color of stale entries */
};
61
/* Service up to work_to_do completed entries, invoking q_service for
 * each.  An entry is "new" when its color bit differs from
 * cq->last_color; last_color flips every time the scan wraps, so stale
 * entries from the previous pass are skipped without clearing them.
 * Stops early if q_service returns non-zero.  Returns the number of
 * entries handled. */
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
	unsigned int work_to_do,
	int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque),
	void *opaque)
{
	struct cq_desc *cq_desc;
	unsigned int work_done = 0;
	u16 q_number, completed_index;
	u8 type, color;

	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
		cq->ring.desc_size * cq->to_clean);
	cq_desc_dec(cq_desc, &type, &color,
		&q_number, &completed_index);

	while (color != cq->last_color) {

		if ((*q_service)(cq->vdev, cq_desc, type,
			q_number, completed_index, opaque))
			break;

		cq->to_clean++;
		if (cq->to_clean == cq->ring.desc_count) {
			cq->to_clean = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
			cq->ring.desc_size * cq->to_clean);
		cq_desc_dec(cq_desc, &type, &color,
			&q_number, &completed_index);

		work_done++;
		if (work_done >= work_to_do)
			break;
	}

	return work_done;
}
102
103void vnic_cq_free(struct vnic_cq *cq);
104int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
105 unsigned int desc_count, unsigned int desc_size);
106void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
107 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
108 unsigned int cq_tail_color, unsigned int interrupt_enable,
109 unsigned int cq_entry_enable, unsigned int message_enable,
110 unsigned int interrupt_offset, u64 message_addr);
111void vnic_cq_clean(struct vnic_cq *cq);
112
113#endif /* _VNIC_CQ_H_ */
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
new file mode 100644
index 000000000000..4d104f5c30f9
--- /dev/null
+++ b/drivers/net/enic/vnic_dev.c
@@ -0,0 +1,674 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25#include <linux/if_ether.h>
26
27#include "vnic_resource.h"
28#include "vnic_devcmd.h"
29#include "vnic_dev.h"
30#include "vnic_stats.h"
31
/* One discovered BAR0 resource region: its mapped address and how many
 * instances (queues/interrupts) it contains. */
struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};
36
/* Per-device vNIC state, private to vnic_dev.c.  The *_pa fields hold
 * the DMA addresses of the matching consistent-memory buffers, which
 * are allocated lazily and cached (see vnic_dev_fw_info/stats_dump). */
struct vnic_dev {
	void *priv;			/* caller's cookie (see vnic_dev_priv) */
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;	/* devcmd register block */
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
};
53
54#define VNIC_MAX_RES_HDR_SIZE \
55 (sizeof(struct vnic_resource_header) + \
56 sizeof(struct vnic_resource) * RES_TYPE_MAX)
57#define VNIC_RES_STRIDE 128
58
/* Return the caller-supplied private cookie stored on the vnic_dev. */
void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
63
/* Walk the resource table at the start of BAR0 (after validating its
 * magic/version header) and record the mapped address and count of each
 * recognized resource type in vdev->res[].  Unknown types and resources
 * outside BAR0 are skipped.  Returns 0 or -EINVAL on a malformed BAR. */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	/* resource entries follow immediately after the header */
	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num != 0)  /* only mapping in BAR0 resources */
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}
136
/* Return how many instances of a resource type were discovered
 * (0 if the type was absent from BAR0). */
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
142
/* Return the mapped address of instance @index of a resource type, or
 * NULL if the type was never discovered.  Per-queue/interrupt types are
 * laid out VNIC_RES_STRIDE bytes apart; other types ignore @index. */
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
160
/* Compute and store the aligned geometry of a descriptor ring in @ring,
 * returning the total allocation size including alignment slack. */
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	/* extra base_align bytes let the caller round the base up */
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}
187
/* Zero the (aligned) descriptor ring memory. */
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
192
/* Allocate DMA-coherent memory for a descriptor ring.  The buffer is
 * over-allocated by base_align bytes so the usable base address can be
 * rounded up to the 512-byte boundary the hardware requires; both the
 * raw and the aligned pointers/addresses are kept for freeing later. */
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		printk(KERN_ERR
			"Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	/* one descriptor is kept unused to distinguish full from empty */
	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
220
/* Free a ring allocated by vnic_dev_alloc_desc_ring(); no-op if the
 * ring was never allocated (descs == NULL). */
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
231
232int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
233 u64 *a0, u64 *a1, int wait)
234{
235 struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
236 int delay;
237 u32 status;
238 int dev_cmd_err[] = {
239 /* convert from fw's version of error.h to host's version */
240 0, /* ERR_SUCCESS */
241 EINVAL, /* ERR_EINVAL */
242 EFAULT, /* ERR_EFAULT */
243 EPERM, /* ERR_EPERM */
244 EBUSY, /* ERR_EBUSY */
245 };
246 int err;
247
248 status = ioread32(&devcmd->status);
249 if (status & STAT_BUSY) {
250 printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
251 return -EBUSY;
252 }
253
254 if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
255 writeq(*a0, &devcmd->args[0]);
256 writeq(*a1, &devcmd->args[1]);
257 wmb();
258 }
259
260 iowrite32(cmd, &devcmd->cmd);
261
262 if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
263 return 0;
264
265 for (delay = 0; delay < wait; delay++) {
266
267 udelay(100);
268
269 status = ioread32(&devcmd->status);
270 if (!(status & STAT_BUSY)) {
271
272 if (status & STAT_ERROR) {
273 err = dev_cmd_err[(int)readq(&devcmd->args[0])];
274 printk(KERN_ERR "Error %d devcmd %d\n",
275 err, _CMD_N(cmd));
276 return -err;
277 }
278
279 if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
280 rmb();
281 *a0 = readq(&devcmd->args[0]);
282 *a1 = readq(&devcmd->args[1]);
283 }
284
285 return 0;
286 }
287 }
288
289 printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
290 return -ETIMEDOUT;
291}
292
/* Return firmware info through *fw_info.  The DMA buffer is allocated
 * and the CMD_MCPU_FW_INFO devcmd issued only on first call; the result
 * is cached on the vnic_dev afterwards.  NOTE(review): on devcmd
 * failure *fw_info is still set to the (uninitialized) buffer while the
 * error is returned — callers must check err before using it. */
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}
317
318int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
319 void *value)
320{
321 u64 a0, a1;
322 int wait = 1000;
323 int err;
324
325 a0 = offset;
326 a1 = size;
327
328 err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
329
330 switch (size) {
331 case 1: *(u8 *)value = (u8)a0; break;
332 case 2: *(u16 *)value = (u16)a0; break;
333 case 4: *(u32 *)value = (u32)a0; break;
334 case 8: *(u64 *)value = a0; break;
335 default: BUG(); break;
336 }
337
338 return err;
339}
340
341int vnic_dev_stats_clear(struct vnic_dev *vdev)
342{
343 u64 a0 = 0, a1 = 0;
344 int wait = 1000;
345 return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
346}
347
348int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
349{
350 u64 a0, a1;
351 int wait = 1000;
352
353 if (!vdev->stats) {
354 vdev->stats = pci_alloc_consistent(vdev->pdev,
355 sizeof(struct vnic_stats), &vdev->stats_pa);
356 if (!vdev->stats)
357 return -ENOMEM;
358 }
359
360 *stats = vdev->stats;
361 a0 = vdev->stats_pa;
362 a1 = sizeof(struct vnic_stats);
363
364 return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
365}
366
367int vnic_dev_close(struct vnic_dev *vdev)
368{
369 u64 a0 = 0, a1 = 0;
370 int wait = 1000;
371 return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
372}
373
374int vnic_dev_enable(struct vnic_dev *vdev)
375{
376 u64 a0 = 0, a1 = 0;
377 int wait = 1000;
378 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
379}
380
381int vnic_dev_disable(struct vnic_dev *vdev)
382{
383 u64 a0 = 0, a1 = 0;
384 int wait = 1000;
385 return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
386}
387
388int vnic_dev_open(struct vnic_dev *vdev, int arg)
389{
390 u64 a0 = (u32)arg, a1 = 0;
391 int wait = 1000;
392 return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
393}
394
395int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
396{
397 u64 a0 = 0, a1 = 0;
398 int wait = 1000;
399 int err;
400
401 *done = 0;
402
403 err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
404 if (err)
405 return err;
406
407 *done = (a0 == 0);
408
409 return 0;
410}
411
412int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
413{
414 u64 a0 = (u32)arg, a1 = 0;
415 int wait = 1000;
416 return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
417}
418
419int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
420{
421 u64 a0 = 0, a1 = 0;
422 int wait = 1000;
423 int err;
424
425 *done = 0;
426
427 err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
428 if (err)
429 return err;
430
431 *done = (a0 == 0);
432
433 return 0;
434}
435
436int vnic_dev_hang_notify(struct vnic_dev *vdev)
437{
438 u64 a0, a1;
439 int wait = 1000;
440 return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
441}
442
443int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
444{
445 u64 a0, a1;
446 int wait = 1000;
447 int err, i;
448
449 for (i = 0; i < ETH_ALEN; i++)
450 mac_addr[i] = 0;
451
452 err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
453 if (err)
454 return err;
455
456 for (i = 0; i < ETH_ALEN; i++)
457 mac_addr[i] = ((u8 *)&a0)[i];
458
459 return 0;
460}
461
462void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
463 int broadcast, int promisc, int allmulti)
464{
465 u64 a0, a1 = 0;
466 int wait = 1000;
467 int err;
468
469 a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
470 (multicast ? CMD_PFILTER_MULTICAST : 0) |
471 (broadcast ? CMD_PFILTER_BROADCAST : 0) |
472 (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
473 (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
474
475 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
476 if (err)
477 printk(KERN_ERR "Can't set packet filter\n");
478}
479
480void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
481{
482 u64 a0 = 0, a1 = 0;
483 int wait = 1000;
484 int err;
485 int i;
486
487 for (i = 0; i < ETH_ALEN; i++)
488 ((u8 *)&a0)[i] = addr[i];
489
490 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
491 if (err)
492 printk(KERN_ERR
493 "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
494 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
495 err);
496}
497
498void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
499{
500 u64 a0 = 0, a1 = 0;
501 int wait = 1000;
502 int err;
503 int i;
504
505 for (i = 0; i < ETH_ALEN; i++)
506 ((u8 *)&a0)[i] = addr[i];
507
508 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
509 if (err)
510 printk(KERN_ERR
511 "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
512 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
513 err);
514}
515
/* Register a DMA-coherent notify buffer with the firmware and tie its
 * updates to interrupt number 'intr'.
 *
 * The buffer is allocated once and cached on vdev; it is freed in
 * vnic_dev_unregister().  Returns 0 on success, -ENOMEM, or the devcmd
 * error.
 */
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->notify) {
		vdev->notify = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa);
		if (!vdev->notify)
			return -ENOMEM;
	}

	/* CMD_NOTIFY packing: a0 = buffer bus address; a1 bits 32..47 =
	 * interrupt number, low 32 bits = notify struct size (see
	 * vnic_devcmd.h) */
	a0 = vdev->notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
535
/* Unregister the notify buffer with the firmware (paddr=0) and detach
 * from the interrupt (intr num = -1).  Best-effort: the devcmd result
 * is deliberately ignored.  The DMA buffer itself is freed later, in
 * vnic_dev_unregister().
 */
void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;

	a0 = 0; /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
547
/* Snapshot the firmware notify area into vdev->notify_copy.
 *
 * The device updates the notify block asynchronously (DMA), so the copy
 * is retried until the sum of all words after the first equals the csum
 * word — i.e. until a self-consistent snapshot has been captured.
 *
 * Returns 1 when notify_copy is valid, 0 if no notify buffer is set.
 *
 * NOTE(review): loops indefinitely if a consistent snapshot is never
 * observed (e.g. wedged firmware continuously rewriting the area) —
 * confirm this is acceptable in all calling contexts.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
569
570int vnic_dev_init(struct vnic_dev *vdev, int arg)
571{
572 u64 a0 = (u32)arg, a1 = 0;
573 int wait = 1000;
574 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
575}
576
577int vnic_dev_link_status(struct vnic_dev *vdev)
578{
579 if (vdev->linkstatus)
580 return *vdev->linkstatus;
581
582 if (!vnic_dev_notify_ready(vdev))
583 return 0;
584
585 return vdev->notify_copy.link_state;
586}
587
588u32 vnic_dev_port_speed(struct vnic_dev *vdev)
589{
590 if (!vnic_dev_notify_ready(vdev))
591 return 0;
592
593 return vdev->notify_copy.port_speed;
594}
595
596u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
597{
598 if (!vnic_dev_notify_ready(vdev))
599 return 0;
600
601 return vdev->notify_copy.msglvl;
602}
603
604u32 vnic_dev_mtu(struct vnic_dev *vdev)
605{
606 if (!vnic_dev_notify_ready(vdev))
607 return 0;
608
609 return vdev->notify_copy.mtu;
610}
611
/* Record the interrupt mode (INTx/MSI/MSI-X) the driver selected. */
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}
617
/* Return the interrupt mode previously stored by
 * vnic_dev_set_intr_mode(). */
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
623
624void vnic_dev_unregister(struct vnic_dev *vdev)
625{
626 if (vdev) {
627 if (vdev->notify)
628 pci_free_consistent(vdev->pdev,
629 sizeof(struct vnic_devcmd_notify),
630 vdev->notify,
631 vdev->notify_pa);
632 if (vdev->linkstatus)
633 pci_free_consistent(vdev->pdev,
634 sizeof(u32),
635 vdev->linkstatus,
636 vdev->linkstatus_pa);
637 if (vdev->stats)
638 pci_free_consistent(vdev->pdev,
639 sizeof(struct vnic_dev),
640 vdev->stats, vdev->stats_pa);
641 if (vdev->fw_info)
642 pci_free_consistent(vdev->pdev,
643 sizeof(struct vnic_devcmd_fw_info),
644 vdev->fw_info, vdev->fw_info_pa);
645 kfree(vdev);
646 }
647}
648
649struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
650 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
651{
652 if (!vdev) {
653 vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
654 if (!vdev)
655 return NULL;
656 }
657
658 vdev->priv = priv;
659 vdev->pdev = pdev;
660
661 if (vnic_dev_discover_res(vdev, bar))
662 goto err_out;
663
664 vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
665 if (!vdev->devcmd)
666 goto err_out;
667
668 return vdev;
669
670err_out:
671 vnic_dev_unregister(vdev);
672 return NULL;
673}
674
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
new file mode 100644
index 000000000000..2dcffd3a24bd
--- /dev/null
+++ b/drivers/net/enic/vnic_dev.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_DEV_H_
21#define _VNIC_DEV_H_
22
23#include "vnic_resource.h"
24#include "vnic_devcmd.h"
25
26#ifndef VNIC_PADDR_TARGET
27#define VNIC_PADDR_TARGET 0x0000000000000000ULL
28#endif
29
/* Interrupt dispatch mode the driver selected for the device (stored
 * via vnic_dev_set_intr_mode()). */
enum vnic_dev_intr_mode {
	VNIC_DEV_INTR_MODE_UNKNOWN,
	VNIC_DEV_INTR_MODE_INTX,
	VNIC_DEV_INTR_MODE_MSI,
	VNIC_DEV_INTR_MODE_MSIX,
};
36
/* One PCI BAR mapping handed to vnic_dev_register(). */
struct vnic_dev_bar {
	void __iomem *vaddr;	/* mapped I/O virtual base */
	dma_addr_t bus_addr;	/* bus/physical base */
	unsigned long len;	/* mapping length in bytes */
};
42
/* Descriptor ring state.  The ring is allocated oversized
 * (size_unaligned) and then aligned up to base_align; descs/base_addr
 * are the aligned addresses actually used, while the *_unaligned
 * fields are kept only so the original allocation can be freed. */
struct vnic_dev_ring {
	void *descs;			/* aligned descriptor area (virt) */
	size_t size;			/* aligned size in bytes */
	dma_addr_t base_addr;		/* aligned bus address */
	size_t base_align;		/* required base alignment */
	void *descs_unaligned;		/* as returned by the allocator */
	size_t size_unaligned;
	dma_addr_t base_addr_unaligned;
	unsigned int desc_size;		/* bytes per descriptor */
	unsigned int desc_count;	/* descriptors in the ring */
	unsigned int desc_avail;	/* free descriptors (max count-1) */
};
55
56struct vnic_dev;
57struct vnic_stats;
58
59void *vnic_dev_priv(struct vnic_dev *vdev);
60unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
61 enum vnic_res_type type);
62void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
63 unsigned int index);
64unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
65 unsigned int desc_count, unsigned int desc_size);
66void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
67int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
68 unsigned int desc_count, unsigned int desc_size);
69void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
70 struct vnic_dev_ring *ring);
71int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
72 u64 *a0, u64 *a1, int wait);
73int vnic_dev_fw_info(struct vnic_dev *vdev,
74 struct vnic_devcmd_fw_info **fw_info);
75int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
76 void *value);
77int vnic_dev_stats_clear(struct vnic_dev *vdev);
78int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
79int vnic_dev_hang_notify(struct vnic_dev *vdev);
80void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
81 int broadcast, int promisc, int allmulti);
82void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
83void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
84int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
85int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
86void vnic_dev_notify_unset(struct vnic_dev *vdev);
87int vnic_dev_link_status(struct vnic_dev *vdev);
88u32 vnic_dev_port_speed(struct vnic_dev *vdev);
89u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
90u32 vnic_dev_mtu(struct vnic_dev *vdev);
91int vnic_dev_close(struct vnic_dev *vdev);
92int vnic_dev_enable(struct vnic_dev *vdev);
93int vnic_dev_disable(struct vnic_dev *vdev);
94int vnic_dev_open(struct vnic_dev *vdev, int arg);
95int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
96int vnic_dev_init(struct vnic_dev *vdev, int arg);
97int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
98int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
99void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
100 enum vnic_dev_intr_mode intr_mode);
101enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
102void vnic_dev_unregister(struct vnic_dev *vdev);
103struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
104 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar);
105
106#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
new file mode 100644
index 000000000000..d8617a3373b1
--- /dev/null
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -0,0 +1,282 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_DEVCMD_H_
21#define _VNIC_DEVCMD_H_
22
23#define _CMD_NBITS 14
24#define _CMD_VTYPEBITS 10
25#define _CMD_FLAGSBITS 6
26#define _CMD_DIRBITS 2
27
28#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
29#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
30#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
31#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
32
33#define _CMD_NSHIFT 0
34#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
35#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
36#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
37
38/*
39 * Direction bits (from host perspective).
40 */
41#define _CMD_DIR_NONE 0U
42#define _CMD_DIR_WRITE 1U
43#define _CMD_DIR_READ 2U
44#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
45
46/*
47 * Flag bits.
48 */
49#define _CMD_FLAGS_NONE 0U
50#define _CMD_FLAGS_NOWAIT 1U
51
52/*
53 * vNIC type bits.
54 */
55#define _CMD_VTYPE_NONE 0U
56#define _CMD_VTYPE_ENET 1U
57#define _CMD_VTYPE_FC 2U
58#define _CMD_VTYPE_SCSI 4U
59#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
60
61/*
62 * Used to create cmds..
63*/
64#define _CMDCF(dir, flags, vtype, nr) \
65 (((dir) << _CMD_DIRSHIFT) | \
66 ((flags) << _CMD_FLAGSSHIFT) | \
67 ((vtype) << _CMD_VTYPESHIFT) | \
68 ((nr) << _CMD_NSHIFT))
69#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
70#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
71
72/*
73 * Used to decode cmds..
74*/
75#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
76#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
77#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
78#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
79
/* Device command opcodes.  Each value encodes direction, flags, vNIC
 * type and command number via the _CMDC*/
enum vnic_devcmd_cmd {
	CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),

	/* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
	CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),

	/* dev-specific block member:
	 * in: (u16)a0=offset,(u8)a1=size
	 * out: a0=value */
	CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),

	/* stats clear */
	CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),

	/* stats dump in mem: (u64)a0=paddr to stats area,
	 * (u16)a1=sizeof stats area */
	CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),

	/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
	CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),

	/* hang detection notification */
	CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),

	/* MAC address in (u48)a0 */
	CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
			_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),

	/* disable/enable promisc mode: (u8)a0=0/1 */
/***** XXX DEPRECATED *****/
	CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),

	/* disable/enable all-multi mode: (u8)a0=0/1 */
/***** XXX DEPRECATED *****/
	CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),

	/* add addr from (u48)a0 */
	CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
			_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),

	/* del addr from (u48)a0 */
	CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
			_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),

	/* add VLAN id in (u16)a0 */
	CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),

	/* del VLAN id in (u16)a0 */
	CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),

	/* nic_cfg in (u32)a0 */
	CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),

	/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
	CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),

	/* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
	CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),

	/* initiate softreset */
	CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),

	/* softreset status:
	 * out: a0=0 reset complete, a0=1 reset in progress */
	CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),

	/* set struct vnic_devcmd_notify buffer in mem:
	 * in:
	 *   (u64)a0=paddr to notify (set paddr=0 to unset)
	 *   (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
	 *   (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
	 * out:
	 *   (u32)a1 = effective size
	 */
	CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),

	/* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
	 * (u8)a1=PXENV_UNDI_xxx */
	CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),

	/* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
	CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),

	/* open status:
	 * out: a0=0 open complete, a0=1 open in progress */
	CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),

	/* close vnic */
	CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),

	/* initialize virtual link: (u32)a0=flags (see CMD_INITF_*)
	 * NOTE(review): encoded as NOWAIT + DIR_READ, but vnic_dev_cmd()
	 * returns before polling for NOWAIT cmds, so the READ direction
	 * can never be consumed — confirm whether _CMD_DIR_WRITE was
	 * intended. */
	CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),

	/* variant of CMD_INIT, with provisioning info
	 * (u64)a0=paddr of vnic_devcmd_provinfo
	 * (u32)a1=sizeof provision info */
	CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),

	/* enable virtual link */
	CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),

	/* disable virtual link */
	CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),

	/* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
	CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),

	/* init status:
	 * out: a0=0 init complete, a0=1 init in progress
	 * if a0=0, a1=errno */
	CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),

	/* INT13 API: (u64)a0=paddr to vnic_int13_params struct
	 * (u8)a1=INT13_CMD_xxx */
	CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),

	/* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
	CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),

	/* undo initialize of virtual link */
	CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
};
202
203/* flags for CMD_OPEN */
204#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
205
206/* flags for CMD_INIT */
207#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
208
209/* flags for CMD_PACKET_FILTER */
210#define CMD_PFILTER_DIRECTED 0x01
211#define CMD_PFILTER_MULTICAST 0x02
212#define CMD_PFILTER_BROADCAST 0x04
213#define CMD_PFILTER_PROMISCUOUS 0x08
214#define CMD_PFILTER_ALL_MULTICAST 0x10
215
/* Bits in the devcmd status register (see protocol notes below). */
enum vnic_devcmd_status {
	STAT_NONE = 0,
	STAT_BUSY = 1 << 0,	/* cmd in progress */
	STAT_ERROR = 1 << 1,	/* last cmd caused error (code in a0) */
};
221
/* Firmware error codes returned in args[0] when STAT_ERROR is set.
 * vnic_dev_cmd() translates these into host errnos; codes from
 * ERR_ECMDUNKNOWN up have no entry in its translation table. */
enum vnic_devcmd_error {
	ERR_SUCCESS = 0,
	ERR_EINVAL = 1,
	ERR_EFAULT = 2,
	ERR_EPERM = 3,
	ERR_EBUSY = 4,
	ERR_ECMDUNKNOWN = 5,
	ERR_EBADSTATE = 6,
	ERR_ENOMEM = 7,
	ERR_ETIMEDOUT = 8,
	ERR_ELINKDOWN = 9,
};
234
/* Version info block DMA-written by firmware for CMD_MCPU_FW_INFO;
 * all fields are NUL-terminated strings. */
struct vnic_devcmd_fw_info {
	char fw_version[32];
	char fw_build[32];
	char hw_version[32];
	char hw_serial_number[32];
};
241
/* Host notify area registered via CMD_NOTIFY; the device DMA-writes it
 * asynchronously, and readers validate a snapshot against csum (see
 * vnic_dev_notify_ready()). */
struct vnic_devcmd_notify {
	u32 csum;		/* checksum over following words */

	u32 link_state;		/* link up == 1 */
	u32 port_speed;		/* effective port speed (rate limit) */
	u32 mtu;		/* MTU */
	u32 msglvl;		/* requested driver msg lvl */
	u32 uif;		/* uplink interface */
	u32 status;		/* status bits (see VNIC_STF_*) */
	u32 error;		/* error code (see ERR_*) for first ERR */
};
253#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
254
/* Provisioning info blob passed by paddr to CMD_INIT_PROV_INFO;
 * data[] is a flexible trailing payload of a1 bytes total. */
struct vnic_devcmd_provinfo {
	u8 oui[3];
	u8 type;
	u8 data[0];
};
260
261/*
262 * Writing cmd register causes STAT_BUSY to get set in status register.
263 * When cmd completes, STAT_BUSY will be cleared.
264 *
265 * If cmd completed successfully STAT_ERROR will be clear
266 * and args registers contain cmd-specific results.
267 *
268 * If cmd error, STAT_ERROR will be set and args[0] contains error code.
269 *
270 * status register is read-only. While STAT_BUSY is set,
271 * all other register contents are read-only.
272 */
273
/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
#define VNIC_DEVCMD_NARGS 15
/* Memory-mapped devcmd register region (see protocol notes above). */
struct vnic_devcmd {
	u32 status;	/* RO */
	u32 cmd;	/* RW */
	u64 args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
};
281
282#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
new file mode 100644
index 000000000000..6332ac9391b8
--- /dev/null
+++ b/drivers/net/enic/vnic_enet.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_ENIC_H_
21#define _VNIC_ENIC_H_
22
/* Device-specific region: enet configuration */
struct vnic_enet_config {
	u32 flags;		/* VENETF_* feature bits */
	u32 wq_desc_count;	/* work queue ring size */
	u32 rq_desc_count;	/* receive queue ring size */
	u16 mtu;
	u16 intr_timer;		/* interrupt coalescing timer */
	u8 intr_timer_type;
	u8 intr_mode;
	char devname[16];
};
34
35#define VENETF_TSO 0x1 /* TSO enabled */
36#define VENETF_LRO 0x2 /* LRO enabled */
37#define VENETF_RXCSUM 0x4 /* RX csum enabled */
38#define VENETF_TXCSUM 0x8 /* TX csum enabled */
39#define VENETF_RSS 0x10 /* RSS enabled */
40#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */
41#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */
42#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */
43#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
44#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
45#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
46
47#endif /* _VNIC_ENIC_H_ */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
new file mode 100644
index 000000000000..ddc38f8f4656
--- /dev/null
+++ b/drivers/net/enic/vnic_intr.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25
26#include "vnic_dev.h"
27#include "vnic_intr.h"
28
/* Detach the intr from its control registers.  Frees no memory —
 * ctrl points into the device BAR. */
void vnic_intr_free(struct vnic_intr *intr)
{
	intr->ctrl = NULL;
}
33
34int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
35 unsigned int index)
36{
37 intr->index = index;
38 intr->vdev = vdev;
39
40 intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
41 if (!intr->ctrl) {
42 printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
43 index);
44 return -EINVAL;
45 }
46
47 return 0;
48}
49
/* Program the interrupt's coalescing timer/type and mask-on-assertion
 * behavior, and zero any accumulated credits.
 *
 * NOTE(review): ctrl->coalescing_value is not written here — presumably
 * device-maintained; confirm against the hardware spec.
 */
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
	unsigned int coalescing_type, unsigned int mask_on_assertion)
{
	iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
	iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
	iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
	iowrite32(0, &intr->ctrl->int_credits);
}
58
/* Reset the interrupt's credit counter to zero. */
void vnic_intr_clean(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->int_credits);
}
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
new file mode 100644
index 000000000000..ccc408116af8
--- /dev/null
+++ b/drivers/net/enic/vnic_intr.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_INTR_H_
21#define _VNIC_INTR_H_
22
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26
27#define VNIC_INTR_TIMER_MAX 0xffff
28
29#define VNIC_INTR_TIMER_TYPE_ABS 0
30#define VNIC_INTR_TIMER_TYPE_QUIET 1
31
/* Interrupt control — memory-mapped register layout for one interrupt.
 * Registers are 64-bit aligned (hence the pad words); offsets are
 * noted per field. */
struct vnic_intr_ctrl {
	u32 coalescing_timer;		/* 0x00 */
	u32 pad0;
	u32 coalescing_value;		/* 0x08 */
	u32 pad1;
	u32 coalescing_type;		/* 0x10 */
	u32 pad2;
	u32 mask_on_assertion;		/* 0x18 */
	u32 pad3;
	u32 mask;			/* 0x20 */
	u32 pad4;
	u32 int_credits;		/* 0x28 */
	u32 pad5;
	u32 int_credit_return;		/* 0x30 */
	u32 pad6;
};
49
/* One device interrupt: its resource index, owning device, and a
 * pointer into the BAR at its control registers. */
struct vnic_intr {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_intr_ctrl __iomem *ctrl;	/* memory-mapped */
};
55
/* Enable delivery of this interrupt (write 0 to its mask register). */
static inline void vnic_intr_unmask(struct vnic_intr *intr)
{
	iowrite32(0, &intr->ctrl->mask);
}
60
/* Suppress delivery of this interrupt (write 1 to its mask register). */
static inline void vnic_intr_mask(struct vnic_intr *intr)
{
	iowrite32(1, &intr->ctrl->mask);
}
65
/* Return 'credits' (count of work items processed) to the device,
 * optionally unmasking the interrupt and/or resetting its coalescing
 * timer in the same register write: credits occupy the low 16 bits,
 * unmask/reset are flag bits 16 and 17. */
static inline void vnic_intr_return_credits(struct vnic_intr *intr,
	unsigned int credits, int unmask, int reset_timer)
{
#define VNIC_INTR_UNMASK_SHIFT		16
#define VNIC_INTR_RESET_TIMER_SHIFT	17

	u32 int_credit_return = (credits & 0xffff) |
		(unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
		(reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);

	iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
}
78
/* Read the legacy (INTx) pending-bit-array register; the read itself
 * acknowledges the interrupt. */
static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
{
	/* get and ack interrupt in one read (clear-and-ack-on-read) */
	return ioread32(legacy_pba);
}
84
85void vnic_intr_free(struct vnic_intr *intr);
86int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
87 unsigned int index);
88void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
89 unsigned int coalescing_type, unsigned int mask_on_assertion);
90void vnic_intr_clean(struct vnic_intr *intr);
91
92#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
new file mode 100644
index 000000000000..dadf26fae69a
--- /dev/null
+++ b/drivers/net/enic/vnic_nic.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_NIC_H_
21#define _VNIC_NIC_H_
22
23#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
24#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
25#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
26#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
27#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
28#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
29#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
30#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
31#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
32#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
33#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
34#define NIC_CFG_RSS_ENABLE (1UL << 22)
35#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
36#define NIC_CFG_RSS_ENABLE_SHIFT 22
37#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
38#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
39#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
40#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
41#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
42#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
43
44static inline void vnic_set_nic_cfg(u32 *nic_cfg,
45 u8 rss_default_cpu, u8 rss_hash_type,
46 u8 rss_hash_bits, u8 rss_base_cpu,
47 u8 rss_enable, u8 tso_ipid_split_en,
48 u8 ig_vlan_strip_en)
49{
50 *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
51 ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
52 << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
53 ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
54 << NIC_CFG_RSS_HASH_BITS_SHIFT) |
55 ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
56 << NIC_CFG_RSS_BASE_CPU_SHIFT) |
57 ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
58 << NIC_CFG_RSS_ENABLE_SHIFT) |
59 ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
60 << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
61 ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
62 << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
63}
64
65#endif /* _VNIC_NIC_H_ */
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
new file mode 100644
index 000000000000..144d2812f081
--- /dev/null
+++ b/drivers/net/enic/vnic_resource.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_RESOURCE_H_
21#define _VNIC_RESOURCE_H_
22
/* Signature/version expected at the start of the device resource table */
#define VNIC_RES_MAGIC		0x766E6963L	/* 'vnic' */
#define VNIC_RES_VERSION	0x00000000L
25
/* vNIC resource types.
 * NOTE(review): values are implicit (0..N) and appear to form the on-device
 * resource-table encoding — do not reorder or remove entries; confirm
 * against firmware documentation before changing.
 */
enum vnic_res_type {
	RES_TYPE_EOL,			/* End-of-list */
	RES_TYPE_WQ,			/* Work queues */
	RES_TYPE_RQ,			/* Receive queues */
	RES_TYPE_CQ,			/* Completion queues */
	RES_TYPE_RSVD1,
	RES_TYPE_NIC_CFG,		/* Enet NIC config registers */
	RES_TYPE_RSVD2,
	RES_TYPE_RSVD3,
	RES_TYPE_RSVD4,
	RES_TYPE_RSVD5,
	RES_TYPE_INTR_CTRL,		/* Interrupt ctrl table */
	RES_TYPE_INTR_TABLE,		/* MSI/MSI-X Interrupt table */
	RES_TYPE_INTR_PBA,		/* MSI/MSI-X PBA table */
	RES_TYPE_INTR_PBA_LEGACY,	/* Legacy intr status, r2c */
	RES_TYPE_RSVD6,
	RES_TYPE_RSVD7,
	RES_TYPE_DEVCMD,		/* Device command region */
	RES_TYPE_PASS_THRU_PAGE,	/* Pass-thru page */

	RES_TYPE_MAX,			/* Count of resource types */
};
49
/* Header at the start of the resource table; magic must equal
 * VNIC_RES_MAGIC and version VNIC_RES_VERSION for the table to be valid.
 */
struct vnic_resource_header {
	u32 magic;
	u32 version;
};
54
/* One entry of the resource table: a run of `count` resources of `type`
 * located at `bar_offset` within PCI BAR `bar`.
 */
struct vnic_resource {
	u8 type;		/* enum vnic_res_type */
	u8 bar;			/* PCI BAR number holding the resource */
	u8 pad[2];
	u32 bar_offset;		/* byte offset within the BAR */
	u32 count;		/* number of contiguous resources */
};
62
63#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
new file mode 100644
index 000000000000..9365e63e821a
--- /dev/null
+++ b/drivers/net/enic/vnic_rq.c
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25
26#include "vnic_dev.h"
27#include "vnic_rq.h"
28
/* Allocate the vnic_rq_buf bookkeeping entries for every descriptor and
 * link them into a singly linked ring that mirrors the descriptor ring.
 * Returns 0 on success, -ENOMEM on allocation failure (the caller,
 * vnic_rq_alloc(), frees any partially allocated blocks via vnic_rq_free()).
 */
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
	struct vnic_rq_buf *buf;
	struct vnic_dev *vdev;
	unsigned int i, j, count = rq->ring.desc_count;
	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);

	vdev = rq->vdev;

	/* Allocate the buf entries in fixed-size blocks of
	 * VNIC_RQ_BUF_BLK_ENTRIES to avoid one large allocation.
	 */
	for (i = 0; i < blks; i++) {
		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!rq->bufs[i]) {
			printk(KERN_ERR "Failed to alloc rq_bufs\n");
			return -ENOMEM;
		}
	}

	/* Chain every buf to its successor; the last in-use entry (index
	 * count-1) wraps back to bufs[0], closing the ring.  Each buf also
	 * caches a pointer to its descriptor within the DMA ring.
	 */
	for (i = 0; i < blks; i++) {
		buf = rq->bufs[i];
		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
			buf->desc = (u8 *)rq->ring.descs +
				rq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				buf->next = rq->bufs[0];
				break;
			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
				buf->next = rq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	rq->to_use = rq->to_clean = rq->bufs[0];
	rq->buf_index = 0;

	return 0;
}
69
70void vnic_rq_free(struct vnic_rq *rq)
71{
72 struct vnic_dev *vdev;
73 unsigned int i;
74
75 vdev = rq->vdev;
76
77 vnic_dev_free_desc_ring(vdev, &rq->ring);
78
79 for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
80 kfree(rq->bufs[i]);
81 rq->bufs[i] = NULL;
82 }
83
84 rq->ctrl = NULL;
85}
86
87int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
88 unsigned int desc_count, unsigned int desc_size)
89{
90 int err;
91
92 rq->index = index;
93 rq->vdev = vdev;
94
95 rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
96 if (!rq->ctrl) {
97 printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
98 return -EINVAL;
99 }
100
101 vnic_rq_disable(rq);
102
103 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
104 if (err)
105 return err;
106
107 err = vnic_rq_alloc_bufs(rq);
108 if (err) {
109 vnic_rq_free(rq);
110 return err;
111 }
112
113 return 0;
114}
115
/* Program the RQ control registers and align SW ring state with HW.
 * The SW to_use/to_clean pointers are seeded from the HW fetch_index so
 * posting resumes exactly where HW will fetch next.
 */
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	u32 fetch_index;

	/* VNIC_PADDR_TARGET tags the ring base address for the device */
	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &rq->ctrl->ring_base);
	iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
	iowrite32(cq_index, &rq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
	iowrite32(0, &rq->ctrl->dropped_packet_count);
	iowrite32(0, &rq->ctrl->error_status);

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	/* posted == fetch: ring starts empty from HW's point of view */
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;
}
141
/* Read the RQ error status register; non-zero indicates a HW error */
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
	return ioread32(&rq->ctrl->error_status);
}
146
/* Start HW fetching from this RQ */
void vnic_rq_enable(struct vnic_rq *rq)
{
	iowrite32(1, &rq->ctrl->enable);
}
151
152int vnic_rq_disable(struct vnic_rq *rq)
153{
154 unsigned int wait;
155
156 iowrite32(0, &rq->ctrl->enable);
157
158 /* Wait for HW to ACK disable request */
159 for (wait = 0; wait < 100; wait++) {
160 if (!(ioread32(&rq->ctrl->running)))
161 return 0;
162 udelay(1);
163 }
164
165 printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
166
167 return -ETIMEDOUT;
168}
169
/* Return every outstanding buffer to the caller via buf_clean and reset
 * the ring to an empty state.  The RQ must already be disabled.
 */
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;

	/* Cleaning a live RQ would race with HW fetches */
	BUG_ON(ioread32(&rq->ctrl->enable));

	buf = rq->to_clean;

	while (vnic_rq_desc_used(rq) > 0) {

		(*buf_clean)(rq, buf);

		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	rq->buf_index = 0;

	vnic_dev_clear_desc_ring(&rq->ring);
}
199
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
new file mode 100644
index 000000000000..82bfca67cc4d
--- /dev/null
+++ b/drivers/net/enic/vnic_rq.h
@@ -0,0 +1,204 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_RQ_H_
21#define _VNIC_RQ_H_
22
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26#include "vnic_cq.h"
27
/* Receive queue control registers (memory-mapped).  Each 32-bit register
 * occupies a 64-bit slot — the padN members hold the register layout to
 * 8-byte strides; offsets in comments are from the RQ resource base.
 */
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
	u32 dropped_packet_count;	/* 0x50 */
	u32 pad9;
	u32 dropped_packet_count_rc;	/* 0x58 */
	u32 pad10;
};
54
/* Break the vnic_rq_buf allocations into blocks of 64 entries;
 * BLKS_MAX is sized for the largest supported ring of 4096 descriptors.
 */
#define VNIC_RQ_BUF_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_SZ \
	(VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
62
/* Per-descriptor SW bookkeeping; entries form a singly linked ring
 * built by vnic_rq_alloc_bufs().
 */
struct vnic_rq_buf {
	struct vnic_rq_buf *next;	/* next entry in the ring */
	dma_addr_t dma_addr;		/* DMA address of the posted buffer */
	void *os_buf;			/* OS buffer (e.g. skb) for this desc */
	unsigned int os_buf_index;
	unsigned int len;		/* posted buffer length */
	unsigned int index;		/* descriptor index within the ring */
	void *desc;			/* this entry's descriptor in the ring */
};
72
/* Receive queue state: control registers, DMA descriptor ring, and the
 * SW buf ring with its produce (to_use) / consume (to_clean) cursors.
 */
struct vnic_rq {
	unsigned int index;			/* RQ number on the device */
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;		/* next desc to post */
	struct vnic_rq_buf *to_clean;		/* next desc HW will complete */
	void *os_buf_head;
	unsigned int buf_index;
	unsigned int pkts_outstanding;
};
85
/* Number of descriptors SW may still post */
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}
91
/* Number of descriptors currently posted to HW.  One descriptor is
 * always held back (the -1) — NOTE(review): presumably to disambiguate
 * a full ring from an empty one; confirm against HW spec.
 */
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
97
/* Descriptor that the next vnic_rq_post() will fill */
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}
102
/* Ring index of the next descriptor to be posted */
static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}
107
/* Hand out the next OS buffer index (post-incremented on each call) */
static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
{
	return rq->buf_index++;
}
112
/* Post one receive buffer: record it in the current buf entry, advance
 * to_use, and periodically publish posted_index to HW.
 */
static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE 0xf	/* keep 2^n - 1 */
#endif

	/* Batch MMIO writes: only tell HW about new buffers every
	 * (VNIC_RQ_RETURN_RATE + 1) posts.
	 */
	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0)
		iowrite32(buf->index, &rq->ctrl->posted_index);
}
138
/* Give `count` descriptors back to SW (used with VNIC_RQ_DEFER_RETURN_DESC) */
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}
143
/* Whether vnic_rq_service() returns descriptors to SW immediately or the
 * caller returns them later via vnic_rq_return_descs().
 */
enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};
148
/* Service completed descriptors from to_clean up to and including
 * completed_index, invoking buf_service for each.  `skipped` is non-zero
 * for descriptors before completed_index (completed implicitly, without
 * their own CQ entry).
 */
static inline void vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, void (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;

	buf = rq->to_clean;
	while (1) {

		skipped = (buf->index != completed_index);

		(*buf_service)(rq, cq_desc, buf, skipped, opaque);

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		/* stop once the descriptor named by the CQ entry is done */
		if (!skipped)
			break;

		buf = rq->to_clean;
	}
}
176
/* Replenish the RQ: call buf_fill once per available descriptor until
 * only the reserved slot remains.  Returns 0, or the first non-zero
 * error from buf_fill.
 */
static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int rc = 0;

	while (vnic_rq_desc_avail(rq) > 1) {
		rc = (*buf_fill)(rq);
		if (rc)
			break;
	}

	return rc;
}
191
192void vnic_rq_free(struct vnic_rq *rq);
193int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
194 unsigned int desc_count, unsigned int desc_size);
195void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
196 unsigned int error_interrupt_enable,
197 unsigned int error_interrupt_offset);
198unsigned int vnic_rq_error_status(struct vnic_rq *rq);
199void vnic_rq_enable(struct vnic_rq *rq);
200int vnic_rq_disable(struct vnic_rq *rq);
201void vnic_rq_clean(struct vnic_rq *rq,
202 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
203
204#endif /* _VNIC_RQ_H_ */
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
new file mode 100644
index 000000000000..e325d65d7c34
--- /dev/null
+++ b/drivers/net/enic/vnic_rss.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 */
5
6#ifndef _VNIC_RSS_H_
7#define _VNIC_RSS_H_
8
/* RSS hash key: 4 groups of 10 key bytes, each padded to 16 bytes so the
 * union overlays cleanly onto the 64-byte raw[] register image.
 */
union vnic_rss_key {
	struct {
		u8 b[10];
		u8 b_pad[6];
	} key[4];
	u64 raw[8];
};
17
18/* RSS cpu array */
19union vnic_rss_cpu {
20 struct {
21 u8 b[4] ;
22 u8 b_pad[4];
23 } cpu[32];
24 u64 raw[32];
25};
26
27void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
28void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
29void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
30void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
31
32#endif /* _VNIC_RSS_H_ */
diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/enic/vnic_stats.h
new file mode 100644
index 000000000000..9ff9614d89b1
--- /dev/null
+++ b/drivers/net/enic/vnic_stats.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_STATS_H_
21#define _VNIC_STATS_H_
22
/* Tx statistics, as reported by the device (64-bit counters) */
struct vnic_tx_stats {
	u64 tx_frames_ok;
	u64 tx_unicast_frames_ok;
	u64 tx_multicast_frames_ok;
	u64 tx_broadcast_frames_ok;
	u64 tx_bytes_ok;
	u64 tx_unicast_bytes_ok;
	u64 tx_multicast_bytes_ok;
	u64 tx_broadcast_bytes_ok;
	u64 tx_drops;
	u64 tx_errors;
	u64 tx_tso;
	u64 rsvd[16];	/* reserved for future counters */
};
38
/* Rx statistics, as reported by the device (64-bit counters).
 * rx_frames_64..rx_frames_to_max are frame-size histogram buckets.
 */
struct vnic_rx_stats {
	u64 rx_frames_ok;
	u64 rx_frames_total;
	u64 rx_unicast_frames_ok;
	u64 rx_multicast_frames_ok;
	u64 rx_broadcast_frames_ok;
	u64 rx_bytes_ok;
	u64 rx_unicast_bytes_ok;
	u64 rx_multicast_bytes_ok;
	u64 rx_broadcast_bytes_ok;
	u64 rx_drop;
	u64 rx_no_bufs;
	u64 rx_errors;
	u64 rx_rss;
	u64 rx_crc_errors;
	u64 rx_frames_64;
	u64 rx_frames_127;
	u64 rx_frames_255;
	u64 rx_frames_511;
	u64 rx_frames_1023;
	u64 rx_frames_1518;
	u64 rx_frames_to_max;
	u64 rsvd[16];	/* reserved for future counters */
};
64
/* Combined device statistics block: Tx counters followed by Rx counters */
struct vnic_stats {
	struct vnic_tx_stats tx;
	struct vnic_rx_stats rx;
};
69
70#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
new file mode 100644
index 000000000000..a576d04708ef
--- /dev/null
+++ b/drivers/net/enic/vnic_wq.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25
26#include "vnic_dev.h"
27#include "vnic_wq.h"
28
/* Allocate the vnic_wq_buf bookkeeping entries for every descriptor and
 * link them into a singly linked ring that mirrors the descriptor ring.
 * Returns 0 on success, -ENOMEM on allocation failure (the caller,
 * vnic_wq_alloc(), frees any partially allocated blocks via vnic_wq_free()).
 */
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
	struct vnic_wq_buf *buf;
	struct vnic_dev *vdev;
	unsigned int i, j, count = wq->ring.desc_count;
	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);

	vdev = wq->vdev;

	/* Allocate the buf entries in fixed-size blocks of
	 * VNIC_WQ_BUF_BLK_ENTRIES to avoid one large allocation.
	 */
	for (i = 0; i < blks; i++) {
		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!wq->bufs[i]) {
			printk(KERN_ERR "Failed to alloc wq_bufs\n");
			return -ENOMEM;
		}
	}

	/* Chain every buf to its successor; the last in-use entry (index
	 * count-1) wraps back to bufs[0], closing the ring.  Each buf also
	 * caches a pointer to its descriptor within the DMA ring.
	 */
	for (i = 0; i < blks; i++) {
		buf = wq->bufs[i];
		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
			buf->desc = (u8 *)wq->ring.descs +
				wq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				buf->next = wq->bufs[0];
				break;
			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
				buf->next = wq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	return 0;
}
68
69void vnic_wq_free(struct vnic_wq *wq)
70{
71 struct vnic_dev *vdev;
72 unsigned int i;
73
74 vdev = wq->vdev;
75
76 vnic_dev_free_desc_ring(vdev, &wq->ring);
77
78 for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
79 kfree(wq->bufs[i]);
80 wq->bufs[i] = NULL;
81 }
82
83 wq->ctrl = NULL;
84}
85
86int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
87 unsigned int desc_count, unsigned int desc_size)
88{
89 int err;
90
91 wq->index = index;
92 wq->vdev = vdev;
93
94 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
95 if (!wq->ctrl) {
96 printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
97 return -EINVAL;
98 }
99
100 vnic_wq_disable(wq);
101
102 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
103 if (err)
104 return err;
105
106 err = vnic_wq_alloc_bufs(wq);
107 if (err) {
108 vnic_wq_free(wq);
109 return err;
110 }
111
112 return 0;
113}
114
/* Program the WQ control registers; unlike the RQ, the WQ is started
 * from index 0 (fetch_index and posted_index are both reset).
 */
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;

	/* VNIC_PADDR_TARGET tags the ring base address for the device */
	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &wq->ctrl->ring_base);
	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(cq_index, &wq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
	iowrite32(0, &wq->ctrl->error_status);
}
131
/* Read the WQ error status register; non-zero indicates a HW error */
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
	return ioread32(&wq->ctrl->error_status);
}
136
/* Start HW fetching from this WQ */
void vnic_wq_enable(struct vnic_wq *wq)
{
	iowrite32(1, &wq->ctrl->enable);
}
141
142int vnic_wq_disable(struct vnic_wq *wq)
143{
144 unsigned int wait;
145
146 iowrite32(0, &wq->ctrl->enable);
147
148 /* Wait for HW to ACK disable request */
149 for (wait = 0; wait < 100; wait++) {
150 if (!(ioread32(&wq->ctrl->running)))
151 return 0;
152 udelay(1);
153 }
154
155 printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
156
157 return -ETIMEDOUT;
158}
159
/* Return every outstanding buffer to the caller via buf_clean and reset
 * the ring to an empty state starting at index 0.  The WQ must already
 * be disabled.
 */
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;

	/* Cleaning a live WQ would race with HW fetches */
	BUG_ON(ioread32(&wq->ctrl->enable));

	buf = wq->to_clean;

	while (vnic_wq_desc_used(wq) > 0) {

		(*buf_clean)(wq, buf);

		buf = wq->to_clean = buf->next;
		wq->ring.desc_avail++;
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	vnic_dev_clear_desc_ring(&wq->ring);
}
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
new file mode 100644
index 000000000000..7081828d8a42
--- /dev/null
+++ b/drivers/net/enic/vnic_wq.h
@@ -0,0 +1,154 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_WQ_H_
21#define _VNIC_WQ_H_
22
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26#include "vnic_cq.h"
27
/* Work queue control registers (memory-mapped).  Each 32-bit register
 * occupies a 64-bit slot — the padN members hold the register layout to
 * 8-byte strides; offsets in comments are from the WQ resource base.
 */
struct vnic_wq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 dca_value;			/* 0x38 */
	u32 pad6;
	u32 error_interrupt_enable;	/* 0x40 */
	u32 pad7;
	u32 error_interrupt_offset;	/* 0x48 */
	u32 pad8;
	u32 error_status;		/* 0x50 */
	u32 pad9;
};
52
/* Per-descriptor SW bookkeeping; entries form a singly linked ring
 * built by vnic_wq_alloc_bufs().
 */
struct vnic_wq_buf {
	struct vnic_wq_buf *next;	/* next entry in the ring */
	dma_addr_t dma_addr;		/* DMA address of the posted buffer */
	void *os_buf;			/* OS buffer; set only on EOP (see vnic_wq_post) */
	unsigned int len;		/* posted buffer length */
	unsigned int index;		/* descriptor index within the ring */
	int sop;			/* start-of-packet flag */
	void *desc;			/* this entry's descriptor in the ring */
};
62
/* Break the vnic_wq_buf allocations into blocks of 64 entries;
 * BLKS_MAX is sized for the largest supported ring of 4096 descriptors.
 */
#define VNIC_WQ_BUF_BLK_ENTRIES 64
#define VNIC_WQ_BUF_BLK_SZ \
	(VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
70
/* Work queue state: control registers, DMA descriptor ring, and the
 * SW buf ring with its produce (to_use) / consume (to_clean) cursors.
 */
struct vnic_wq {
	unsigned int index;			/* WQ number on the device */
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
	struct vnic_wq_buf *to_use;		/* next desc to post */
	struct vnic_wq_buf *to_clean;		/* next desc awaiting completion */
	unsigned int pkts_outstanding;
};
81
/* Number of descriptors SW may still post */
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
	/* how many does SW own? */
	return wq->ring.desc_avail;
}
87
/* Number of descriptors currently posted to HW.  One descriptor is
 * always held back (the -1) — NOTE(review): presumably to disambiguate
 * a full ring from an empty one; confirm against HW spec.
 */
static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
{
	/* how many does HW own? */
	return wq->ring.desc_count - wq->ring.desc_avail - 1;
}
93
/* Descriptor that the next vnic_wq_post() will fill */
static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
{
	return wq->to_use->desc;
}
98
/* Post one transmit fragment.  os_buf is only recorded on the EOP
 * fragment, so completion handling frees the OS buffer exactly once per
 * packet.  posted_index is written only on EOP, publishing the whole
 * packet to HW in one MMIO write.
 */
static inline void vnic_wq_post(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr,
	unsigned int len, int sop, int eop)
{
	struct vnic_wq_buf *buf = wq->to_use;

	buf->sop = sop;
	buf->os_buf = eop ? os_buf : NULL;
	buf->dma_addr = dma_addr;
	buf->len = len;

	/* posted_index points one past the last valid descriptor */
	buf = buf->next;
	if (eop)
		iowrite32(buf->index, &wq->ctrl->posted_index);
	wq->to_use = buf;

	wq->ring.desc_avail--;
}
117
/* Service completed descriptors from to_clean up to and including
 * completed_index, invoking buf_service for each and returning the
 * descriptors to SW.
 */
static inline void vnic_wq_service(struct vnic_wq *wq,
	struct cq_desc *cq_desc, u16 completed_index,
	void (*buf_service)(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
	void *opaque)
{
	struct vnic_wq_buf *buf;

	buf = wq->to_clean;
	while (1) {

		(*buf_service)(wq, cq_desc, buf, opaque);

		wq->ring.desc_avail++;

		wq->to_clean = buf->next;

		/* stop once the descriptor named by the CQ entry is done */
		if (buf->index == completed_index)
			break;

		buf = wq->to_clean;
	}
}
141
142void vnic_wq_free(struct vnic_wq *wq);
143int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
144 unsigned int desc_count, unsigned int desc_size);
145void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
146 unsigned int error_interrupt_enable,
147 unsigned int error_interrupt_offset);
148unsigned int vnic_wq_error_status(struct vnic_wq *wq);
149void vnic_wq_enable(struct vnic_wq *wq);
150int vnic_wq_disable(struct vnic_wq *wq);
151void vnic_wq_clean(struct vnic_wq *wq,
152 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
153
154#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/enic/wq_enet_desc.h
new file mode 100644
index 000000000000..483596c2d8bf
--- /dev/null
+++ b/drivers/net/enic/wq_enet_desc.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _WQ_ENET_DESC_H_
21#define _WQ_ENET_DESC_H_
22
/* Ethernet work queue descriptor: 16B, little-endian as consumed by HW.
 * mss_loopback and header_length_flags pack multiple fields — see the
 * WQ_ENET_* shift/mask macros and wq_enet_desc_enc()/dec().
 */
struct wq_enet_desc {
	__le64 address;			/* DMA address of the fragment */
	__le16 length;			/* fragment length (14 bits used) */
	__le16 mss_loopback;		/* TSO MSS + loopback bit */
	__le16 header_length_flags;	/* hdr len, offload mode, EOP/CQ/FCoE/VLAN flags */
	__le16 vlan_tag;		/* tag to insert when the flag is set */
};
31
32#define WQ_ENET_ADDR_BITS 64
33#define WQ_ENET_LEN_BITS 14
34#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
35#define WQ_ENET_MSS_BITS 14
36#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
37#define WQ_ENET_MSS_SHIFT 2
38#define WQ_ENET_LOOPBACK_SHIFT 1
39#define WQ_ENET_HDRLEN_BITS 10
40#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
41#define WQ_ENET_FLAGS_OM_BITS 2
42#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
43#define WQ_ENET_FLAGS_EOP_SHIFT 12
44#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
45#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
46#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
47
48#define WQ_ENET_OFFLOAD_MODE_CSUM 0
49#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
50#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
51#define WQ_ENET_OFFLOAD_MODE_TSO 3
52
/* Encode the given fields into a little-endian WQ ethernet descriptor.
 * Multi-bit fields are masked to width, single-bit flags to one bit, so
 * oversized inputs cannot corrupt neighbouring fields.
 */
static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
	u64 address, u16 length, u16 mss, u16 header_length,
	u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
	u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
{
	desc->address = cpu_to_le64(address);
	desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
	/* bits 15:2 = mss, bit 1 = loopback */
	desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
		WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
	/* bits 9:0 = header length, 11:10 = offload mode, 12 = EOP,
	 * 13 = CQ entry, 14 = FCoE encap, 15 = VLAN tag insert
	 */
	desc->header_length_flags = cpu_to_le16(
		(header_length & WQ_ENET_HDRLEN_MASK) |
		(offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
		(eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
		(cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
		(fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
		(vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
	desc->vlan_tag = cpu_to_le16(vlan_tag);
}
71
/* Decode a little-endian WQ ethernet descriptor into its component
 * fields — exact inverse of wq_enet_desc_enc().
 */
static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
	u64 *address, u16 *length, u16 *mss, u16 *header_length,
	u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
	u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
{
	*address = le64_to_cpu(desc->address);
	*length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
	*mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
		WQ_ENET_MSS_MASK;
	*loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
		WQ_ENET_LOOPBACK_SHIFT) & 1);
	*header_length = le16_to_cpu(desc->header_length_flags) &
		WQ_ENET_HDRLEN_MASK;
	*offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
		WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
	*eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
		WQ_ENET_FLAGS_EOP_SHIFT) & 1);
	*cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
		WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
	*fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
		WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
	*vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
		WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
	*vlan_tag = le16_to_cpu(desc->vlan_tag);
}
97
98#endif /* _WQ_ENET_DESC_H_ */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 331b86b01fa9..0b6ecef9a849 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5975,10 +5975,12 @@ static void nv_shutdown(struct pci_dev *pdev)
5975 if (netif_running(dev)) 5975 if (netif_running(dev))
5976 nv_close(dev); 5976 nv_close(dev);
5977 5977
5978 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
5979 pci_enable_wake(pdev, PCI_D3cold, np->wolenabled);
5980 pci_disable_device(pdev); 5978 pci_disable_device(pdev);
5981 pci_set_power_state(pdev, PCI_D3hot); 5979 if (system_state == SYSTEM_POWER_OFF) {
5980 if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
5981 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
5982 pci_set_power_state(pdev, PCI_D3hot);
5983 }
5982} 5984}
5983#else 5985#else
5984#define nv_suspend NULL 5986#define nv_suspend NULL
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
index eaa7262dc079..717dc38b6858 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ibm_newemac/mal.h
@@ -102,7 +102,7 @@
102/* MAL V1 IER bits */ 102/* MAL V1 IER bits */
103#define MAL1_IER_NWE 0x00000008 103#define MAL1_IER_NWE 0x00000008
104#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE 104#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE
105#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_OTE | \ 105#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
106 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE) 106 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
107 107
108/* MAL V2 IER bits */ 108/* MAL V2 IER bits */
@@ -110,7 +110,7 @@
110#define MAL2_IER_PRE 0x00000040 110#define MAL2_IER_PRE 0x00000040
111#define MAL2_IER_PWE 0x00000020 111#define MAL2_IER_PWE 0x00000020
112#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE) 112#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
113#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_OTE | \ 113#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
114 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE) 114 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
115 115
116 116
diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
index 37bfeea8788a..9164abb72d9b 100644
--- a/drivers/net/ibm_newemac/phy.c
+++ b/drivers/net/ibm_newemac/phy.c
@@ -321,7 +321,7 @@ static struct mii_phy_def bcm5248_phy_def = {
321 321
322static int m88e1111_init(struct mii_phy *phy) 322static int m88e1111_init(struct mii_phy *phy)
323{ 323{
324 pr_debug("%s: Marvell 88E1111 Ethernet\n", __FUNCTION__); 324 pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__);
325 phy_write(phy, 0x14, 0x0ce3); 325 phy_write(phy, 0x14, 0x0ce3);
326 phy_write(phy, 0x18, 0x4101); 326 phy_write(phy, 0x18, 0x4101);
327 phy_write(phy, 0x09, 0x0e00); 327 phy_write(phy, 0x09, 0x0e00);
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 634c4c9d87be..93d02efa9a0a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3563,10 +3563,6 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3563 struct net_device *netdev = adapter->netdev; 3563 struct net_device *netdev = adapter->netdev;
3564 int work_done = 0; 3564 int work_done = 0;
3565 3565
3566 /* Keep link state information with original netdev */
3567 if (!netif_carrier_ok(netdev))
3568 goto quit_polling;
3569
3570#ifdef CONFIG_DCA 3566#ifdef CONFIG_DCA
3571 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3567 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3572 igb_update_rx_dca(rx_ring); 3568 igb_update_rx_dca(rx_ring);
@@ -3576,7 +3572,6 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3576 3572
3577 /* If not enough Rx work done, exit the polling mode */ 3573 /* If not enough Rx work done, exit the polling mode */
3578 if ((work_done == 0) || !netif_running(netdev)) { 3574 if ((work_done == 0) || !netif_running(netdev)) {
3579quit_polling:
3580 netif_rx_complete(netdev, napi); 3575 netif_rx_complete(netdev, napi);
3581 3576
3582 if (adapter->itr_setting & 3) { 3577 if (adapter->itr_setting & 3) {
@@ -3617,16 +3612,14 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3617 unsigned int i; 3612 unsigned int i;
3618 u32 head, oldhead; 3613 u32 head, oldhead;
3619 unsigned int count = 0; 3614 unsigned int count = 0;
3620 bool cleaned = false;
3621 bool retval = true;
3622 unsigned int total_bytes = 0, total_packets = 0; 3615 unsigned int total_bytes = 0, total_packets = 0;
3616 bool retval = true;
3623 3617
3624 rmb(); 3618 rmb();
3625 head = get_head(tx_ring); 3619 head = get_head(tx_ring);
3626 i = tx_ring->next_to_clean; 3620 i = tx_ring->next_to_clean;
3627 while (1) { 3621 while (1) {
3628 while (i != head) { 3622 while (i != head) {
3629 cleaned = true;
3630 tx_desc = E1000_TX_DESC(*tx_ring, i); 3623 tx_desc = E1000_TX_DESC(*tx_ring, i);
3631 buffer_info = &tx_ring->buffer_info[i]; 3624 buffer_info = &tx_ring->buffer_info[i];
3632 skb = buffer_info->skb; 3625 skb = buffer_info->skb;
@@ -3643,7 +3636,6 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3643 } 3636 }
3644 3637
3645 igb_unmap_and_free_tx_resource(adapter, buffer_info); 3638 igb_unmap_and_free_tx_resource(adapter, buffer_info);
3646 tx_desc->upper.data = 0;
3647 3639
3648 i++; 3640 i++;
3649 if (i == tx_ring->count) 3641 if (i == tx_ring->count)
@@ -3665,7 +3657,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3665done_cleaning: 3657done_cleaning:
3666 tx_ring->next_to_clean = i; 3658 tx_ring->next_to_clean = i;
3667 3659
3668 if (unlikely(cleaned && 3660 if (unlikely(count &&
3669 netif_carrier_ok(netdev) && 3661 netif_carrier_ok(netdev) &&
3670 IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) { 3662 IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
3671 /* Make sure that anybody stopping the queue after this 3663 /* Make sure that anybody stopping the queue after this
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 18f4b3a96aed..9c926d205de9 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -165,7 +165,7 @@ static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
165 unsigned iobase = pci_resource_start(pdev, 0); 165 unsigned iobase = pci_resource_start(pdev, 0);
166 unsigned i; 166 unsigned i;
167 167
168 seq_printf(seq, "\n%s (vid/did: %04x/%04x)\n", 168 seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n",
169 pci_name(pdev), (int)pdev->vendor, (int)pdev->device); 169 pci_name(pdev), (int)pdev->vendor, (int)pdev->device);
170 seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state); 170 seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
171 seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n", 171 seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 804698fc6a8f..d85717e3022a 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -85,7 +85,7 @@ struct ixgb_adapter;
85#define DPRINTK(nlevel, klevel, fmt, args...) \ 85#define DPRINTK(nlevel, klevel, fmt, args...) \
86 (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ 86 (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
87 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ 87 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
88 __FUNCTION__ , ## args)) 88 __func__ , ## args))
89 89
90 90
91/* TX/RX descriptor defines */ 91/* TX/RX descriptor defines */
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 956914a5028d..2198b77c53ed 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -37,17 +36,15 @@
37#include "ixgbe_type.h" 36#include "ixgbe_type.h"
38#include "ixgbe_common.h" 37#include "ixgbe_common.h"
39 38
40#ifdef CONFIG_DCA 39#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
41#include <linux/dca.h> 40#include <linux/dca.h>
42#endif 41#endif
43 42
44#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
45
46#define PFX "ixgbe: " 43#define PFX "ixgbe: "
47#define DPRINTK(nlevel, klevel, fmt, args...) \ 44#define DPRINTK(nlevel, klevel, fmt, args...) \
48 ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ 45 ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
49 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ 46 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
50 __FUNCTION__ , ## args))) 47 __func__ , ## args)))
51 48
52/* TX/RX descriptor defines */ 49/* TX/RX descriptor defines */
53#define IXGBE_DEFAULT_TXD 1024 50#define IXGBE_DEFAULT_TXD 1024
@@ -58,23 +55,14 @@
58#define IXGBE_MAX_RXD 4096 55#define IXGBE_MAX_RXD 4096
59#define IXGBE_MIN_RXD 64 56#define IXGBE_MIN_RXD 64
60 57
61#define IXGBE_DEFAULT_RXQ 1
62#define IXGBE_MAX_RXQ 1
63#define IXGBE_MIN_RXQ 1
64
65#define IXGBE_DEFAULT_ITR_RX_USECS 125 /* 8k irqs/sec */
66#define IXGBE_DEFAULT_ITR_TX_USECS 250 /* 4k irqs/sec */
67#define IXGBE_MIN_ITR_USECS 100 /* 500k irqs/sec */
68#define IXGBE_MAX_ITR_USECS 10000 /* 100 irqs/sec */
69
70/* flow control */ 58/* flow control */
71#define IXGBE_DEFAULT_FCRTL 0x10000 59#define IXGBE_DEFAULT_FCRTL 0x10000
72#define IXGBE_MIN_FCRTL 0 60#define IXGBE_MIN_FCRTL 0x40
73#define IXGBE_MAX_FCRTL 0x7FF80 61#define IXGBE_MAX_FCRTL 0x7FF80
74#define IXGBE_DEFAULT_FCRTH 0x20000 62#define IXGBE_DEFAULT_FCRTH 0x20000
75#define IXGBE_MIN_FCRTH 0 63#define IXGBE_MIN_FCRTH 0x600
76#define IXGBE_MAX_FCRTH 0x7FFF0 64#define IXGBE_MAX_FCRTH 0x7FFF0
77#define IXGBE_DEFAULT_FCPAUSE 0x6800 /* may be too long */ 65#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
78#define IXGBE_MIN_FCPAUSE 0 66#define IXGBE_MIN_FCPAUSE 0
79#define IXGBE_MAX_FCPAUSE 0xFFFF 67#define IXGBE_MAX_FCPAUSE 0xFFFF
80 68
@@ -88,9 +76,6 @@
88 76
89#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) 77#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
90 78
91/* How many Tx Descriptors do we need to call netif_wake_queue? */
92#define IXGBE_TX_QUEUE_WAKE 16
93
94/* How many Rx Buffers do we bundle into one write to the hardware ? */ 79/* How many Rx Buffers do we bundle into one write to the hardware ? */
95#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 80#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
96 81
@@ -119,6 +104,7 @@ struct ixgbe_rx_buffer {
119 dma_addr_t dma; 104 dma_addr_t dma;
120 struct page *page; 105 struct page *page;
121 dma_addr_t page_dma; 106 dma_addr_t page_dma;
107 unsigned int page_offset;
122}; 108};
123 109
124struct ixgbe_queue_stats { 110struct ixgbe_queue_stats {
@@ -150,22 +136,20 @@ struct ixgbe_ring {
150 * offset associated with this ring, which is different 136 * offset associated with this ring, which is different
151 * for DCE and RSS modes */ 137 * for DCE and RSS modes */
152 138
153#ifdef CONFIG_DCA 139#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
154 /* cpu for tx queue */ 140 /* cpu for tx queue */
155 int cpu; 141 int cpu;
156#endif 142#endif
157 struct net_lro_mgr lro_mgr; 143 struct net_lro_mgr lro_mgr;
158 bool lro_used; 144 bool lro_used;
159 struct ixgbe_queue_stats stats; 145 struct ixgbe_queue_stats stats;
160 u8 v_idx; /* maps directly to the index for this ring in the hardware 146 u16 v_idx; /* maps directly to the index for this ring in the hardware
161 * vector array, can also be used for finding the bit in EICR 147 * vector array, can also be used for finding the bit in EICR
162 * and friends that represents the vector for this ring */ 148 * and friends that represents the vector for this ring */
163 149
164 u32 eims_value;
165 u16 itr_register;
166 150
167 char name[IFNAMSIZ + 5];
168 u16 work_limit; /* max work per interrupt */ 151 u16 work_limit; /* max work per interrupt */
152 u16 rx_buf_len;
169}; 153};
170 154
171#define RING_F_VMDQ 1 155#define RING_F_VMDQ 1
@@ -190,8 +174,8 @@ struct ixgbe_q_vector {
190 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ 174 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
191 u8 rxr_count; /* Rx ring count assigned to this vector */ 175 u8 rxr_count; /* Rx ring count assigned to this vector */
192 u8 txr_count; /* Tx ring count assigned to this vector */ 176 u8 txr_count; /* Tx ring count assigned to this vector */
193 u8 tx_eitr; 177 u8 tx_itr;
194 u8 rx_eitr; 178 u8 rx_itr;
195 u32 eitr; 179 u32 eitr;
196}; 180};
197 181
@@ -228,7 +212,6 @@ struct ixgbe_adapter {
228 struct timer_list watchdog_timer; 212 struct timer_list watchdog_timer;
229 struct vlan_group *vlgrp; 213 struct vlan_group *vlgrp;
230 u16 bd_number; 214 u16 bd_number;
231 u16 rx_buf_len;
232 struct work_struct reset_task; 215 struct work_struct reset_task;
233 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS]; 216 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
234 char name[MAX_MSIX_COUNT][IFNAMSIZ + 5]; 217 char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
@@ -240,7 +223,9 @@ struct ixgbe_adapter {
240 223
241 /* TX */ 224 /* TX */
242 struct ixgbe_ring *tx_ring; /* One per active queue */ 225 struct ixgbe_ring *tx_ring; /* One per active queue */
226 int num_tx_queues;
243 u64 restart_queue; 227 u64 restart_queue;
228 u64 hw_csum_tx_good;
244 u64 lsc_int; 229 u64 lsc_int;
245 u64 hw_tso_ctxt; 230 u64 hw_tso_ctxt;
246 u64 hw_tso6_ctxt; 231 u64 hw_tso6_ctxt;
@@ -249,12 +234,10 @@ struct ixgbe_adapter {
249 234
250 /* RX */ 235 /* RX */
251 struct ixgbe_ring *rx_ring; /* One per active queue */ 236 struct ixgbe_ring *rx_ring; /* One per active queue */
252 u64 hw_csum_tx_good; 237 int num_rx_queues;
253 u64 hw_csum_rx_error; 238 u64 hw_csum_rx_error;
254 u64 hw_csum_rx_good; 239 u64 hw_csum_rx_good;
255 u64 non_eop_descs; 240 u64 non_eop_descs;
256 int num_tx_queues;
257 int num_rx_queues;
258 int num_msix_vectors; 241 int num_msix_vectors;
259 struct ixgbe_ring_feature ring_feature[3]; 242 struct ixgbe_ring_feature ring_feature[3];
260 struct msix_entry *msix_entries; 243 struct msix_entry *msix_entries;
@@ -267,15 +250,28 @@ struct ixgbe_adapter {
267 * thus the additional *_CAPABLE flags. 250 * thus the additional *_CAPABLE flags.
268 */ 251 */
269 u32 flags; 252 u32 flags;
270#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1 << 0) 253#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
271#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) 254#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
272#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2) 255#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
273#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3) 256#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
274#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4) 257#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
275#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5) 258#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
276#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 6) 259#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
277#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 7) 260#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
278#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) 261#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
262#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
263#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
264#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
265#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
266#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
267#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
268#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
269#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
270#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
271#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
272
273/* default to trying for four seconds */
274#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
279 275
280 /* OS defined structs */ 276 /* OS defined structs */
281 struct net_device *netdev; 277 struct net_device *netdev;
@@ -288,14 +284,21 @@ struct ixgbe_adapter {
288 struct ixgbe_hw_stats stats; 284 struct ixgbe_hw_stats stats;
289 285
290 /* Interrupt Throttle Rate */ 286 /* Interrupt Throttle Rate */
291 u32 rx_eitr; 287 u32 eitr_param;
292 u32 tx_eitr;
293 288
294 unsigned long state; 289 unsigned long state;
295 u64 tx_busy; 290 u64 tx_busy;
296 u64 lro_aggregated; 291 u64 lro_aggregated;
297 u64 lro_flushed; 292 u64 lro_flushed;
298 u64 lro_no_desc; 293 u64 lro_no_desc;
294 unsigned int tx_ring_count;
295 unsigned int rx_ring_count;
296
297 u32 link_speed;
298 bool link_up;
299 unsigned long link_check_timeout;
300
301 struct work_struct watchdog_task;
299}; 302};
300 303
301enum ixbge_state_t { 304enum ixbge_state_t {
@@ -317,11 +320,11 @@ extern int ixgbe_up(struct ixgbe_adapter *adapter);
317extern void ixgbe_down(struct ixgbe_adapter *adapter); 320extern void ixgbe_down(struct ixgbe_adapter *adapter);
318extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); 321extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
319extern void ixgbe_reset(struct ixgbe_adapter *adapter); 322extern void ixgbe_reset(struct ixgbe_adapter *adapter);
320extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
321extern void ixgbe_set_ethtool_ops(struct net_device *netdev); 323extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
322extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 324extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
323 struct ixgbe_ring *rxdr); 325extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
324extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 326extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
325 struct ixgbe_ring *txdr); 327extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
328extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
326 329
327#endif /* _IXGBE_H_ */ 330#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index f96358b641af..7cddcfba809e 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -36,67 +35,62 @@
36#define IXGBE_82598_MAX_TX_QUEUES 32 35#define IXGBE_82598_MAX_TX_QUEUES 32
37#define IXGBE_82598_MAX_RX_QUEUES 64 36#define IXGBE_82598_MAX_RX_QUEUES 64
38#define IXGBE_82598_RAR_ENTRIES 16 37#define IXGBE_82598_RAR_ENTRIES 16
38#define IXGBE_82598_MC_TBL_SIZE 128
39#define IXGBE_82598_VFT_TBL_SIZE 128
39 40
40static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); 41static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
41static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, 42 ixgbe_link_speed *speed,
42 bool *autoneg); 43 bool *autoneg);
43static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
44 u32 *speed, bool *autoneg);
45static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
46static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
47static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
48 bool *link_up);
49static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
50 bool autoneg,
51 bool autoneg_wait_to_complete);
52static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); 44static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
53static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, 45static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
54 bool autoneg, 46 ixgbe_link_speed speed,
55 bool autoneg_wait_to_complete); 47 bool autoneg,
56static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); 48 bool autoneg_wait_to_complete);
57
58 49
50/**
51 */
59static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) 52static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
60{ 53{
61 hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 54 struct ixgbe_mac_info *mac = &hw->mac;
62 hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 55 struct ixgbe_phy_info *phy = &hw->phy;
63 hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES; 56
64 57 /* Call PHY identify routine to get the phy type */
65 /* PHY ops are filled in by default properly for Fiber only */ 58 ixgbe_identify_phy_generic(hw);
66 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 59
67 hw->mac.ops.setup_link = &ixgbe_setup_copper_link_82598; 60 /* PHY Init */
68 hw->mac.ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598; 61 switch (phy->type) {
69 hw->mac.ops.get_link_settings = 62 default:
70 &ixgbe_get_copper_link_settings_82598; 63 break;
71
72 /* Call PHY identify routine to get the phy type */
73 ixgbe_identify_phy(hw);
74
75 switch (hw->phy.type) {
76 case ixgbe_phy_tn:
77 hw->phy.ops.setup_link = &ixgbe_setup_tnx_phy_link;
78 hw->phy.ops.check_link = &ixgbe_check_tnx_phy_link;
79 hw->phy.ops.setup_link_speed =
80 &ixgbe_setup_tnx_phy_link_speed;
81 break;
82 default:
83 break;
84 }
85 } 64 }
86 65
66 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
67 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
68 mac->ops.setup_link_speed =
69 &ixgbe_setup_copper_link_speed_82598;
70 mac->ops.get_link_capabilities =
71 &ixgbe_get_copper_link_capabilities_82598;
72 }
73
74 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
75 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
76 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
77 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
78 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
79
87 return 0; 80 return 0;
88} 81}
89 82
90/** 83/**
91 * ixgbe_get_link_settings_82598 - Determines default link settings 84 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
92 * @hw: pointer to hardware structure 85 * @hw: pointer to hardware structure
93 * @speed: pointer to link speed 86 * @speed: pointer to link speed
94 * @autoneg: boolean auto-negotiation value 87 * @autoneg: boolean auto-negotiation value
95 * 88 *
96 * Determines the default link settings by reading the AUTOC register. 89 * Determines the link capabilities by reading the AUTOC register.
97 **/ 90 **/
98static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, 91static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
99 bool *autoneg) 92 ixgbe_link_speed *speed,
93 bool *autoneg)
100{ 94{
101 s32 status = 0; 95 s32 status = 0;
102 s32 autoc_reg; 96 s32 autoc_reg;
@@ -145,15 +139,16 @@ static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
145} 139}
146 140
147/** 141/**
148 * ixgbe_get_copper_link_settings_82598 - Determines default link settings 142 * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
149 * @hw: pointer to hardware structure 143 * @hw: pointer to hardware structure
150 * @speed: pointer to link speed 144 * @speed: pointer to link speed
151 * @autoneg: boolean auto-negotiation value 145 * @autoneg: boolean auto-negotiation value
152 * 146 *
153 * Determines the default link settings by reading the AUTOC register. 147 * Determines the link capabilities by reading the AUTOC register.
154 **/ 148 **/
155static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, 149s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
156 u32 *speed, bool *autoneg) 150 ixgbe_link_speed *speed,
151 bool *autoneg)
157{ 152{
158 s32 status = IXGBE_ERR_LINK_SETUP; 153 s32 status = IXGBE_ERR_LINK_SETUP;
159 u16 speed_ability; 154 u16 speed_ability;
@@ -161,9 +156,9 @@ static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
161 *speed = 0; 156 *speed = 0;
162 *autoneg = true; 157 *autoneg = true;
163 158
164 status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, 159 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
165 IXGBE_MDIO_PMA_PMD_DEV_TYPE, 160 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
166 &speed_ability); 161 &speed_ability);
167 162
168 if (status == 0) { 163 if (status == 0) {
169 if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) 164 if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
@@ -191,11 +186,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
191 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 186 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
192 case IXGBE_DEV_ID_82598EB_CX4: 187 case IXGBE_DEV_ID_82598EB_CX4:
193 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: 188 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
189 case IXGBE_DEV_ID_82598EB_XF_LR:
194 media_type = ixgbe_media_type_fiber; 190 media_type = ixgbe_media_type_fiber;
195 break; 191 break;
196 case IXGBE_DEV_ID_82598AT_DUAL_PORT:
197 media_type = ixgbe_media_type_copper;
198 break;
199 default: 192 default:
200 media_type = ixgbe_media_type_unknown; 193 media_type = ixgbe_media_type_unknown;
201 break; 194 break;
@@ -205,6 +198,122 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
205} 198}
206 199
207/** 200/**
201 * ixgbe_setup_fc_82598 - Configure flow control settings
202 * @hw: pointer to hardware structure
203 * @packetbuf_num: packet buffer number (0-7)
204 *
205 * Configures the flow control settings based on SW configuration. This
206 * function is used for 802.3x flow control configuration only.
207 **/
208s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
209{
210 u32 frctl_reg;
211 u32 rmcs_reg;
212
213 if (packetbuf_num < 0 || packetbuf_num > 7) {
214 hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
215 " 0-7\n", packetbuf_num);
216 }
217
218 frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
219 frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
220
221 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
222 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
223
224 /*
225 * 10 gig parts do not have a word in the EEPROM to determine the
226 * default flow control setting, so we explicitly set it to full.
227 */
228 if (hw->fc.type == ixgbe_fc_default)
229 hw->fc.type = ixgbe_fc_full;
230
231 /*
232 * We want to save off the original Flow Control configuration just in
233 * case we get disconnected and then reconnected into a different hub
234 * or switch with different Flow Control capabilities.
235 */
236 hw->fc.original_type = hw->fc.type;
237
238 /*
239 * The possible values of the "flow_control" parameter are:
240 * 0: Flow control is completely disabled
241 * 1: Rx flow control is enabled (we can receive pause frames but not
242 * send pause frames).
243 * 2: Tx flow control is enabled (we can send pause frames but we do not
244 * support receiving pause frames)
245 * 3: Both Rx and Tx flow control (symmetric) are enabled.
246 * other: Invalid.
247 */
248 switch (hw->fc.type) {
249 case ixgbe_fc_none:
250 break;
251 case ixgbe_fc_rx_pause:
252 /*
253 * Rx Flow control is enabled,
254 * and Tx Flow control is disabled.
255 */
256 frctl_reg |= IXGBE_FCTRL_RFCE;
257 break;
258 case ixgbe_fc_tx_pause:
259 /*
260 * Tx Flow control is enabled, and Rx Flow control is disabled,
261 * by a software over-ride.
262 */
263 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
264 break;
265 case ixgbe_fc_full:
266 /*
267 * Flow control (both Rx and Tx) is enabled by a software
268 * over-ride.
269 */
270 frctl_reg |= IXGBE_FCTRL_RFCE;
271 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
272 break;
273 default:
274 /* We should never get here. The value should be 0-3. */
275 hw_dbg(hw, "Flow control param set incorrectly\n");
276 break;
277 }
278
279 /* Enable 802.3x based flow control settings. */
280 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
281 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
282
283 /*
284 * Check for invalid software configuration, zeros are completely
285 * invalid for all parameters used past this point, and if we enable
286 * flow control with zero water marks, we blast flow control packets.
287 */
288 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
289 hw_dbg(hw, "Flow control structure initialized incorrectly\n");
290 return IXGBE_ERR_INVALID_LINK_SETTINGS;
291 }
292
293 /*
294 * We need to set up the Receive Threshold high and low water
295 * marks as well as (optionally) enabling the transmission of
296 * XON frames.
297 */
298 if (hw->fc.type & ixgbe_fc_tx_pause) {
299 if (hw->fc.send_xon) {
300 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
301 (hw->fc.low_water | IXGBE_FCRTL_XONE));
302 } else {
303 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
304 hw->fc.low_water);
305 }
306 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
307 (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
308 }
309
310 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
311 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
312
313 return 0;
314}
315
316/**
208 * ixgbe_setup_mac_link_82598 - Configures MAC link settings 317 * ixgbe_setup_mac_link_82598 - Configures MAC link settings
209 * @hw: pointer to hardware structure 318 * @hw: pointer to hardware structure
210 * 319 *
@@ -248,8 +357,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
248 } 357 }
249 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 358 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
250 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; 359 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
251 hw_dbg(hw, 360 hw_dbg(hw, "Autonegotiation did not complete.\n");
252 "Autonegotiation did not complete.\n");
253 } 361 }
254 } 362 }
255 } 363 }
@@ -259,8 +367,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
259 * case we get disconnected and then reconnected into a different hub 367 * case we get disconnected and then reconnected into a different hub
260 * or switch with different Flow Control capabilities. 368 * or switch with different Flow Control capabilities.
261 */ 369 */
262 hw->fc.type = hw->fc.original_type; 370 hw->fc.original_type = hw->fc.type;
263 ixgbe_setup_fc(hw, 0); 371 ixgbe_setup_fc_82598(hw, 0);
264 372
265 /* Add delay to filter out noises during initial link setup */ 373 /* Add delay to filter out noises during initial link setup */
266 msleep(50); 374 msleep(50);
@@ -273,20 +381,35 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
273 * @hw: pointer to hardware structure 381 * @hw: pointer to hardware structure
274 * @speed: pointer to link speed 382 * @speed: pointer to link speed
275 * @link_up: true is link is up, false otherwise 383 * @link_up: true is link is up, false otherwise
384 * @link_up_wait_to_complete: bool used to wait for link up or not
276 * 385 *
277 * Reads the links register to determine if link is up and the current speed 386 * Reads the links register to determine if link is up and the current speed
278 **/ 387 **/
279static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, 388static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
280 bool *link_up) 389 ixgbe_link_speed *speed, bool *link_up,
390 bool link_up_wait_to_complete)
281{ 391{
282 u32 links_reg; 392 u32 links_reg;
393 u32 i;
283 394
284 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 395 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
285 396 if (link_up_wait_to_complete) {
286 if (links_reg & IXGBE_LINKS_UP) 397 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
287 *link_up = true; 398 if (links_reg & IXGBE_LINKS_UP) {
288 else 399 *link_up = true;
289 *link_up = false; 400 break;
401 } else {
402 *link_up = false;
403 }
404 msleep(100);
405 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
406 }
407 } else {
408 if (links_reg & IXGBE_LINKS_UP)
409 *link_up = true;
410 else
411 *link_up = false;
412 }
290 413
291 if (links_reg & IXGBE_LINKS_SPEED) 414 if (links_reg & IXGBE_LINKS_SPEED)
292 *speed = IXGBE_LINK_SPEED_10GB_FULL; 415 *speed = IXGBE_LINK_SPEED_10GB_FULL;
@@ -296,6 +419,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
296 return 0; 419 return 0;
297} 420}
298 421
422
299/** 423/**
300 * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed 424 * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
301 * @hw: pointer to hardware structure 425 * @hw: pointer to hardware structure
@@ -306,18 +430,18 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
306 * Set the link speed in the AUTOC register and restarts link. 430 * Set the link speed in the AUTOC register and restarts link.
307 **/ 431 **/
308static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, 432static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
309 u32 speed, bool autoneg, 433 ixgbe_link_speed speed, bool autoneg,
310 bool autoneg_wait_to_complete) 434 bool autoneg_wait_to_complete)
311{ 435{
312 s32 status = 0; 436 s32 status = 0;
313 437
314 /* If speed is 10G, then check for CX4 or XAUI. */ 438 /* If speed is 10G, then check for CX4 or XAUI. */
315 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 439 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
316 (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) 440 (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) {
317 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; 441 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
318 else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) 442 } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) {
319 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN; 443 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
320 else if (autoneg) { 444 } else if (autoneg) {
321 /* BX mode - Autonegotiate 1G */ 445 /* BX mode - Autonegotiate 1G */
322 if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD)) 446 if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD))
323 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN; 447 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN;
@@ -336,7 +460,7 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
336 * ixgbe_hw This will write the AUTOC register based on the new 460 * ixgbe_hw This will write the AUTOC register based on the new
337 * stored values 461 * stored values
338 */ 462 */
339 hw->mac.ops.setup_link(hw); 463 ixgbe_setup_mac_link_82598(hw);
340 } 464 }
341 465
342 return status; 466 return status;
@@ -354,18 +478,17 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
354 **/ 478 **/
355static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) 479static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
356{ 480{
357 s32 status = 0; 481 s32 status;
358 482
359 /* Restart autonegotiation on PHY */ 483 /* Restart autonegotiation on PHY */
360 if (hw->phy.ops.setup_link) 484 status = hw->phy.ops.setup_link(hw);
361 status = hw->phy.ops.setup_link(hw);
362 485
363 /* Set MAC to KX/KX4 autoneg, which defaultis to Parallel detection */ 486 /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
364 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); 487 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
365 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; 488 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
366 489
367 /* Set up MAC */ 490 /* Set up MAC */
368 hw->mac.ops.setup_link(hw); 491 ixgbe_setup_mac_link_82598(hw);
369 492
370 return status; 493 return status;
371} 494}
@@ -379,23 +502,23 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
379 * 502 *
380 * Sets the link speed in the AUTOC register in the MAC and restarts link. 503 * Sets the link speed in the AUTOC register in the MAC and restarts link.
381 **/ 504 **/
382static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, 505static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
383 bool autoneg, 506 ixgbe_link_speed speed,
384 bool autoneg_wait_to_complete) 507 bool autoneg,
508 bool autoneg_wait_to_complete)
385{ 509{
386 s32 status = 0; 510 s32 status;
387 511
388 /* Setup the PHY according to input speed */ 512 /* Setup the PHY according to input speed */
389 if (hw->phy.ops.setup_link_speed) 513 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
390 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 514 autoneg_wait_to_complete);
391 autoneg_wait_to_complete);
392 515
393 /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ 516 /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
394 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); 517 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
395 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; 518 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
396 519
397 /* Set up MAC */ 520 /* Set up MAC */
398 hw->mac.ops.setup_link(hw); 521 ixgbe_setup_mac_link_82598(hw);
399 522
400 return status; 523 return status;
401} 524}
@@ -404,7 +527,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
404 * ixgbe_reset_hw_82598 - Performs hardware reset 527 * ixgbe_reset_hw_82598 - Performs hardware reset
405 * @hw: pointer to hardware structure 528 * @hw: pointer to hardware structure
406 * 529 *
407 * Resets the hardware by reseting the transmit and receive units, masks and 530 * Resets the hardware by resetting the transmit and receive units, masks and
408 * clears all interrupts, performing a PHY reset, and performing a link (MAC) 531 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
409 * reset. 532 * reset.
410 **/ 533 **/
@@ -418,35 +541,44 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
418 u8 analog_val; 541 u8 analog_val;
419 542
420 /* Call adapter stop to disable tx/rx and clear interrupts */ 543 /* Call adapter stop to disable tx/rx and clear interrupts */
421 ixgbe_stop_adapter(hw); 544 hw->mac.ops.stop_adapter(hw);
422 545
423 /* 546 /*
424 * Power up the Atlas TX lanes if they are currently powered down. 547 * Power up the Atlas Tx lanes if they are currently powered down.
425 * Atlas TX lanes are powered down for MAC loopback tests, but 548 * Atlas Tx lanes are powered down for MAC loopback tests, but
426 * they are not automatically restored on reset. 549 * they are not automatically restored on reset.
427 */ 550 */
428 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); 551 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
429 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { 552 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
430 /* Enable TX Atlas so packets can be transmitted again */ 553 /* Enable Tx Atlas so packets can be transmitted again */
431 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); 554 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
555 &analog_val);
432 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; 556 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
433 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); 557 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
558 analog_val);
434 559
435 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); 560 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
561 &analog_val);
436 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 562 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
437 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); 563 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
564 analog_val);
438 565
439 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); 566 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
567 &analog_val);
440 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 568 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
441 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); 569 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
570 analog_val);
442 571
443 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); 572 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
573 &analog_val);
444 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 574 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
445 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); 575 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
576 analog_val);
446 } 577 }
447 578
448 /* Reset PHY */ 579 /* Reset PHY */
449 ixgbe_reset_phy(hw); 580 if (hw->phy.reset_disable == false)
581 hw->phy.ops.reset(hw);
450 582
451 /* 583 /*
452 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 584 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
@@ -499,29 +631,311 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
499 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 631 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
500 } else { 632 } else {
501 hw->mac.link_attach_type = 633 hw->mac.link_attach_type =
502 (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); 634 (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
503 hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK); 635 hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
504 hw->mac.link_settings_loaded = true; 636 hw->mac.link_settings_loaded = true;
505 } 637 }
506 638
507 /* Store the permanent mac address */ 639 /* Store the permanent mac address */
508 ixgbe_get_mac_addr(hw, hw->mac.perm_addr); 640 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
509 641
510 return status; 642 return status;
511} 643}
512 644
645/**
646 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
647 * @hw: pointer to hardware struct
648 * @rar: receive address register index to associate with a VMDq index
649 * @vmdq: VMDq set index
650 **/
651s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
652{
653 u32 rar_high;
654
655 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
656 rar_high &= ~IXGBE_RAH_VIND_MASK;
657 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
658 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
659 return 0;
660}
661
662/**
663 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
664 * @hw: pointer to hardware struct
665 * @rar: receive address register index to associate with a VMDq index
666 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
667 **/
668static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
669{
670 u32 rar_high;
671 u32 rar_entries = hw->mac.num_rar_entries;
672
673 if (rar < rar_entries) {
674 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
675 if (rar_high & IXGBE_RAH_VIND_MASK) {
676 rar_high &= ~IXGBE_RAH_VIND_MASK;
677 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
678 }
679 } else {
680 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
681 }
682
683 return 0;
684}
685
686/**
687 * ixgbe_set_vfta_82598 - Set VLAN filter table
688 * @hw: pointer to hardware structure
689 * @vlan: VLAN id to write to VLAN filter
690 * @vind: VMDq output index that maps queue to VLAN id in VFTA
691 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
692 *
693 * Turn on/off specified VLAN in the VLAN filter table.
694 **/
695s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
696 bool vlan_on)
697{
698 u32 regindex;
699 u32 bitindex;
700 u32 bits;
701 u32 vftabyte;
702
703 if (vlan > 4095)
704 return IXGBE_ERR_PARAM;
705
706 /* Determine 32-bit word position in array */
707 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
708
709 /* Determine the location of the (VMD) queue index */
710 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
711 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
712
713 /* Set the nibble for VMD queue index */
714 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
715 bits &= (~(0x0F << bitindex));
716 bits |= (vind << bitindex);
717 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
718
719 /* Determine the location of the bit for this VLAN id */
720 bitindex = vlan & 0x1F; /* lower five bits */
721
722 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
723 if (vlan_on)
724 /* Turn on this VLAN id */
725 bits |= (1 << bitindex);
726 else
727 /* Turn off this VLAN id */
728 bits &= ~(1 << bitindex);
729 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
730
731 return 0;
732}
733
734/**
735 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
736 * @hw: pointer to hardware structure
737 *
738 * Clears the VLAN filer table, and the VMDq index associated with the filter
739 **/
740static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
741{
742 u32 offset;
743 u32 vlanbyte;
744
745 for (offset = 0; offset < hw->mac.vft_size; offset++)
746 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
747
748 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
749 for (offset = 0; offset < hw->mac.vft_size; offset++)
750 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
751 0);
752
753 return 0;
754}
755
756/**
757 * ixgbe_blink_led_start_82598 - Blink LED based on index.
758 * @hw: pointer to hardware structure
759 * @index: led number to blink
760 **/
761static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
762{
763 ixgbe_link_speed speed = 0;
764 bool link_up = 0;
765 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
766 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
767
768 /*
769 * Link must be up to auto-blink the LEDs on the 82598EB MAC;
770 * force it if link is down.
771 */
772 hw->mac.ops.check_link(hw, &speed, &link_up, false);
773
774 if (!link_up) {
775 autoc_reg |= IXGBE_AUTOC_FLU;
776 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
777 msleep(10);
778 }
779
780 led_reg &= ~IXGBE_LED_MODE_MASK(index);
781 led_reg |= IXGBE_LED_BLINK(index);
782 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
783 IXGBE_WRITE_FLUSH(hw);
784
785 return 0;
786}
787
788/**
789 * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
790 * @hw: pointer to hardware structure
791 * @index: led number to stop blinking
792 **/
793static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
794{
795 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
796 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
797
798 autoc_reg &= ~IXGBE_AUTOC_FLU;
799 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
800 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
801
802 led_reg &= ~IXGBE_LED_MODE_MASK(index);
803 led_reg &= ~IXGBE_LED_BLINK(index);
804 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
805 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
806 IXGBE_WRITE_FLUSH(hw);
807
808 return 0;
809}
810
811/**
812 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
813 * @hw: pointer to hardware structure
814 * @reg: analog register to read
815 * @val: read value
816 *
817 * Performs read operation to Atlas analog register specified.
818 **/
819s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
820{
821 u32 atlas_ctl;
822
823 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
824 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
825 IXGBE_WRITE_FLUSH(hw);
826 udelay(10);
827 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
828 *val = (u8)atlas_ctl;
829
830 return 0;
831}
832
833/**
834 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
835 * @hw: pointer to hardware structure
836 * @reg: atlas register to write
837 * @val: value to write
838 *
839 * Performs write operation to Atlas analog register specified.
840 **/
841s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
842{
843 u32 atlas_ctl;
844
845 atlas_ctl = (reg << 8) | val;
846 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
847 IXGBE_WRITE_FLUSH(hw);
848 udelay(10);
849
850 return 0;
851}
852
853/**
854 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
855 * @hw: pointer to hardware structure
856 *
857 * Determines physical layer capabilities of the current configuration.
858 **/
859s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
860{
861 s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
862
863 switch (hw->device_id) {
864 case IXGBE_DEV_ID_82598EB_CX4:
865 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
866 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
867 break;
868 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
869 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
870 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
871 break;
872 case IXGBE_DEV_ID_82598EB_XF_LR:
873 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
874 break;
875
876 default:
877 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
878 break;
879 }
880
881 return physical_layer;
882}
883
513static struct ixgbe_mac_operations mac_ops_82598 = { 884static struct ixgbe_mac_operations mac_ops_82598 = {
514 .reset = &ixgbe_reset_hw_82598, 885 .init_hw = &ixgbe_init_hw_generic,
886 .reset_hw = &ixgbe_reset_hw_82598,
887 .start_hw = &ixgbe_start_hw_generic,
888 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
515 .get_media_type = &ixgbe_get_media_type_82598, 889 .get_media_type = &ixgbe_get_media_type_82598,
890 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
891 .get_mac_addr = &ixgbe_get_mac_addr_generic,
892 .stop_adapter = &ixgbe_stop_adapter_generic,
893 .read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
894 .write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
516 .setup_link = &ixgbe_setup_mac_link_82598, 895 .setup_link = &ixgbe_setup_mac_link_82598,
517 .check_link = &ixgbe_check_mac_link_82598,
518 .setup_link_speed = &ixgbe_setup_mac_link_speed_82598, 896 .setup_link_speed = &ixgbe_setup_mac_link_speed_82598,
519 .get_link_settings = &ixgbe_get_link_settings_82598, 897 .check_link = &ixgbe_check_mac_link_82598,
898 .get_link_capabilities = &ixgbe_get_link_capabilities_82598,
899 .led_on = &ixgbe_led_on_generic,
900 .led_off = &ixgbe_led_off_generic,
901 .blink_led_start = &ixgbe_blink_led_start_82598,
902 .blink_led_stop = &ixgbe_blink_led_stop_82598,
903 .set_rar = &ixgbe_set_rar_generic,
904 .clear_rar = &ixgbe_clear_rar_generic,
905 .set_vmdq = &ixgbe_set_vmdq_82598,
906 .clear_vmdq = &ixgbe_clear_vmdq_82598,
907 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
908 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
909 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
910 .enable_mc = &ixgbe_enable_mc_generic,
911 .disable_mc = &ixgbe_disable_mc_generic,
912 .clear_vfta = &ixgbe_clear_vfta_82598,
913 .set_vfta = &ixgbe_set_vfta_82598,
914 .setup_fc = &ixgbe_setup_fc_82598,
915};
916
917static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
918 .init_params = &ixgbe_init_eeprom_params_generic,
919 .read = &ixgbe_read_eeprom_generic,
920 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
921 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
922};
923
924static struct ixgbe_phy_operations phy_ops_82598 = {
925 .identify = &ixgbe_identify_phy_generic,
926 /* .identify_sfp = &ixgbe_identify_sfp_module_generic, */
927 .reset = &ixgbe_reset_phy_generic,
928 .read_reg = &ixgbe_read_phy_reg_generic,
929 .write_reg = &ixgbe_write_phy_reg_generic,
930 .setup_link = &ixgbe_setup_phy_link_generic,
931 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
520}; 932};
521 933
522struct ixgbe_info ixgbe_82598_info = { 934struct ixgbe_info ixgbe_82598_info = {
523 .mac = ixgbe_mac_82598EB, 935 .mac = ixgbe_mac_82598EB,
524 .get_invariants = &ixgbe_get_invariants_82598, 936 .get_invariants = &ixgbe_get_invariants_82598,
525 .mac_ops = &mac_ops_82598, 937 .mac_ops = &mac_ops_82598,
938 .eeprom_ops = &eeprom_ops_82598,
939 .phy_ops = &phy_ops_82598,
526}; 940};
527 941
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 7fd6aeb1b021..f67c68404bb3 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -33,20 +32,28 @@
33#include "ixgbe_common.h" 32#include "ixgbe_common.h"
34#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
35 34
36static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
37
38static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw); 35static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
36static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
39static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 37static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
40static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 38static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
39static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
40static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
41static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
42 u16 count);
43static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
44static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
45static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
41static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); 47static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
42 48
43static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); 49static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
44static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); 50static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
45static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
46static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); 52static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
53static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
47 54
48/** 55/**
49 * ixgbe_start_hw - Prepare hardware for TX/RX 56 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
50 * @hw: pointer to hardware structure 57 * @hw: pointer to hardware structure
51 * 58 *
52 * Starts the hardware by filling the bus info structure and media type, clears 59 * Starts the hardware by filling the bus info structure and media type, clears
@@ -54,7 +61,7 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
54 * table, VLAN filter table, calls routine to set up link and flow control 61 * table, VLAN filter table, calls routine to set up link and flow control
55 * settings, and leaves transmit and receive units disabled and uninitialized 62 * settings, and leaves transmit and receive units disabled and uninitialized
56 **/ 63 **/
57s32 ixgbe_start_hw(struct ixgbe_hw *hw) 64s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
58{ 65{
59 u32 ctrl_ext; 66 u32 ctrl_ext;
60 67
@@ -62,22 +69,22 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
62 hw->phy.media_type = hw->mac.ops.get_media_type(hw); 69 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
63 70
64 /* Identify the PHY */ 71 /* Identify the PHY */
65 ixgbe_identify_phy(hw); 72 hw->phy.ops.identify(hw);
66 73
67 /* 74 /*
68 * Store MAC address from RAR0, clear receive address registers, and 75 * Store MAC address from RAR0, clear receive address registers, and
69 * clear the multicast table 76 * clear the multicast table
70 */ 77 */
71 ixgbe_init_rx_addrs(hw); 78 hw->mac.ops.init_rx_addrs(hw);
72 79
73 /* Clear the VLAN filter table */ 80 /* Clear the VLAN filter table */
74 ixgbe_clear_vfta(hw); 81 hw->mac.ops.clear_vfta(hw);
75 82
76 /* Set up link */ 83 /* Set up link */
77 hw->mac.ops.setup_link(hw); 84 hw->mac.ops.setup_link(hw);
78 85
79 /* Clear statistics registers */ 86 /* Clear statistics registers */
80 ixgbe_clear_hw_cntrs(hw); 87 hw->mac.ops.clear_hw_cntrs(hw);
81 88
82 /* Set No Snoop Disable */ 89 /* Set No Snoop Disable */
83 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 90 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
@@ -92,34 +99,34 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
92} 99}
93 100
94/** 101/**
95 * ixgbe_init_hw - Generic hardware initialization 102 * ixgbe_init_hw_generic - Generic hardware initialization
96 * @hw: pointer to hardware structure 103 * @hw: pointer to hardware structure
97 * 104 *
98 * Initialize the hardware by reseting the hardware, filling the bus info 105 * Initialize the hardware by resetting the hardware, filling the bus info
99 * structure and media type, clears all on chip counters, initializes receive 106 * structure and media type, clears all on chip counters, initializes receive
100 * address registers, multicast table, VLAN filter table, calls routine to set 107 * address registers, multicast table, VLAN filter table, calls routine to set
101 * up link and flow control settings, and leaves transmit and receive units 108 * up link and flow control settings, and leaves transmit and receive units
102 * disabled and uninitialized 109 * disabled and uninitialized
103 **/ 110 **/
104s32 ixgbe_init_hw(struct ixgbe_hw *hw) 111s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
105{ 112{
106 /* Reset the hardware */ 113 /* Reset the hardware */
107 hw->mac.ops.reset(hw); 114 hw->mac.ops.reset_hw(hw);
108 115
109 /* Start the HW */ 116 /* Start the HW */
110 ixgbe_start_hw(hw); 117 hw->mac.ops.start_hw(hw);
111 118
112 return 0; 119 return 0;
113} 120}
114 121
115/** 122/**
116 * ixgbe_clear_hw_cntrs - Generic clear hardware counters 123 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
117 * @hw: pointer to hardware structure 124 * @hw: pointer to hardware structure
118 * 125 *
119 * Clears all hardware statistics counters by reading them from the hardware 126 * Clears all hardware statistics counters by reading them from the hardware
120 * Statistics counters are clear on read. 127 * Statistics counters are clear on read.
121 **/ 128 **/
122static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) 129s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
123{ 130{
124 u16 i = 0; 131 u16 i = 0;
125 132
@@ -191,7 +198,36 @@ static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
191} 198}
192 199
193/** 200/**
194 * ixgbe_get_mac_addr - Generic get MAC address 201 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
202 * @hw: pointer to hardware structure
203 * @pba_num: stores the part number from the EEPROM
204 *
205 * Reads the part number from the EEPROM.
206 **/
207s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
208{
209 s32 ret_val;
210 u16 data;
211
212 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
213 if (ret_val) {
214 hw_dbg(hw, "NVM Read Error\n");
215 return ret_val;
216 }
217 *pba_num = (u32)(data << 16);
218
219 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
220 if (ret_val) {
221 hw_dbg(hw, "NVM Read Error\n");
222 return ret_val;
223 }
224 *pba_num |= data;
225
226 return 0;
227}
228
229/**
230 * ixgbe_get_mac_addr_generic - Generic get MAC address
195 * @hw: pointer to hardware structure 231 * @hw: pointer to hardware structure
196 * @mac_addr: Adapter MAC address 232 * @mac_addr: Adapter MAC address
197 * 233 *
@@ -199,7 +235,7 @@ static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
199 * A reset of the adapter must be performed prior to calling this function 235 * A reset of the adapter must be performed prior to calling this function
200 * in order for the MAC address to have been loaded from the EEPROM into RAR0 236 * in order for the MAC address to have been loaded from the EEPROM into RAR0
201 **/ 237 **/
202s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) 238s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
203{ 239{
204 u32 rar_high; 240 u32 rar_high;
205 u32 rar_low; 241 u32 rar_low;
@@ -217,30 +253,8 @@ s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
217 return 0; 253 return 0;
218} 254}
219 255
220s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num)
221{
222 s32 ret_val;
223 u16 data;
224
225 ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data);
226 if (ret_val) {
227 hw_dbg(hw, "NVM Read Error\n");
228 return ret_val;
229 }
230 *part_num = (u32)(data << 16);
231
232 ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data);
233 if (ret_val) {
234 hw_dbg(hw, "NVM Read Error\n");
235 return ret_val;
236 }
237 *part_num |= data;
238
239 return 0;
240}
241
242/** 256/**
243 * ixgbe_stop_adapter - Generic stop TX/RX units 257 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
244 * @hw: pointer to hardware structure 258 * @hw: pointer to hardware structure
245 * 259 *
246 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, 260 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
@@ -248,7 +262,7 @@ s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num)
248 * the shared code and drivers to determine if the adapter is in a stopped 262 * the shared code and drivers to determine if the adapter is in a stopped
249 * state and should not touch the hardware. 263 * state and should not touch the hardware.
250 **/ 264 **/
251s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) 265s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
252{ 266{
253 u32 number_of_queues; 267 u32 number_of_queues;
254 u32 reg_val; 268 u32 reg_val;
@@ -264,6 +278,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
264 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 278 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
265 reg_val &= ~(IXGBE_RXCTRL_RXEN); 279 reg_val &= ~(IXGBE_RXCTRL_RXEN);
266 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); 280 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
281 IXGBE_WRITE_FLUSH(hw);
267 msleep(2); 282 msleep(2);
268 283
269 /* Clear interrupt mask to stop from interrupts being generated */ 284 /* Clear interrupt mask to stop from interrupts being generated */
@@ -273,7 +288,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
273 IXGBE_READ_REG(hw, IXGBE_EICR); 288 IXGBE_READ_REG(hw, IXGBE_EICR);
274 289
275 /* Disable the transmit unit. Each queue must be disabled. */ 290 /* Disable the transmit unit. Each queue must be disabled. */
276 number_of_queues = hw->mac.num_tx_queues; 291 number_of_queues = hw->mac.max_tx_queues;
277 for (i = 0; i < number_of_queues; i++) { 292 for (i = 0; i < number_of_queues; i++) {
278 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 293 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
279 if (reg_val & IXGBE_TXDCTL_ENABLE) { 294 if (reg_val & IXGBE_TXDCTL_ENABLE) {
@@ -282,15 +297,22 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
282 } 297 }
283 } 298 }
284 299
300 /*
301 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
302 * access and verify no pending requests
303 */
304 if (ixgbe_disable_pcie_master(hw) != 0)
305 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
306
285 return 0; 307 return 0;
286} 308}
287 309
288/** 310/**
289 * ixgbe_led_on - Turns on the software controllable LEDs. 311 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
290 * @hw: pointer to hardware structure 312 * @hw: pointer to hardware structure
291 * @index: led number to turn on 313 * @index: led number to turn on
292 **/ 314 **/
293s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) 315s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
294{ 316{
295 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 317 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
296 318
@@ -304,11 +326,11 @@ s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
304} 326}
305 327
306/** 328/**
307 * ixgbe_led_off - Turns off the software controllable LEDs. 329 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
308 * @hw: pointer to hardware structure 330 * @hw: pointer to hardware structure
309 * @index: led number to turn off 331 * @index: led number to turn off
310 **/ 332 **/
311s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) 333s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
312{ 334{
313 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 335 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
314 336
@@ -321,15 +343,14 @@ s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
321 return 0; 343 return 0;
322} 344}
323 345
324
325/** 346/**
326 * ixgbe_init_eeprom - Initialize EEPROM params 347 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
327 * @hw: pointer to hardware structure 348 * @hw: pointer to hardware structure
328 * 349 *
329 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 350 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
330 * ixgbe_hw struct in order to set up EEPROM access. 351 * ixgbe_hw struct in order to set up EEPROM access.
331 **/ 352 **/
332s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) 353s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
333{ 354{
334 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 355 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
335 u32 eec; 356 u32 eec;
@@ -337,6 +358,9 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
337 358
338 if (eeprom->type == ixgbe_eeprom_uninitialized) { 359 if (eeprom->type == ixgbe_eeprom_uninitialized) {
339 eeprom->type = ixgbe_eeprom_none; 360 eeprom->type = ixgbe_eeprom_none;
361 /* Set default semaphore delay to 10ms which is a well
362 * tested value */
363 eeprom->semaphore_delay = 10;
340 364
341 /* 365 /*
342 * Check for EEPROM present first. 366 * Check for EEPROM present first.
@@ -369,18 +393,85 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
369} 393}
370 394
371/** 395/**
372 * ixgbe_read_eeprom - Read EEPROM word using EERD 396 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
397 * @hw: pointer to hardware structure
398 * @offset: offset within the EEPROM to be read
399 * @data: read 16 bit value from EEPROM
400 *
401 * Reads 16 bit value from EEPROM through bit-bang method
402 **/
403s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
404 u16 *data)
405{
406 s32 status;
407 u16 word_in;
408 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
409
410 hw->eeprom.ops.init_params(hw);
411
412 if (offset >= hw->eeprom.word_size) {
413 status = IXGBE_ERR_EEPROM;
414 goto out;
415 }
416
417 /* Prepare the EEPROM for reading */
418 status = ixgbe_acquire_eeprom(hw);
419
420 if (status == 0) {
421 if (ixgbe_ready_eeprom(hw) != 0) {
422 ixgbe_release_eeprom(hw);
423 status = IXGBE_ERR_EEPROM;
424 }
425 }
426
427 if (status == 0) {
428 ixgbe_standby_eeprom(hw);
429
430 /*
431 * Some SPI eeproms use the 8th address bit embedded in the
432 * opcode
433 */
434 if ((hw->eeprom.address_bits == 8) && (offset >= 128))
435 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
436
437 /* Send the READ command (opcode + addr) */
438 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
439 IXGBE_EEPROM_OPCODE_BITS);
440 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
441 hw->eeprom.address_bits);
442
443 /* Read the data. */
444 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
445 *data = (word_in >> 8) | (word_in << 8);
446
447 /* End this read operation */
448 ixgbe_release_eeprom(hw);
449 }
450
451out:
452 return status;
453}
454
455/**
456 * ixgbe_read_eeprom_generic - Read EEPROM word using EERD
373 * @hw: pointer to hardware structure 457 * @hw: pointer to hardware structure
374 * @offset: offset of word in the EEPROM to read 458 * @offset: offset of word in the EEPROM to read
375 * @data: word read from the EEPROM 459 * @data: word read from the EEPROM
376 * 460 *
377 * Reads a 16 bit word from the EEPROM using the EERD register. 461 * Reads a 16 bit word from the EEPROM using the EERD register.
378 **/ 462 **/
379s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) 463s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
380{ 464{
381 u32 eerd; 465 u32 eerd;
382 s32 status; 466 s32 status;
383 467
468 hw->eeprom.ops.init_params(hw);
469
470 if (offset >= hw->eeprom.word_size) {
471 status = IXGBE_ERR_EEPROM;
472 goto out;
473 }
474
384 eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + 475 eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
385 IXGBE_EEPROM_READ_REG_START; 476 IXGBE_EEPROM_READ_REG_START;
386 477
@@ -389,10 +480,11 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
389 480
390 if (status == 0) 481 if (status == 0)
391 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 482 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
392 IXGBE_EEPROM_READ_REG_DATA); 483 IXGBE_EEPROM_READ_REG_DATA);
393 else 484 else
394 hw_dbg(hw, "Eeprom read timed out\n"); 485 hw_dbg(hw, "Eeprom read timed out\n");
395 486
487out:
396 return status; 488 return status;
397} 489}
398 490
@@ -420,6 +512,58 @@ static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw)
420} 512}
421 513
422/** 514/**
515 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
516 * @hw: pointer to hardware structure
517 *
518 * Prepares EEPROM for access using bit-bang method. This function should
519 * be called before issuing a command to the EEPROM.
520 **/
521static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
522{
523 s32 status = 0;
524 u32 eec;
525 u32 i;
526
527 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
528 status = IXGBE_ERR_SWFW_SYNC;
529
530 if (status == 0) {
531 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
532
533 /* Request EEPROM Access */
534 eec |= IXGBE_EEC_REQ;
535 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
536
537 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
538 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
539 if (eec & IXGBE_EEC_GNT)
540 break;
541 udelay(5);
542 }
543
544 /* Release if grant not acquired */
545 if (!(eec & IXGBE_EEC_GNT)) {
546 eec &= ~IXGBE_EEC_REQ;
547 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
548 hw_dbg(hw, "Could not acquire EEPROM grant\n");
549
550 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
551 status = IXGBE_ERR_EEPROM;
552 }
553 }
554
555 /* Setup EEPROM for Read/Write */
556 if (status == 0) {
557 /* Clear CS and SK */
558 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
559 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
560 IXGBE_WRITE_FLUSH(hw);
561 udelay(1);
562 }
563 return status;
564}
565
566/**
423 * ixgbe_get_eeprom_semaphore - Get hardware semaphore 567 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
424 * @hw: pointer to hardware structure 568 * @hw: pointer to hardware structure
425 * 569 *
@@ -475,7 +619,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
475 */ 619 */
476 if (i >= timeout) { 620 if (i >= timeout) {
477 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " 621 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
478 "not granted.\n"); 622 "not granted.\n");
479 ixgbe_release_eeprom_semaphore(hw); 623 ixgbe_release_eeprom_semaphore(hw);
480 status = IXGBE_ERR_EEPROM; 624 status = IXGBE_ERR_EEPROM;
481 } 625 }
@@ -503,6 +647,217 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
503} 647}
504 648
505/** 649/**
650 * ixgbe_ready_eeprom - Polls for EEPROM ready
651 * @hw: pointer to hardware structure
652 **/
653static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
654{
655 s32 status = 0;
656 u16 i;
657 u8 spi_stat_reg;
658
659 /*
660 * Read "Status Register" repeatedly until the LSB is cleared. The
661 * EEPROM will signal that the command has been completed by clearing
662 * bit 0 of the internal status register. If it's not cleared within
663 * 5 milliseconds, then error out.
664 */
665 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
666 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
667 IXGBE_EEPROM_OPCODE_BITS);
668 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
669 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
670 break;
671
672 udelay(5);
673 ixgbe_standby_eeprom(hw);
674 };
675
676 /*
677 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
678 * devices (and only 0-5mSec on 5V devices)
679 */
680 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
681 hw_dbg(hw, "SPI EEPROM Status error\n");
682 status = IXGBE_ERR_EEPROM;
683 }
684
685 return status;
686}
687
688/**
689 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
690 * @hw: pointer to hardware structure
691 **/
692static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
693{
694 u32 eec;
695
696 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
697
698 /* Toggle CS to flush commands */
699 eec |= IXGBE_EEC_CS;
700 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
701 IXGBE_WRITE_FLUSH(hw);
702 udelay(1);
703 eec &= ~IXGBE_EEC_CS;
704 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
705 IXGBE_WRITE_FLUSH(hw);
706 udelay(1);
707}
708
709/**
710 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
711 * @hw: pointer to hardware structure
712 * @data: data to send to the EEPROM
713 * @count: number of bits to shift out
714 **/
715static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
716 u16 count)
717{
718 u32 eec;
719 u32 mask;
720 u32 i;
721
722 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
723
724 /*
725 * Mask is used to shift "count" bits of "data" out to the EEPROM
726 * one bit at a time. Determine the starting bit based on count
727 */
728 mask = 0x01 << (count - 1);
729
730 for (i = 0; i < count; i++) {
731 /*
732 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
733 * "1", and then raising and then lowering the clock (the SK
734 * bit controls the clock input to the EEPROM). A "0" is
735 * shifted out to the EEPROM by setting "DI" to "0" and then
736 * raising and then lowering the clock.
737 */
738 if (data & mask)
739 eec |= IXGBE_EEC_DI;
740 else
741 eec &= ~IXGBE_EEC_DI;
742
743 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
744 IXGBE_WRITE_FLUSH(hw);
745
746 udelay(1);
747
748 ixgbe_raise_eeprom_clk(hw, &eec);
749 ixgbe_lower_eeprom_clk(hw, &eec);
750
751 /*
752 * Shift mask to signify next bit of data to shift in to the
753 * EEPROM
754 */
755 mask = mask >> 1;
756 };
757
758 /* We leave the "DI" bit set to "0" when we leave this routine. */
759 eec &= ~IXGBE_EEC_DI;
760 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
761 IXGBE_WRITE_FLUSH(hw);
762}
763
764/**
765 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
766 * @hw: pointer to hardware structure
767 **/
768static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
769{
770 u32 eec;
771 u32 i;
772 u16 data = 0;
773
774 /*
775 * In order to read a register from the EEPROM, we need to shift
776 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
777 * the clock input to the EEPROM (setting the SK bit), and then reading
778 * the value of the "DO" bit. During this "shifting in" process the
779 * "DI" bit should always be clear.
780 */
781 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
782
783 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
784
785 for (i = 0; i < count; i++) {
786 data = data << 1;
787 ixgbe_raise_eeprom_clk(hw, &eec);
788
789 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
790
791 eec &= ~(IXGBE_EEC_DI);
792 if (eec & IXGBE_EEC_DO)
793 data |= 1;
794
795 ixgbe_lower_eeprom_clk(hw, &eec);
796 }
797
798 return data;
799}
800
801/**
802 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
803 * @hw: pointer to hardware structure
804 * @eec: EEC register's current value
805 **/
806static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
807{
808 /*
809 * Raise the clock input to the EEPROM
810 * (setting the SK bit), then delay
811 */
812 *eec = *eec | IXGBE_EEC_SK;
813 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
814 IXGBE_WRITE_FLUSH(hw);
815 udelay(1);
816}
817
818/**
819 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
820 * @hw: pointer to hardware structure
821 * @eecd: EECD's current value
822 **/
823static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
824{
825 /*
826 * Lower the clock input to the EEPROM (clearing the SK bit), then
827 * delay
828 */
829 *eec = *eec & ~IXGBE_EEC_SK;
830 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
831 IXGBE_WRITE_FLUSH(hw);
832 udelay(1);
833}
834
835/**
836 * ixgbe_release_eeprom - Release EEPROM, release semaphores
837 * @hw: pointer to hardware structure
838 **/
839static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
840{
841 u32 eec;
842
843 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
844
845 eec |= IXGBE_EEC_CS; /* Pull CS high */
846 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
847
848 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
849 IXGBE_WRITE_FLUSH(hw);
850
851 udelay(1);
852
853 /* Stop requesting EEPROM access */
854 eec &= ~IXGBE_EEC_REQ;
855 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
856
857 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
858}
859
860/**
506 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 861 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
507 * @hw: pointer to hardware structure 862 * @hw: pointer to hardware structure
508 **/ 863 **/
@@ -517,7 +872,7 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
517 872
518 /* Include 0x0-0x3F in the checksum */ 873 /* Include 0x0-0x3F in the checksum */
519 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { 874 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
520 if (ixgbe_read_eeprom(hw, i, &word) != 0) { 875 if (hw->eeprom.ops.read(hw, i, &word) != 0) {
521 hw_dbg(hw, "EEPROM read failed\n"); 876 hw_dbg(hw, "EEPROM read failed\n");
522 break; 877 break;
523 } 878 }
@@ -526,15 +881,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
526 881
527 /* Include all data from pointers except for the fw pointer */ 882 /* Include all data from pointers except for the fw pointer */
528 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { 883 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
529 ixgbe_read_eeprom(hw, i, &pointer); 884 hw->eeprom.ops.read(hw, i, &pointer);
530 885
531 /* Make sure the pointer seems valid */ 886 /* Make sure the pointer seems valid */
532 if (pointer != 0xFFFF && pointer != 0) { 887 if (pointer != 0xFFFF && pointer != 0) {
533 ixgbe_read_eeprom(hw, pointer, &length); 888 hw->eeprom.ops.read(hw, pointer, &length);
534 889
535 if (length != 0xFFFF && length != 0) { 890 if (length != 0xFFFF && length != 0) {
536 for (j = pointer+1; j <= pointer+length; j++) { 891 for (j = pointer+1; j <= pointer+length; j++) {
537 ixgbe_read_eeprom(hw, j, &word); 892 hw->eeprom.ops.read(hw, j, &word);
538 checksum += word; 893 checksum += word;
539 } 894 }
540 } 895 }
@@ -547,14 +902,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
547} 902}
548 903
549/** 904/**
550 * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum 905 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
551 * @hw: pointer to hardware structure 906 * @hw: pointer to hardware structure
552 * @checksum_val: calculated checksum 907 * @checksum_val: calculated checksum
553 * 908 *
554 * Performs checksum calculation and validates the EEPROM checksum. If the 909 * Performs checksum calculation and validates the EEPROM checksum. If the
555 * caller does not need checksum_val, the value can be NULL. 910 * caller does not need checksum_val, the value can be NULL.
556 **/ 911 **/
557s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) 912s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
913 u16 *checksum_val)
558{ 914{
559 s32 status; 915 s32 status;
560 u16 checksum; 916 u16 checksum;
@@ -565,12 +921,12 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
565 * not continue or we could be in for a very long wait while every 921 * not continue or we could be in for a very long wait while every
566 * EEPROM read fails 922 * EEPROM read fails
567 */ 923 */
568 status = ixgbe_read_eeprom(hw, 0, &checksum); 924 status = hw->eeprom.ops.read(hw, 0, &checksum);
569 925
570 if (status == 0) { 926 if (status == 0) {
571 checksum = ixgbe_calc_eeprom_checksum(hw); 927 checksum = ixgbe_calc_eeprom_checksum(hw);
572 928
573 ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 929 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
574 930
575 /* 931 /*
576 * Verify read checksum from EEPROM is the same as 932 * Verify read checksum from EEPROM is the same as
@@ -590,6 +946,33 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
590} 946}
591 947
592/** 948/**
949 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
950 * @hw: pointer to hardware structure
951 **/
952s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
953{
954 s32 status;
955 u16 checksum;
956
957 /*
958 * Read the first word from the EEPROM. If this times out or fails, do
959 * not continue or we could be in for a very long wait while every
960 * EEPROM read fails
961 */
962 status = hw->eeprom.ops.read(hw, 0, &checksum);
963
964 if (status == 0) {
965 checksum = ixgbe_calc_eeprom_checksum(hw);
966 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
967 checksum);
968 } else {
969 hw_dbg(hw, "EEPROM read failed\n");
970 }
971
972 return status;
973}
974
975/**
593 * ixgbe_validate_mac_addr - Validate MAC address 976 * ixgbe_validate_mac_addr - Validate MAC address
594 * @mac_addr: pointer to MAC address. 977 * @mac_addr: pointer to MAC address.
595 * 978 *
@@ -607,61 +990,140 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr)
607 status = IXGBE_ERR_INVALID_MAC_ADDR; 990 status = IXGBE_ERR_INVALID_MAC_ADDR;
608 /* Reject the zero address */ 991 /* Reject the zero address */
609 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 992 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
610 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) 993 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
611 status = IXGBE_ERR_INVALID_MAC_ADDR; 994 status = IXGBE_ERR_INVALID_MAC_ADDR;
612 995
613 return status; 996 return status;
614} 997}
615 998
616/** 999/**
617 * ixgbe_set_rar - Set RX address register 1000 * ixgbe_set_rar_generic - Set Rx address register
618 * @hw: pointer to hardware structure 1001 * @hw: pointer to hardware structure
619 * @addr: Address to put into receive address register
620 * @index: Receive address register to write 1002 * @index: Receive address register to write
621 * @vind: Vind to set RAR to 1003 * @addr: Address to put into receive address register
1004 * @vmdq: VMDq "set" or "pool" index
622 * @enable_addr: set flag that address is active 1005 * @enable_addr: set flag that address is active
623 * 1006 *
624 * Puts an ethernet address into a receive address register. 1007 * Puts an ethernet address into a receive address register.
625 **/ 1008 **/
626s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, 1009s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
627 u32 enable_addr) 1010 u32 enable_addr)
628{ 1011{
629 u32 rar_low, rar_high; 1012 u32 rar_low, rar_high;
1013 u32 rar_entries = hw->mac.num_rar_entries;
630 1014
631 /* 1015 /* setup VMDq pool selection before this RAR gets enabled */
632 * HW expects these in little endian so we reverse the byte order from 1016 hw->mac.ops.set_vmdq(hw, index, vmdq);
633 * network order (big endian) to little endian
634 */
635 rar_low = ((u32)addr[0] |
636 ((u32)addr[1] << 8) |
637 ((u32)addr[2] << 16) |
638 ((u32)addr[3] << 24));
639 1017
640 rar_high = ((u32)addr[4] | 1018 /* Make sure we are using a valid rar index range */
641 ((u32)addr[5] << 8) | 1019 if (index < rar_entries) {
642 ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK)); 1020 /*
1021 * HW expects these in little endian so we reverse the byte
1022 * order from network order (big endian) to little endian
1023 */
1024 rar_low = ((u32)addr[0] |
1025 ((u32)addr[1] << 8) |
1026 ((u32)addr[2] << 16) |
1027 ((u32)addr[3] << 24));
1028 /*
1029 * Some parts put the VMDq setting in the extra RAH bits,
1030 * so save everything except the lower 16 bits that hold part
1031 * of the address and the address valid bit.
1032 */
1033 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1034 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1035 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
643 1036
644 if (enable_addr != 0) 1037 if (enable_addr != 0)
645 rar_high |= IXGBE_RAH_AV; 1038 rar_high |= IXGBE_RAH_AV;
646 1039
647 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1040 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
648 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1041 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1042 } else {
1043 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1044 }
649 1045
650 return 0; 1046 return 0;
651} 1047}
652 1048
653/** 1049/**
654 * ixgbe_init_rx_addrs - Initializes receive address filters. 1050 * ixgbe_clear_rar_generic - Remove Rx address register
1051 * @hw: pointer to hardware structure
1052 * @index: Receive address register to write
1053 *
1054 * Clears an ethernet address from a receive address register.
1055 **/
1056s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1057{
1058 u32 rar_high;
1059 u32 rar_entries = hw->mac.num_rar_entries;
1060
1061 /* Make sure we are using a valid rar index range */
1062 if (index < rar_entries) {
1063 /*
1064 * Some parts put the VMDq setting in the extra RAH bits,
1065 * so save everything except the lower 16 bits that hold part
1066 * of the address and the address valid bit.
1067 */
1068 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1069 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1070
1071 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1072 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1073 } else {
1074 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1075 }
1076
1077 /* clear VMDq pool/queue selection for this RAR */
1078 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1079
1080 return 0;
1081}
1082
1083/**
1084 * ixgbe_enable_rar - Enable Rx address register
1085 * @hw: pointer to hardware structure
1086 * @index: index into the RAR table
1087 *
1088 * Enables the select receive address register.
1089 **/
1090static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
1091{
1092 u32 rar_high;
1093
1094 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1095 rar_high |= IXGBE_RAH_AV;
1096 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1097}
1098
1099/**
1100 * ixgbe_disable_rar - Disable Rx address register
1101 * @hw: pointer to hardware structure
1102 * @index: index into the RAR table
1103 *
1104 * Disables the select receive address register.
1105 **/
1106static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
1107{
1108 u32 rar_high;
1109
1110 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1111 rar_high &= (~IXGBE_RAH_AV);
1112 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1113}
1114
1115/**
1116 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
655 * @hw: pointer to hardware structure 1117 * @hw: pointer to hardware structure
656 * 1118 *
657 * Places the MAC address in receive address register 0 and clears the rest 1119 * Places the MAC address in receive address register 0 and clears the rest
658 * of the receive addresss registers. Clears the multicast table. Assumes 1120 * of the receive address registers. Clears the multicast table. Assumes
659 * the receiver is in reset when the routine is called. 1121 * the receiver is in reset when the routine is called.
660 **/ 1122 **/
661static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) 1123s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
662{ 1124{
663 u32 i; 1125 u32 i;
664 u32 rar_entries = hw->mac.num_rx_addrs; 1126 u32 rar_entries = hw->mac.num_rar_entries;
665 1127
666 /* 1128 /*
667 * If the current mac address is valid, assume it is a software override 1129 * If the current mac address is valid, assume it is a software override
@@ -671,29 +1133,30 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
671 if (ixgbe_validate_mac_addr(hw->mac.addr) == 1133 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
672 IXGBE_ERR_INVALID_MAC_ADDR) { 1134 IXGBE_ERR_INVALID_MAC_ADDR) {
673 /* Get the MAC address from the RAR0 for later reference */ 1135 /* Get the MAC address from the RAR0 for later reference */
674 ixgbe_get_mac_addr(hw, hw->mac.addr); 1136 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
675 1137
676 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", 1138 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
677 hw->mac.addr[0], hw->mac.addr[1], 1139 hw->mac.addr[0], hw->mac.addr[1],
678 hw->mac.addr[2]); 1140 hw->mac.addr[2]);
679 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], 1141 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
680 hw->mac.addr[4], hw->mac.addr[5]); 1142 hw->mac.addr[4], hw->mac.addr[5]);
681 } else { 1143 } else {
682 /* Setup the receive address. */ 1144 /* Setup the receive address. */
683 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); 1145 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
684 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", 1146 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
685 hw->mac.addr[0], hw->mac.addr[1], 1147 hw->mac.addr[0], hw->mac.addr[1],
686 hw->mac.addr[2]); 1148 hw->mac.addr[2]);
687 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], 1149 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
688 hw->mac.addr[4], hw->mac.addr[5]); 1150 hw->mac.addr[4], hw->mac.addr[5]);
689 1151
690 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1152 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
691 } 1153 }
1154 hw->addr_ctrl.overflow_promisc = 0;
692 1155
693 hw->addr_ctrl.rar_used_count = 1; 1156 hw->addr_ctrl.rar_used_count = 1;
694 1157
695 /* Zero out the other receive addresses. */ 1158 /* Zero out the other receive addresses. */
696 hw_dbg(hw, "Clearing RAR[1-15]\n"); 1159 hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
697 for (i = 1; i < rar_entries; i++) { 1160 for (i = 1; i < rar_entries; i++) {
698 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1161 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
699 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 1162 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
@@ -705,9 +1168,113 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
705 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1168 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
706 1169
707 hw_dbg(hw, " Clearing MTA\n"); 1170 hw_dbg(hw, " Clearing MTA\n");
708 for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) 1171 for (i = 0; i < hw->mac.mcft_size; i++)
709 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1172 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
710 1173
1174 if (hw->mac.ops.init_uta_tables)
1175 hw->mac.ops.init_uta_tables(hw);
1176
1177 return 0;
1178}
1179
1180/**
1181 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1182 * @hw: pointer to hardware structure
1183 * @addr: new address
1184 *
1185 * Adds it to unused receive address register or goes into promiscuous mode.
1186 **/
1187static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1188{
1189 u32 rar_entries = hw->mac.num_rar_entries;
1190 u32 rar;
1191
1192 hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1193 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1194
1195 /*
1196 * Place this address in the RAR if there is room,
1197 * else put the controller into promiscuous mode
1198 */
1199 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1200 rar = hw->addr_ctrl.rar_used_count -
1201 hw->addr_ctrl.mc_addr_in_rar_count;
1202 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1203 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
1204 hw->addr_ctrl.rar_used_count++;
1205 } else {
1206 hw->addr_ctrl.overflow_promisc++;
1207 }
1208
1209 hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
1210}
1211
1212/**
1213 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1214 * @hw: pointer to hardware structure
1215 * @addr_list: the list of new addresses
1216 * @addr_count: number of addresses
1217 * @next: iterator function to walk the address list
1218 *
1219 * The given list replaces any existing list. Clears the secondary addrs from
1220 * receive address registers. Uses unused receive address registers for the
1221 * first secondary addresses, and falls back to promiscuous mode as needed.
1222 *
1223 * Drivers using secondary unicast addresses must set user_set_promisc when
1224 * manually putting the device into promiscuous mode.
1225 **/
1226s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
1227 u32 addr_count, ixgbe_mc_addr_itr next)
1228{
1229 u8 *addr;
1230 u32 i;
1231 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1232 u32 uc_addr_in_use;
1233 u32 fctrl;
1234 u32 vmdq;
1235
1236 /*
1237 * Clear accounting of old secondary address list,
1238 * don't count RAR[0]
1239 */
1240 uc_addr_in_use = hw->addr_ctrl.rar_used_count -
1241 hw->addr_ctrl.mc_addr_in_rar_count - 1;
1242 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1243 hw->addr_ctrl.overflow_promisc = 0;
1244
1245 /* Zero out the other receive addresses */
1246 hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
1247 for (i = 1; i <= uc_addr_in_use; i++) {
1248 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1249 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1250 }
1251
1252 /* Add the new addresses */
1253 for (i = 0; i < addr_count; i++) {
1254 hw_dbg(hw, " Adding the secondary addresses:\n");
1255 addr = next(hw, &addr_list, &vmdq);
1256 ixgbe_add_uc_addr(hw, addr, vmdq);
1257 }
1258
1259 if (hw->addr_ctrl.overflow_promisc) {
1260 /* enable promisc if not already in overflow or set by user */
1261 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1262 hw_dbg(hw, " Entering address overflow promisc mode\n");
1263 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1264 fctrl |= IXGBE_FCTRL_UPE;
1265 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1266 }
1267 } else {
1268 /* only disable if set by overflow, not by user */
1269 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1270 hw_dbg(hw, " Leaving address overflow promisc mode\n");
1271 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1272 fctrl &= ~IXGBE_FCTRL_UPE;
1273 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1274 }
1275 }
1276
1277 hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
711 return 0; 1278 return 0;
712} 1279}
713 1280
@@ -720,7 +1287,7 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
720 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 1287 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
721 * incoming rx multicast addresses, to determine the bit-vector to check in 1288 * incoming rx multicast addresses, to determine the bit-vector to check in
722 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set 1289 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
723 * by the MO field of the MCSTCTRL. The MO field is set during initalization 1290 * by the MO field of the MCSTCTRL. The MO field is set during initialization
724 * to mc_filter_type. 1291 * to mc_filter_type.
725 **/ 1292 **/
726static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 1293static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
@@ -728,19 +1295,19 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
728 u32 vector = 0; 1295 u32 vector = 0;
729 1296
730 switch (hw->mac.mc_filter_type) { 1297 switch (hw->mac.mc_filter_type) {
731 case 0: /* use bits [47:36] of the address */ 1298 case 0: /* use bits [47:36] of the address */
732 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 1299 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
733 break; 1300 break;
734 case 1: /* use bits [46:35] of the address */ 1301 case 1: /* use bits [46:35] of the address */
735 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 1302 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
736 break; 1303 break;
737 case 2: /* use bits [45:34] of the address */ 1304 case 2: /* use bits [45:34] of the address */
738 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 1305 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
739 break; 1306 break;
740 case 3: /* use bits [43:32] of the address */ 1307 case 3: /* use bits [43:32] of the address */
741 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 1308 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
742 break; 1309 break;
743 default: /* Invalid mc_filter_type */ 1310 default: /* Invalid mc_filter_type */
744 hw_dbg(hw, "MC filter type param set incorrectly\n"); 1311 hw_dbg(hw, "MC filter type param set incorrectly\n");
745 break; 1312 break;
746 } 1313 }
@@ -794,21 +1361,22 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
794 **/ 1361 **/
795static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) 1362static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
796{ 1363{
797 u32 rar_entries = hw->mac.num_rx_addrs; 1364 u32 rar_entries = hw->mac.num_rar_entries;
1365 u32 rar;
798 1366
799 hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", 1367 hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
800 mc_addr[0], mc_addr[1], mc_addr[2], 1368 mc_addr[0], mc_addr[1], mc_addr[2],
801 mc_addr[3], mc_addr[4], mc_addr[5]); 1369 mc_addr[3], mc_addr[4], mc_addr[5]);
802 1370
803 /* 1371 /*
804 * Place this multicast address in the RAR if there is room, 1372 * Place this multicast address in the RAR if there is room,
805 * else put it in the MTA 1373 * else put it in the MTA
806 */ 1374 */
807 if (hw->addr_ctrl.rar_used_count < rar_entries) { 1375 if (hw->addr_ctrl.rar_used_count < rar_entries) {
808 ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count, 1376 /* use RAR from the end up for multicast */
809 mc_addr, 0, IXGBE_RAH_AV); 1377 rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
810 hw_dbg(hw, "Added a multicast address to RAR[%d]\n", 1378 hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
811 hw->addr_ctrl.rar_used_count); 1379 hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
812 hw->addr_ctrl.rar_used_count++; 1380 hw->addr_ctrl.rar_used_count++;
813 hw->addr_ctrl.mc_addr_in_rar_count++; 1381 hw->addr_ctrl.mc_addr_in_rar_count++;
814 } else { 1382 } else {
@@ -819,22 +1387,23 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
819} 1387}
820 1388
821/** 1389/**
822 * ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses 1390 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
823 * @hw: pointer to hardware structure 1391 * @hw: pointer to hardware structure
824 * @mc_addr_list: the list of new multicast addresses 1392 * @mc_addr_list: the list of new multicast addresses
825 * @mc_addr_count: number of addresses 1393 * @mc_addr_count: number of addresses
826 * @pad: number of bytes between addresses in the list 1394 * @next: iterator function to walk the multicast address list
827 * 1395 *
828 * The given list replaces any existing list. Clears the MC addrs from receive 1396 * The given list replaces any existing list. Clears the MC addrs from receive
829 * address registers and the multicast table. Uses unsed receive address 1397 * address registers and the multicast table. Uses unused receive address
830 * registers for the first multicast addresses, and hashes the rest into the 1398 * registers for the first multicast addresses, and hashes the rest into the
831 * multicast table. 1399 * multicast table.
832 **/ 1400 **/
833s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, 1401s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
834 u32 mc_addr_count, u32 pad) 1402 u32 mc_addr_count, ixgbe_mc_addr_itr next)
835{ 1403{
836 u32 i; 1404 u32 i;
837 u32 rar_entries = hw->mac.num_rx_addrs; 1405 u32 rar_entries = hw->mac.num_rar_entries;
1406 u32 vmdq;
838 1407
839 /* 1408 /*
840 * Set the new number of MC addresses that we are being requested to 1409 * Set the new number of MC addresses that we are being requested to
@@ -846,7 +1415,8 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
846 hw->addr_ctrl.mta_in_use = 0; 1415 hw->addr_ctrl.mta_in_use = 0;
847 1416
848 /* Zero out the other receive addresses. */ 1417 /* Zero out the other receive addresses. */
849 hw_dbg(hw, "Clearing RAR[1-15]\n"); 1418 hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
1419 rar_entries - 1);
850 for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { 1420 for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
851 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1421 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
852 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 1422 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
@@ -854,186 +1424,67 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
854 1424
855 /* Clear the MTA */ 1425 /* Clear the MTA */
856 hw_dbg(hw, " Clearing MTA\n"); 1426 hw_dbg(hw, " Clearing MTA\n");
857 for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) 1427 for (i = 0; i < hw->mac.mcft_size; i++)
858 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1428 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
859 1429
860 /* Add the new addresses */ 1430 /* Add the new addresses */
861 for (i = 0; i < mc_addr_count; i++) { 1431 for (i = 0; i < mc_addr_count; i++) {
862 hw_dbg(hw, " Adding the multicast addresses:\n"); 1432 hw_dbg(hw, " Adding the multicast addresses:\n");
863 ixgbe_add_mc_addr(hw, mc_addr_list + 1433 ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
864 (i * (IXGBE_ETH_LENGTH_OF_ADDRESS + pad)));
865 } 1434 }
866 1435
867 /* Enable mta */ 1436 /* Enable mta */
868 if (hw->addr_ctrl.mta_in_use > 0) 1437 if (hw->addr_ctrl.mta_in_use > 0)
869 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1438 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
870 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1439 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
871 1440
872 hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n"); 1441 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
873 return 0; 1442 return 0;
874} 1443}
875 1444
876/** 1445/**
877 * ixgbe_clear_vfta - Clear VLAN filter table 1446 * ixgbe_enable_mc_generic - Enable multicast address in RAR
878 * @hw: pointer to hardware structure 1447 * @hw: pointer to hardware structure
879 * 1448 *
880 * Clears the VLAN filer table, and the VMDq index associated with the filter 1449 * Enables multicast address in RAR and the use of the multicast hash table.
881 **/ 1450 **/
882static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) 1451s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
883{ 1452{
884 u32 offset; 1453 u32 i;
885 u32 vlanbyte; 1454 u32 rar_entries = hw->mac.num_rar_entries;
886 1455 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
887 for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
888 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
889
890 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
891 for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
892 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
893 0);
894 1456
895 return 0; 1457 if (a->mc_addr_in_rar_count > 0)
896} 1458 for (i = (rar_entries - a->mc_addr_in_rar_count);
1459 i < rar_entries; i++)
1460 ixgbe_enable_rar(hw, i);
897 1461
898/** 1462 if (a->mta_in_use > 0)
899 * ixgbe_set_vfta - Set VLAN filter table 1463 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
900 * @hw: pointer to hardware structure 1464 hw->mac.mc_filter_type);
901 * @vlan: VLAN id to write to VLAN filter
902 * @vind: VMDq output index that maps queue to VLAN id in VFTA
903 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
904 *
905 * Turn on/off specified VLAN in the VLAN filter table.
906 **/
907s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind,
908 bool vlan_on)
909{
910 u32 VftaIndex;
911 u32 BitOffset;
912 u32 VftaReg;
913 u32 VftaByte;
914
915 /* Determine 32-bit word position in array */
916 VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */
917
918 /* Determine the location of the (VMD) queue index */
919 VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
920 BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
921
922 /* Set the nibble for VMD queue index */
923 VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex));
924 VftaReg &= (~(0x0F << BitOffset));
925 VftaReg |= (vind << BitOffset);
926 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg);
927
928 /* Determine the location of the bit for this VLAN id */
929 BitOffset = vlan & 0x1F; /* lower five bits */
930
931 VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex));
932 if (vlan_on)
933 /* Turn on this VLAN id */
934 VftaReg |= (1 << BitOffset);
935 else
936 /* Turn off this VLAN id */
937 VftaReg &= ~(1 << BitOffset);
938 IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg);
939 1465
940 return 0; 1466 return 0;
941} 1467}
942 1468
943/** 1469/**
944 * ixgbe_setup_fc - Configure flow control settings 1470 * ixgbe_disable_mc_generic - Disable multicast address in RAR
945 * @hw: pointer to hardware structure 1471 * @hw: pointer to hardware structure
946 * @packetbuf_num: packet buffer number (0-7)
947 * 1472 *
948 * Configures the flow control settings based on SW configuration. 1473 * Disables multicast address in RAR and the use of the multicast hash table.
949 * This function is used for 802.3x flow control configuration only.
950 **/ 1474 **/
951s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 1475s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
952{ 1476{
953 u32 frctl_reg; 1477 u32 i;
954 u32 rmcs_reg; 1478 u32 rar_entries = hw->mac.num_rar_entries;
955 1479 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
956 if (packetbuf_num < 0 || packetbuf_num > 7)
957 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
958 "is 0-7\n", packetbuf_num);
959
960 frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
961 frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
962
963 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
964 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
965
966 /*
967 * We want to save off the original Flow Control configuration just in
968 * case we get disconnected and then reconnected into a different hub
969 * or switch with different Flow Control capabilities.
970 */
971 hw->fc.type = hw->fc.original_type;
972
973 /*
974 * The possible values of the "flow_control" parameter are:
975 * 0: Flow control is completely disabled
976 * 1: Rx flow control is enabled (we can receive pause frames but not
977 * send pause frames).
978 * 2: Tx flow control is enabled (we can send pause frames but we do not
979 * support receiving pause frames)
980 * 3: Both Rx and TX flow control (symmetric) are enabled.
981 * other: Invalid.
982 */
983 switch (hw->fc.type) {
984 case ixgbe_fc_none:
985 break;
986 case ixgbe_fc_rx_pause:
987 /*
988 * RX Flow control is enabled,
989 * and TX Flow control is disabled.
990 */
991 frctl_reg |= IXGBE_FCTRL_RFCE;
992 break;
993 case ixgbe_fc_tx_pause:
994 /*
995 * TX Flow control is enabled, and RX Flow control is disabled,
996 * by a software over-ride.
997 */
998 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
999 break;
1000 case ixgbe_fc_full:
1001 /*
1002 * Flow control (both RX and TX) is enabled by a software
1003 * over-ride.
1004 */
1005 frctl_reg |= IXGBE_FCTRL_RFCE;
1006 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
1007 break;
1008 default:
1009 /* We should never get here. The value should be 0-3. */
1010 hw_dbg(hw, "Flow control param set incorrectly\n");
1011 break;
1012 }
1013
1014 /* Enable 802.3x based flow control settings. */
1015 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
1016 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
1017 1480
1018 /* 1481 if (a->mc_addr_in_rar_count > 0)
1019 * We need to set up the Receive Threshold high and low water 1482 for (i = (rar_entries - a->mc_addr_in_rar_count);
1020 * marks as well as (optionally) enabling the transmission of 1483 i < rar_entries; i++)
1021 * XON frames. 1484 ixgbe_disable_rar(hw, i);
1022 */
1023 if (hw->fc.type & ixgbe_fc_tx_pause) {
1024 if (hw->fc.send_xon) {
1025 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
1026 (hw->fc.low_water | IXGBE_FCRTL_XONE));
1027 } else {
1028 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
1029 hw->fc.low_water);
1030 }
1031 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
1032 (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
1033 }
1034 1485
1035 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); 1486 if (a->mta_in_use > 0)
1036 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 1487 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1037 1488
1038 return 0; 1489 return 0;
1039} 1490}
@@ -1049,13 +1500,24 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1049 **/ 1500 **/
1050s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 1501s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
1051{ 1502{
1052 u32 ctrl; 1503 u32 i;
1053 s32 i; 1504 u32 reg_val;
1505 u32 number_of_queues;
1054 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 1506 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
1055 1507
1056 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 1508 /* Disable the receive unit by stopping each queue */
1057 ctrl |= IXGBE_CTRL_GIO_DIS; 1509 number_of_queues = hw->mac.max_rx_queues;
1058 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 1510 for (i = 0; i < number_of_queues; i++) {
1511 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1512 if (reg_val & IXGBE_RXDCTL_ENABLE) {
1513 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1514 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1515 }
1516 }
1517
1518 reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
1519 reg_val |= IXGBE_CTRL_GIO_DIS;
1520 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
1059 1521
1060 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 1522 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
1061 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { 1523 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
@@ -1070,11 +1532,11 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
1070 1532
1071 1533
1072/** 1534/**
1073 * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore 1535 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
1074 * @hw: pointer to hardware structure 1536 * @hw: pointer to hardware structure
1075 * @mask: Mask to specify wich semaphore to acquire 1537 * @mask: Mask to specify which semaphore to acquire
1076 * 1538 *
1077 * Aquires the SWFW semaphore throught the GSSR register for the specified 1539 * Acquires the SWFW semaphore thought the GSSR register for the specified
1078 * function (CSR, PHY0, PHY1, EEPROM, Flash) 1540 * function (CSR, PHY0, PHY1, EEPROM, Flash)
1079 **/ 1541 **/
1080s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 1542s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -1116,9 +1578,9 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
1116/** 1578/**
1117 * ixgbe_release_swfw_sync - Release SWFW semaphore 1579 * ixgbe_release_swfw_sync - Release SWFW semaphore
1118 * @hw: pointer to hardware structure 1580 * @hw: pointer to hardware structure
1119 * @mask: Mask to specify wich semaphore to release 1581 * @mask: Mask to specify which semaphore to release
1120 * 1582 *
1121 * Releases the SWFW semaphore throught the GSSR register for the specified 1583 * Releases the SWFW semaphore thought the GSSR register for the specified
1122 * function (CSR, PHY0, PHY1, EEPROM, Flash) 1584 * function (CSR, PHY0, PHY1, EEPROM, Flash)
1123 **/ 1585 **/
1124void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) 1586void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -1135,45 +1597,3 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
1135 ixgbe_release_eeprom_semaphore(hw); 1597 ixgbe_release_eeprom_semaphore(hw);
1136} 1598}
1137 1599
1138/**
1139 * ixgbe_read_analog_reg8 - Reads 8 bit Atlas analog register
1140 * @hw: pointer to hardware structure
1141 * @reg: analog register to read
1142 * @val: read value
1143 *
1144 * Performs write operation to analog register specified.
1145 **/
1146s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
1147{
1148 u32 atlas_ctl;
1149
1150 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1151 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1152 IXGBE_WRITE_FLUSH(hw);
1153 udelay(10);
1154 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1155 *val = (u8)atlas_ctl;
1156
1157 return 0;
1158}
1159
1160/**
1161 * ixgbe_write_analog_reg8 - Writes 8 bit Atlas analog register
1162 * @hw: pointer to hardware structure
1163 * @reg: atlas register to write
1164 * @val: value to write
1165 *
1166 * Performs write operation to Atlas analog register specified.
1167 **/
1168s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
1169{
1170 u32 atlas_ctl;
1171
1172 atlas_ctl = (reg << 8) | val;
1173 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1174 IXGBE_WRITE_FLUSH(hw);
1175 udelay(10);
1176
1177 return 0;
1178}
1179
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index de6ddd5d04ad..192f8d012911 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -31,34 +30,45 @@
31 30
32#include "ixgbe_type.h" 31#include "ixgbe_type.h"
33 32
34s32 ixgbe_init_hw(struct ixgbe_hw *hw); 33s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
35s32 ixgbe_start_hw(struct ixgbe_hw *hw); 34s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
36s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); 35s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
37s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); 36s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
38s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num); 37s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
38s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
39s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
40s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
41
42s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
43s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
44
45s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
46s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
47s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
48 u16 *data);
49s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
50 u16 *checksum_val);
51s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
52
53s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
54 u32 enable_addr);
55s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
56s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
57s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
58 u32 mc_addr_count,
59 ixgbe_mc_addr_itr func);
60s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
61 u32 addr_count, ixgbe_mc_addr_itr func);
62s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
63s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
39 64
40s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
41s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
42
43s32 ixgbe_init_eeprom(struct ixgbe_hw *hw);
44s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
45s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
46
47s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
48 u32 enable_addr);
49s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
50 u32 mc_addr_count, u32 pad);
51s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
52s32 ixgbe_validate_mac_addr(u8 *mac_addr); 65s32 ixgbe_validate_mac_addr(u8 *mac_addr);
53
54s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num);
55
56s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 66s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
57void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); 67void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
58s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); 68s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
59 69
60s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); 70s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
61s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); 71s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
62 72
63#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 73#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
64 74
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3efe5dda10af..81a9c4b86726 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -48,7 +47,7 @@ struct ixgbe_stats {
48}; 47};
49 48
50#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \ 49#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
51 offsetof(struct ixgbe_adapter, m) 50 offsetof(struct ixgbe_adapter, m)
52static struct ixgbe_stats ixgbe_gstrings_stats[] = { 51static struct ixgbe_stats ixgbe_gstrings_stats[] = {
53 {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}, 52 {"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
54 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, 53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
@@ -95,14 +94,15 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
95}; 94};
96 95
97#define IXGBE_QUEUE_STATS_LEN \ 96#define IXGBE_QUEUE_STATS_LEN \
98 ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ 97 ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
99 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ 98 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
100 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 99 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
101#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 100#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
101#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
102#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) 102#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
103 103
104static int ixgbe_get_settings(struct net_device *netdev, 104static int ixgbe_get_settings(struct net_device *netdev,
105 struct ethtool_cmd *ecmd) 105 struct ethtool_cmd *ecmd)
106{ 106{
107 struct ixgbe_adapter *adapter = netdev_priv(netdev); 107 struct ixgbe_adapter *adapter = netdev_priv(netdev);
108 struct ixgbe_hw *hw = &adapter->hw; 108 struct ixgbe_hw *hw = &adapter->hw;
@@ -114,7 +114,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
114 ecmd->transceiver = XCVR_EXTERNAL; 114 ecmd->transceiver = XCVR_EXTERNAL;
115 if (hw->phy.media_type == ixgbe_media_type_copper) { 115 if (hw->phy.media_type == ixgbe_media_type_copper) {
116 ecmd->supported |= (SUPPORTED_1000baseT_Full | 116 ecmd->supported |= (SUPPORTED_1000baseT_Full |
117 SUPPORTED_TP | SUPPORTED_Autoneg); 117 SUPPORTED_TP | SUPPORTED_Autoneg);
118 118
119 ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg); 119 ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
120 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 120 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
@@ -126,14 +126,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
126 } else { 126 } else {
127 ecmd->supported |= SUPPORTED_FIBRE; 127 ecmd->supported |= SUPPORTED_FIBRE;
128 ecmd->advertising = (ADVERTISED_10000baseT_Full | 128 ecmd->advertising = (ADVERTISED_10000baseT_Full |
129 ADVERTISED_FIBRE); 129 ADVERTISED_FIBRE);
130 ecmd->port = PORT_FIBRE; 130 ecmd->port = PORT_FIBRE;
131 ecmd->autoneg = AUTONEG_DISABLE;
131 } 132 }
132 133
133 adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up); 134 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
134 if (link_up) { 135 if (link_up) {
135 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 136 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
136 SPEED_10000 : SPEED_1000; 137 SPEED_10000 : SPEED_1000;
137 ecmd->duplex = DUPLEX_FULL; 138 ecmd->duplex = DUPLEX_FULL;
138 } else { 139 } else {
139 ecmd->speed = -1; 140 ecmd->speed = -1;
@@ -144,7 +145,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
144} 145}
145 146
146static int ixgbe_set_settings(struct net_device *netdev, 147static int ixgbe_set_settings(struct net_device *netdev,
147 struct ethtool_cmd *ecmd) 148 struct ethtool_cmd *ecmd)
148{ 149{
149 struct ixgbe_adapter *adapter = netdev_priv(netdev); 150 struct ixgbe_adapter *adapter = netdev_priv(netdev);
150 struct ixgbe_hw *hw = &adapter->hw; 151 struct ixgbe_hw *hw = &adapter->hw;
@@ -164,7 +165,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
164} 165}
165 166
166static void ixgbe_get_pauseparam(struct net_device *netdev, 167static void ixgbe_get_pauseparam(struct net_device *netdev,
167 struct ethtool_pauseparam *pause) 168 struct ethtool_pauseparam *pause)
168{ 169{
169 struct ixgbe_adapter *adapter = netdev_priv(netdev); 170 struct ixgbe_adapter *adapter = netdev_priv(netdev);
170 struct ixgbe_hw *hw = &adapter->hw; 171 struct ixgbe_hw *hw = &adapter->hw;
@@ -182,7 +183,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
182} 183}
183 184
184static int ixgbe_set_pauseparam(struct net_device *netdev, 185static int ixgbe_set_pauseparam(struct net_device *netdev,
185 struct ethtool_pauseparam *pause) 186 struct ethtool_pauseparam *pause)
186{ 187{
187 struct ixgbe_adapter *adapter = netdev_priv(netdev); 188 struct ixgbe_adapter *adapter = netdev_priv(netdev);
188 struct ixgbe_hw *hw = &adapter->hw; 189 struct ixgbe_hw *hw = &adapter->hw;
@@ -233,15 +234,15 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
233 234
234static u32 ixgbe_get_tx_csum(struct net_device *netdev) 235static u32 ixgbe_get_tx_csum(struct net_device *netdev)
235{ 236{
236 return (netdev->features & NETIF_F_HW_CSUM) != 0; 237 return (netdev->features & NETIF_F_IP_CSUM) != 0;
237} 238}
238 239
239static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) 240static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
240{ 241{
241 if (data) 242 if (data)
242 netdev->features |= NETIF_F_HW_CSUM; 243 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
243 else 244 else
244 netdev->features &= ~NETIF_F_HW_CSUM; 245 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
245 246
246 return 0; 247 return 0;
247} 248}
@@ -281,7 +282,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
281#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 282#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
282 283
283static void ixgbe_get_regs(struct net_device *netdev, 284static void ixgbe_get_regs(struct net_device *netdev,
284 struct ethtool_regs *regs, void *p) 285 struct ethtool_regs *regs, void *p)
285{ 286{
286 struct ixgbe_adapter *adapter = netdev_priv(netdev); 287 struct ixgbe_adapter *adapter = netdev_priv(netdev);
287 struct ixgbe_hw *hw = &adapter->hw; 288 struct ixgbe_hw *hw = &adapter->hw;
@@ -315,7 +316,9 @@ static void ixgbe_get_regs(struct net_device *netdev,
315 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); 316 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
316 317
317 /* Interrupt */ 318 /* Interrupt */
318 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR); 319 /* don't read EICR because it can clear interrupt causes, instead
320 * read EICS which is a shadow but doesn't clear EICR */
321 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
319 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); 322 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
320 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); 323 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
321 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); 324 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
@@ -325,7 +328,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
325 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); 328 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
326 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); 329 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
327 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); 330 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
328 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL); 331 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
329 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); 332 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
330 333
331 /* Flow Control */ 334 /* Flow Control */
@@ -371,7 +374,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
371 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 374 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
372 for (i = 0; i < 16; i++) 375 for (i = 0; i < 16; i++)
373 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 376 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
374 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE); 377 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
375 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); 378 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
376 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 379 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
377 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); 380 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
@@ -419,7 +422,6 @@ static void ixgbe_get_regs(struct net_device *netdev,
419 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 422 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
420 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT); 423 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT);
421 424
422 /* DCE */
423 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 425 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
424 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 426 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
425 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 427 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -539,21 +541,17 @@ static void ixgbe_get_regs(struct net_device *netdev,
539 /* Diagnostic */ 541 /* Diagnostic */
540 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); 542 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
541 for (i = 0; i < 8; i++) 543 for (i = 0; i < 8; i++)
542 regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 544 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
543 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); 545 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
544 regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0); 546 for (i = 0; i < 4; i++)
545 regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1); 547 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
546 regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2);
547 regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3);
548 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); 548 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
549 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); 549 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
550 for (i = 0; i < 8; i++) 550 for (i = 0; i < 8; i++)
551 regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 551 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
552 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); 552 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
553 regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0); 553 for (i = 0; i < 4; i++)
554 regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1); 554 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
555 regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2);
556 regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3);
557 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); 555 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
558 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); 556 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
559 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); 557 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
@@ -566,7 +564,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
566 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); 564 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
567 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); 565 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
568 for (i = 0; i < 8; i++) 566 for (i = 0; i < 8; i++)
569 regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 567 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
570 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); 568 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
571 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); 569 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
572 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); 570 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
@@ -585,7 +583,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
585} 583}
586 584
587static int ixgbe_get_eeprom(struct net_device *netdev, 585static int ixgbe_get_eeprom(struct net_device *netdev,
588 struct ethtool_eeprom *eeprom, u8 *bytes) 586 struct ethtool_eeprom *eeprom, u8 *bytes)
589{ 587{
590 struct ixgbe_adapter *adapter = netdev_priv(netdev); 588 struct ixgbe_adapter *adapter = netdev_priv(netdev);
591 struct ixgbe_hw *hw = &adapter->hw; 589 struct ixgbe_hw *hw = &adapter->hw;
@@ -608,8 +606,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
608 return -ENOMEM; 606 return -ENOMEM;
609 607
610 for (i = 0; i < eeprom_len; i++) { 608 for (i = 0; i < eeprom_len; i++) {
611 if ((ret_val = ixgbe_read_eeprom(hw, first_word + i, 609 if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
612 &eeprom_buff[i]))) 610 &eeprom_buff[i])))
613 break; 611 break;
614 } 612 }
615 613
@@ -624,7 +622,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
624} 622}
625 623
626static void ixgbe_get_drvinfo(struct net_device *netdev, 624static void ixgbe_get_drvinfo(struct net_device *netdev,
627 struct ethtool_drvinfo *drvinfo) 625 struct ethtool_drvinfo *drvinfo)
628{ 626{
629 struct ixgbe_adapter *adapter = netdev_priv(netdev); 627 struct ixgbe_adapter *adapter = netdev_priv(netdev);
630 628
@@ -637,7 +635,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
637} 635}
638 636
639static void ixgbe_get_ringparam(struct net_device *netdev, 637static void ixgbe_get_ringparam(struct net_device *netdev,
640 struct ethtool_ringparam *ring) 638 struct ethtool_ringparam *ring)
641{ 639{
642 struct ixgbe_adapter *adapter = netdev_priv(netdev); 640 struct ixgbe_adapter *adapter = netdev_priv(netdev);
643 struct ixgbe_ring *tx_ring = adapter->tx_ring; 641 struct ixgbe_ring *tx_ring = adapter->tx_ring;
@@ -654,15 +652,12 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
654} 652}
655 653
656static int ixgbe_set_ringparam(struct net_device *netdev, 654static int ixgbe_set_ringparam(struct net_device *netdev,
657 struct ethtool_ringparam *ring) 655 struct ethtool_ringparam *ring)
658{ 656{
659 struct ixgbe_adapter *adapter = netdev_priv(netdev); 657 struct ixgbe_adapter *adapter = netdev_priv(netdev);
660 struct ixgbe_tx_buffer *old_buf; 658 struct ixgbe_ring *temp_ring;
661 struct ixgbe_rx_buffer *old_rx_buf;
662 void *old_desc;
663 int i, err; 659 int i, err;
664 u32 new_rx_count, new_tx_count, old_size; 660 u32 new_rx_count, new_tx_count;
665 dma_addr_t old_dma;
666 661
667 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 662 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
668 return -EINVAL; 663 return -EINVAL;
@@ -681,6 +676,15 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
681 return 0; 676 return 0;
682 } 677 }
683 678
679 if (adapter->num_tx_queues > adapter->num_rx_queues)
680 temp_ring = vmalloc(adapter->num_tx_queues *
681 sizeof(struct ixgbe_ring));
682 else
683 temp_ring = vmalloc(adapter->num_rx_queues *
684 sizeof(struct ixgbe_ring));
685 if (!temp_ring)
686 return -ENOMEM;
687
684 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 688 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
685 msleep(1); 689 msleep(1);
686 690
@@ -693,66 +697,61 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
693 * to the tx and rx ring structs. 697 * to the tx and rx ring structs.
694 */ 698 */
695 if (new_tx_count != adapter->tx_ring->count) { 699 if (new_tx_count != adapter->tx_ring->count) {
700 memcpy(temp_ring, adapter->tx_ring,
701 adapter->num_tx_queues * sizeof(struct ixgbe_ring));
702
696 for (i = 0; i < adapter->num_tx_queues; i++) { 703 for (i = 0; i < adapter->num_tx_queues; i++) {
697 /* Save existing descriptor ring */ 704 temp_ring[i].count = new_tx_count;
698 old_buf = adapter->tx_ring[i].tx_buffer_info; 705 err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
699 old_desc = adapter->tx_ring[i].desc;
700 old_size = adapter->tx_ring[i].size;
701 old_dma = adapter->tx_ring[i].dma;
702 /* Try to allocate a new one */
703 adapter->tx_ring[i].tx_buffer_info = NULL;
704 adapter->tx_ring[i].desc = NULL;
705 adapter->tx_ring[i].count = new_tx_count;
706 err = ixgbe_setup_tx_resources(adapter,
707 &adapter->tx_ring[i]);
708 if (err) { 706 if (err) {
709 /* Restore the old one so at least 707 while (i) {
710 the adapter still works, even if 708 i--;
711 we failed the request */ 709 ixgbe_free_tx_resources(adapter,
712 adapter->tx_ring[i].tx_buffer_info = old_buf; 710 &temp_ring[i]);
713 adapter->tx_ring[i].desc = old_desc; 711 }
714 adapter->tx_ring[i].size = old_size;
715 adapter->tx_ring[i].dma = old_dma;
716 goto err_setup; 712 goto err_setup;
717 } 713 }
718 /* Free the old buffer manually */
719 vfree(old_buf);
720 pci_free_consistent(adapter->pdev, old_size,
721 old_desc, old_dma);
722 } 714 }
715
716 for (i = 0; i < adapter->num_tx_queues; i++)
717 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
718
719 memcpy(adapter->tx_ring, temp_ring,
720 adapter->num_tx_queues * sizeof(struct ixgbe_ring));
721
722 adapter->tx_ring_count = new_tx_count;
723 } 723 }
724 724
725 if (new_rx_count != adapter->rx_ring->count) { 725 if (new_rx_count != adapter->rx_ring->count) {
726 for (i = 0; i < adapter->num_rx_queues; i++) { 726 memcpy(temp_ring, adapter->rx_ring,
727 adapter->num_rx_queues * sizeof(struct ixgbe_ring));
727 728
728 old_rx_buf = adapter->rx_ring[i].rx_buffer_info; 729 for (i = 0; i < adapter->num_rx_queues; i++) {
729 old_desc = adapter->rx_ring[i].desc; 730 temp_ring[i].count = new_rx_count;
730 old_size = adapter->rx_ring[i].size; 731 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
731 old_dma = adapter->rx_ring[i].dma;
732
733 adapter->rx_ring[i].rx_buffer_info = NULL;
734 adapter->rx_ring[i].desc = NULL;
735 adapter->rx_ring[i].dma = 0;
736 adapter->rx_ring[i].count = new_rx_count;
737 err = ixgbe_setup_rx_resources(adapter,
738 &adapter->rx_ring[i]);
739 if (err) { 732 if (err) {
740 adapter->rx_ring[i].rx_buffer_info = old_rx_buf; 733 while (i) {
741 adapter->rx_ring[i].desc = old_desc; 734 i--;
742 adapter->rx_ring[i].size = old_size; 735 ixgbe_free_rx_resources(adapter,
743 adapter->rx_ring[i].dma = old_dma; 736 &temp_ring[i]);
737 }
744 goto err_setup; 738 goto err_setup;
745 } 739 }
746
747 vfree(old_rx_buf);
748 pci_free_consistent(adapter->pdev, old_size, old_desc,
749 old_dma);
750 } 740 }
741
742 for (i = 0; i < adapter->num_rx_queues; i++)
743 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
744
745 memcpy(adapter->rx_ring, temp_ring,
746 adapter->num_rx_queues * sizeof(struct ixgbe_ring));
747
748 adapter->rx_ring_count = new_rx_count;
751 } 749 }
752 750
751 /* success! */
753 err = 0; 752 err = 0;
754err_setup: 753err_setup:
755 if (netif_running(adapter->netdev)) 754 if (netif_running(netdev))
756 ixgbe_up(adapter); 755 ixgbe_up(adapter);
757 756
758 clear_bit(__IXGBE_RESETTING, &adapter->state); 757 clear_bit(__IXGBE_RESETTING, &adapter->state);
@@ -770,7 +769,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
770} 769}
771 770
772static void ixgbe_get_ethtool_stats(struct net_device *netdev, 771static void ixgbe_get_ethtool_stats(struct net_device *netdev,
773 struct ethtool_stats *stats, u64 *data) 772 struct ethtool_stats *stats, u64 *data)
774{ 773{
775 struct ixgbe_adapter *adapter = netdev_priv(netdev); 774 struct ixgbe_adapter *adapter = netdev_priv(netdev);
776 u64 *queue_stat; 775 u64 *queue_stat;
@@ -778,12 +777,20 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
778 int j, k; 777 int j, k;
779 int i; 778 int i;
780 u64 aggregated = 0, flushed = 0, no_desc = 0; 779 u64 aggregated = 0, flushed = 0, no_desc = 0;
780 for (i = 0; i < adapter->num_rx_queues; i++) {
781 aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
782 flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
783 no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
784 }
785 adapter->lro_aggregated = aggregated;
786 adapter->lro_flushed = flushed;
787 adapter->lro_no_desc = no_desc;
781 788
782 ixgbe_update_stats(adapter); 789 ixgbe_update_stats(adapter);
783 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 790 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
784 char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset; 791 char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
785 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 792 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
786 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 793 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
787 } 794 }
788 for (j = 0; j < adapter->num_tx_queues; j++) { 795 for (j = 0; j < adapter->num_tx_queues; j++) {
789 queue_stat = (u64 *)&adapter->tx_ring[j].stats; 796 queue_stat = (u64 *)&adapter->tx_ring[j].stats;
@@ -792,24 +799,18 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
792 i += k; 799 i += k;
793 } 800 }
794 for (j = 0; j < adapter->num_rx_queues; j++) { 801 for (j = 0; j < adapter->num_rx_queues; j++) {
795 aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated;
796 flushed += adapter->rx_ring[j].lro_mgr.stats.flushed;
797 no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc;
798 queue_stat = (u64 *)&adapter->rx_ring[j].stats; 802 queue_stat = (u64 *)&adapter->rx_ring[j].stats;
799 for (k = 0; k < stat_count; k++) 803 for (k = 0; k < stat_count; k++)
800 data[i + k] = queue_stat[k]; 804 data[i + k] = queue_stat[k];
801 i += k; 805 i += k;
802 } 806 }
803 adapter->lro_aggregated = aggregated;
804 adapter->lro_flushed = flushed;
805 adapter->lro_no_desc = no_desc;
806} 807}
807 808
808static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 809static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
809 u8 *data) 810 u8 *data)
810{ 811{
811 struct ixgbe_adapter *adapter = netdev_priv(netdev); 812 struct ixgbe_adapter *adapter = netdev_priv(netdev);
812 u8 *p = data; 813 char *p = (char *)data;
813 int i; 814 int i;
814 815
815 switch (stringset) { 816 switch (stringset) {
@@ -831,14 +832,14 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
831 sprintf(p, "rx_queue_%u_bytes", i); 832 sprintf(p, "rx_queue_%u_bytes", i);
832 p += ETH_GSTRING_LEN; 833 p += ETH_GSTRING_LEN;
833 } 834 }
834/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 835 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
835 break; 836 break;
836 } 837 }
837} 838}
838 839
839 840
840static void ixgbe_get_wol(struct net_device *netdev, 841static void ixgbe_get_wol(struct net_device *netdev,
841 struct ethtool_wolinfo *wol) 842 struct ethtool_wolinfo *wol)
842{ 843{
843 wol->supported = 0; 844 wol->supported = 0;
844 wol->wolopts = 0; 845 wol->wolopts = 0;
@@ -859,16 +860,17 @@ static int ixgbe_nway_reset(struct net_device *netdev)
859static int ixgbe_phys_id(struct net_device *netdev, u32 data) 860static int ixgbe_phys_id(struct net_device *netdev, u32 data)
860{ 861{
861 struct ixgbe_adapter *adapter = netdev_priv(netdev); 862 struct ixgbe_adapter *adapter = netdev_priv(netdev);
862 u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL); 863 struct ixgbe_hw *hw = &adapter->hw;
864 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
863 u32 i; 865 u32 i;
864 866
865 if (!data || data > 300) 867 if (!data || data > 300)
866 data = 300; 868 data = 300;
867 869
868 for (i = 0; i < (data * 1000); i += 400) { 870 for (i = 0; i < (data * 1000); i += 400) {
869 ixgbe_led_on(&adapter->hw, IXGBE_LED_ON); 871 hw->mac.ops.led_on(hw, IXGBE_LED_ON);
870 msleep_interruptible(200); 872 msleep_interruptible(200);
871 ixgbe_led_off(&adapter->hw, IXGBE_LED_ON); 873 hw->mac.ops.led_off(hw, IXGBE_LED_ON);
872 msleep_interruptible(200); 874 msleep_interruptible(200);
873 } 875 }
874 876
@@ -879,67 +881,75 @@ static int ixgbe_phys_id(struct net_device *netdev, u32 data)
879} 881}
880 882
881static int ixgbe_get_coalesce(struct net_device *netdev, 883static int ixgbe_get_coalesce(struct net_device *netdev,
882 struct ethtool_coalesce *ec) 884 struct ethtool_coalesce *ec)
883{ 885{
884 struct ixgbe_adapter *adapter = netdev_priv(netdev); 886 struct ixgbe_adapter *adapter = netdev_priv(netdev);
885 887
886 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
887 ec->rx_coalesce_usecs = adapter->rx_eitr;
888 else
889 ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;
890
891 if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS)
892 ec->tx_coalesce_usecs = adapter->tx_eitr;
893 else
894 ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;
895
896 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit; 888 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
889
890 /* only valid if in constant ITR mode */
891 switch (adapter->itr_setting) {
892 case 0:
893 /* throttling disabled */
894 ec->rx_coalesce_usecs = 0;
895 break;
896 case 1:
897 /* dynamic ITR mode */
898 ec->rx_coalesce_usecs = 1;
899 break;
900 default:
901 /* fixed interrupt rate mode */
902 ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
903 break;
904 }
897 return 0; 905 return 0;
898} 906}
899 907
900static int ixgbe_set_coalesce(struct net_device *netdev, 908static int ixgbe_set_coalesce(struct net_device *netdev,
901 struct ethtool_coalesce *ec) 909 struct ethtool_coalesce *ec)
902{ 910{
903 struct ixgbe_adapter *adapter = netdev_priv(netdev); 911 struct ixgbe_adapter *adapter = netdev_priv(netdev);
904 912 struct ixgbe_hw *hw = &adapter->hw;
905 if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || 913 int i;
906 ((ec->rx_coalesce_usecs != 0) &&
907 (ec->rx_coalesce_usecs != 1) &&
908 (ec->rx_coalesce_usecs != 3) &&
909 (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
910 return -EINVAL;
911 if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
912 ((ec->tx_coalesce_usecs != 0) &&
913 (ec->tx_coalesce_usecs != 1) &&
914 (ec->tx_coalesce_usecs != 3) &&
915 (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
916 return -EINVAL;
917
918 /* convert to rate of irq's per second */
919 if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
920 adapter->rx_eitr = ec->rx_coalesce_usecs;
921 else
922 adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);
923
924 if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
925 adapter->tx_eitr = ec->rx_coalesce_usecs;
926 else
927 adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);
928 914
929 if (ec->tx_max_coalesced_frames_irq) 915 if (ec->tx_max_coalesced_frames_irq)
930 adapter->tx_ring[0].work_limit = 916 adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
931 ec->tx_max_coalesced_frames_irq; 917
918 if (ec->rx_coalesce_usecs > 1) {
919 /* store the value in ints/second */
920 adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
921
922 /* static value of interrupt rate */
923 adapter->itr_setting = adapter->eitr_param;
924 /* clear the lower bit */
925 adapter->itr_setting &= ~1;
926 } else if (ec->rx_coalesce_usecs == 1) {
927 /* 1 means dynamic mode */
928 adapter->eitr_param = 20000;
929 adapter->itr_setting = 1;
930 } else {
931 /* any other value means disable eitr, which is best
932 * served by setting the interrupt rate very high */
933 adapter->eitr_param = 3000000;
934 adapter->itr_setting = 0;
935 }
932 936
933 if (netif_running(netdev)) { 937 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
934 ixgbe_down(adapter); 938 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
935 ixgbe_up(adapter); 939 if (q_vector->txr_count && !q_vector->rxr_count)
940 q_vector->eitr = (adapter->eitr_param >> 1);
941 else
942 /* rx only or mixed */
943 q_vector->eitr = adapter->eitr_param;
944 IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
945 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
936 } 946 }
937 947
938 return 0; 948 return 0;
939} 949}
940 950
941 951
942static struct ethtool_ops ixgbe_ethtool_ops = { 952static const struct ethtool_ops ixgbe_ethtool_ops = {
943 .get_settings = ixgbe_get_settings, 953 .get_settings = ixgbe_get_settings,
944 .set_settings = ixgbe_set_settings, 954 .set_settings = ixgbe_set_settings,
945 .get_drvinfo = ixgbe_get_drvinfo, 955 .get_drvinfo = ixgbe_get_drvinfo,
@@ -966,7 +976,7 @@ static struct ethtool_ops ixgbe_ethtool_ops = {
966 .set_tso = ixgbe_set_tso, 976 .set_tso = ixgbe_set_tso,
967 .get_strings = ixgbe_get_strings, 977 .get_strings = ixgbe_get_strings,
968 .phys_id = ixgbe_phys_id, 978 .phys_id = ixgbe_phys_id,
969 .get_sset_count = ixgbe_get_sset_count, 979 .get_sset_count = ixgbe_get_sset_count,
970 .get_ethtool_stats = ixgbe_get_ethtool_stats, 980 .get_ethtool_stats = ixgbe_get_ethtool_stats,
971 .get_coalesce = ixgbe_get_coalesce, 981 .get_coalesce = ixgbe_get_coalesce,
972 .set_coalesce = ixgbe_set_coalesce, 982 .set_coalesce = ixgbe_set_coalesce,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 53f41b649f03..ca17af4349d0 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -46,15 +45,14 @@
46 45
47char ixgbe_driver_name[] = "ixgbe"; 46char ixgbe_driver_name[] = "ixgbe";
48static const char ixgbe_driver_string[] = 47static const char ixgbe_driver_string[] =
49 "Intel(R) 10 Gigabit PCI Express Network Driver"; 48 "Intel(R) 10 Gigabit PCI Express Network Driver";
50 49
51#define DRV_VERSION "1.3.18-k4" 50#define DRV_VERSION "1.3.30-k2"
52const char ixgbe_driver_version[] = DRV_VERSION; 51const char ixgbe_driver_version[] = DRV_VERSION;
53static const char ixgbe_copyright[] = 52static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
54 "Copyright (c) 1999-2007 Intel Corporation.";
55 53
56static const struct ixgbe_info *ixgbe_info_tbl[] = { 54static const struct ixgbe_info *ixgbe_info_tbl[] = {
57 [board_82598] = &ixgbe_82598_info, 55 [board_82598] = &ixgbe_82598_info,
58}; 56};
59 57
60/* ixgbe_pci_tbl - PCI Device ID Table 58/* ixgbe_pci_tbl - PCI Device ID Table
@@ -74,15 +72,17 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
74 board_82598 }, 72 board_82598 },
75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), 73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
76 board_82598 }, 74 board_82598 },
75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
76 board_82598 },
77 77
78 /* required last entry */ 78 /* required last entry */
79 {0, } 79 {0, }
80}; 80};
81MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); 81MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
82 82
83#ifdef CONFIG_DCA 83#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
84static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, 84static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
85 void *p); 85 void *p);
86static struct notifier_block dca_notifier = { 86static struct notifier_block dca_notifier = {
87 .notifier_call = ixgbe_notify_dca, 87 .notifier_call = ixgbe_notify_dca,
88 .next = NULL, 88 .next = NULL,
@@ -104,7 +104,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
104 /* Let firmware take over control of h/w */ 104 /* Let firmware take over control of h/w */
105 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 105 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
107 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); 107 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
108} 108}
109 109
110static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) 110static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -114,24 +114,11 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
114 /* Let firmware know the driver has taken over */ 114 /* Let firmware know the driver has taken over */
115 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 115 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
117 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 117 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
118} 118}
119 119
120#ifdef DEBUG
121/**
122 * ixgbe_get_hw_dev_name - return device name string
123 * used by hardware layer to print debugging information
124 **/
125char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
126{
127 struct ixgbe_adapter *adapter = hw->back;
128 struct net_device *netdev = adapter->netdev;
129 return netdev->name;
130}
131#endif
132
133static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, 120static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
134 u8 msix_vector) 121 u8 msix_vector)
135{ 122{
136 u32 ivar, index; 123 u32 ivar, index;
137 124
@@ -144,13 +131,12 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
144} 131}
145 132
146static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 133static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
147 struct ixgbe_tx_buffer 134 struct ixgbe_tx_buffer
148 *tx_buffer_info) 135 *tx_buffer_info)
149{ 136{
150 if (tx_buffer_info->dma) { 137 if (tx_buffer_info->dma) {
151 pci_unmap_page(adapter->pdev, 138 pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
152 tx_buffer_info->dma, 139 tx_buffer_info->length, PCI_DMA_TODEVICE);
153 tx_buffer_info->length, PCI_DMA_TODEVICE);
154 tx_buffer_info->dma = 0; 140 tx_buffer_info->dma = 0;
155 } 141 }
156 if (tx_buffer_info->skb) { 142 if (tx_buffer_info->skb) {
@@ -161,107 +147,120 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
161} 147}
162 148
163static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, 149static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
164 struct ixgbe_ring *tx_ring, 150 struct ixgbe_ring *tx_ring,
165 unsigned int eop, 151 unsigned int eop)
166 union ixgbe_adv_tx_desc *eop_desc)
167{ 152{
153 struct ixgbe_hw *hw = &adapter->hw;
154 u32 head, tail;
155
168 /* Detect a transmit hang in hardware, this serializes the 156 /* Detect a transmit hang in hardware, this serializes the
169 * check with the clearing of time_stamp and movement of i */ 157 * check with the clearing of time_stamp and movement of eop */
158 head = IXGBE_READ_REG(hw, tx_ring->head);
159 tail = IXGBE_READ_REG(hw, tx_ring->tail);
170 adapter->detect_tx_hung = false; 160 adapter->detect_tx_hung = false;
171 if (tx_ring->tx_buffer_info[eop].dma && 161 if ((head != tail) &&
162 tx_ring->tx_buffer_info[eop].time_stamp &&
172 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && 163 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
173 !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) { 164 !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
174 /* detected Tx unit hang */ 165 /* detected Tx unit hang */
166 union ixgbe_adv_tx_desc *tx_desc;
167 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
175 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 168 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
176 " TDH <%x>\n" 169 " Tx Queue <%d>\n"
177 " TDT <%x>\n" 170 " TDH, TDT <%x>, <%x>\n"
178 " next_to_use <%x>\n" 171 " next_to_use <%x>\n"
179 " next_to_clean <%x>\n" 172 " next_to_clean <%x>\n"
180 "tx_buffer_info[next_to_clean]\n" 173 "tx_buffer_info[next_to_clean]\n"
181 " time_stamp <%lx>\n" 174 " time_stamp <%lx>\n"
182 " next_to_watch <%x>\n" 175 " jiffies <%lx>\n",
183 " jiffies <%lx>\n" 176 tx_ring->queue_index,
184 " next_to_watch.status <%x>\n", 177 head, tail,
185 readl(adapter->hw.hw_addr + tx_ring->head), 178 tx_ring->next_to_use, eop,
186 readl(adapter->hw.hw_addr + tx_ring->tail), 179 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
187 tx_ring->next_to_use,
188 tx_ring->next_to_clean,
189 tx_ring->tx_buffer_info[eop].time_stamp,
190 eop, jiffies, eop_desc->wb.status);
191 return true; 180 return true;
192 } 181 }
193 182
194 return false; 183 return false;
195} 184}
196 185
197#define IXGBE_MAX_TXD_PWR 14 186#define IXGBE_MAX_TXD_PWR 14
198#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 187#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
199 188
200/* Tx Descriptors needed, worst case */ 189/* Tx Descriptors needed, worst case */
201#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ 190#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
202 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) 191 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
203#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ 192#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
204 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ 193 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
194
195#define GET_TX_HEAD_FROM_RING(ring) (\
196 *(volatile u32 *) \
197 ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
198static void ixgbe_tx_timeout(struct net_device *netdev);
205 199
206/** 200/**
207 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes 201 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
208 * @adapter: board private structure 202 * @adapter: board private structure
203 * @tx_ring: tx ring to clean
209 **/ 204 **/
210static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, 205static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
211 struct ixgbe_ring *tx_ring) 206 struct ixgbe_ring *tx_ring)
212{ 207{
213 struct net_device *netdev = adapter->netdev; 208 union ixgbe_adv_tx_desc *tx_desc;
214 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
215 struct ixgbe_tx_buffer *tx_buffer_info; 209 struct ixgbe_tx_buffer *tx_buffer_info;
216 unsigned int i, eop; 210 struct net_device *netdev = adapter->netdev;
217 bool cleaned = false; 211 struct sk_buff *skb;
218 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 212 unsigned int i;
213 u32 head, oldhead;
214 unsigned int count = 0;
215 unsigned int total_bytes = 0, total_packets = 0;
219 216
217 rmb();
218 head = GET_TX_HEAD_FROM_RING(tx_ring);
219 head = le32_to_cpu(head);
220 i = tx_ring->next_to_clean; 220 i = tx_ring->next_to_clean;
221 eop = tx_ring->tx_buffer_info[i].next_to_watch; 221 while (1) {
222 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 222 while (i != head) {
223 while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
224 cleaned = false;
225 while (!cleaned) {
226 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 223 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
227 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 224 tx_buffer_info = &tx_ring->tx_buffer_info[i];
228 cleaned = (i == eop); 225 skb = tx_buffer_info->skb;
229 226
230 tx_ring->stats.bytes += tx_buffer_info->length; 227 if (skb) {
231 if (cleaned) {
232 struct sk_buff *skb = tx_buffer_info->skb;
233 unsigned int segs, bytecount; 228 unsigned int segs, bytecount;
229
230 /* gso_segs is currently only valid for tcp */
234 segs = skb_shinfo(skb)->gso_segs ?: 1; 231 segs = skb_shinfo(skb)->gso_segs ?: 1;
235 /* multiply data chunks by size of headers */ 232 /* multiply data chunks by size of headers */
236 bytecount = ((segs - 1) * skb_headlen(skb)) + 233 bytecount = ((segs - 1) * skb_headlen(skb)) +
237 skb->len; 234 skb->len;
238 total_tx_packets += segs; 235 total_packets += segs;
239 total_tx_bytes += bytecount; 236 total_bytes += bytecount;
240 } 237 }
238
241 ixgbe_unmap_and_free_tx_resource(adapter, 239 ixgbe_unmap_and_free_tx_resource(adapter,
242 tx_buffer_info); 240 tx_buffer_info);
243 tx_desc->wb.status = 0;
244 241
245 i++; 242 i++;
246 if (i == tx_ring->count) 243 if (i == tx_ring->count)
247 i = 0; 244 i = 0;
248 }
249
250 tx_ring->stats.packets++;
251
252 eop = tx_ring->tx_buffer_info[i].next_to_watch;
253 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
254
255 /* weight of a sort for tx, avoid endless transmit cleanup */
256 if (total_tx_packets >= tx_ring->work_limit)
257 break;
258 }
259 245
246 count++;
247 if (count == tx_ring->count)
248 goto done_cleaning;
249 }
250 oldhead = head;
251 rmb();
252 head = GET_TX_HEAD_FROM_RING(tx_ring);
253 head = le32_to_cpu(head);
254 if (head == oldhead)
255 goto done_cleaning;
256 } /* while (1) */
257
258done_cleaning:
260 tx_ring->next_to_clean = i; 259 tx_ring->next_to_clean = i;
261 260
262#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 261#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
263 if (total_tx_packets && netif_carrier_ok(netdev) && 262 if (unlikely(count && netif_carrier_ok(netdev) &&
264 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 263 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
265 /* Make sure that anybody stopping the queue after this 264 /* Make sure that anybody stopping the queue after this
266 * sees the new next_to_clean. 265 * sees the new next_to_clean.
267 */ 266 */
@@ -269,59 +268,68 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
269 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 268 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
270 !test_bit(__IXGBE_DOWN, &adapter->state)) { 269 !test_bit(__IXGBE_DOWN, &adapter->state)) {
271 netif_wake_subqueue(netdev, tx_ring->queue_index); 270 netif_wake_subqueue(netdev, tx_ring->queue_index);
272 adapter->restart_queue++; 271 ++adapter->restart_queue;
273 } 272 }
274 } 273 }
275 274
276 if (adapter->detect_tx_hung) 275 if (adapter->detect_tx_hung) {
277 if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) 276 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
278 netif_stop_subqueue(netdev, tx_ring->queue_index); 277 /* schedule immediate reset if we believe we hung */
279 278 DPRINTK(PROBE, INFO,
280 if (total_tx_packets >= tx_ring->work_limit) 279 "tx hang %d detected, resetting adapter\n",
281 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); 280 adapter->tx_timeout_count + 1);
281 ixgbe_tx_timeout(adapter->netdev);
282 }
283 }
282 284
283 tx_ring->total_bytes += total_tx_bytes; 285 /* re-arm the interrupt */
284 tx_ring->total_packets += total_tx_packets; 286 if ((total_packets >= tx_ring->work_limit) ||
285 adapter->net_stats.tx_bytes += total_tx_bytes; 287 (count == tx_ring->count))
286 adapter->net_stats.tx_packets += total_tx_packets; 288 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
287 cleaned = total_tx_packets ? true : false; 289
288 return cleaned; 290 tx_ring->total_bytes += total_bytes;
291 tx_ring->total_packets += total_packets;
292 tx_ring->stats.bytes += total_bytes;
293 tx_ring->stats.packets += total_packets;
294 adapter->net_stats.tx_bytes += total_bytes;
295 adapter->net_stats.tx_packets += total_packets;
296 return (total_packets ? true : false);
289} 297}
290 298
291#ifdef CONFIG_DCA 299#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
292static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 300static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
293 struct ixgbe_ring *rxr) 301 struct ixgbe_ring *rx_ring)
294{ 302{
295 u32 rxctrl; 303 u32 rxctrl;
296 int cpu = get_cpu(); 304 int cpu = get_cpu();
297 int q = rxr - adapter->rx_ring; 305 int q = rx_ring - adapter->rx_ring;
298 306
299 if (rxr->cpu != cpu) { 307 if (rx_ring->cpu != cpu) {
300 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); 308 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
301 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; 309 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
302 rxctrl |= dca_get_tag(cpu); 310 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
303 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 311 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
304 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 312 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
305 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); 313 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
306 rxr->cpu = cpu; 314 rx_ring->cpu = cpu;
307 } 315 }
308 put_cpu(); 316 put_cpu();
309} 317}
310 318
311static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 319static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
312 struct ixgbe_ring *txr) 320 struct ixgbe_ring *tx_ring)
313{ 321{
314 u32 txctrl; 322 u32 txctrl;
315 int cpu = get_cpu(); 323 int cpu = get_cpu();
316 int q = txr - adapter->tx_ring; 324 int q = tx_ring - adapter->tx_ring;
317 325
318 if (txr->cpu != cpu) { 326 if (tx_ring->cpu != cpu) {
319 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q)); 327 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
320 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 328 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
321 txctrl |= dca_get_tag(cpu); 329 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
322 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 330 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
323 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl); 331 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
324 txr->cpu = cpu; 332 tx_ring->cpu = cpu;
325 } 333 }
326 put_cpu(); 334 put_cpu();
327} 335}
@@ -351,11 +359,14 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
351 359
352 switch (event) { 360 switch (event) {
353 case DCA_PROVIDER_ADD: 361 case DCA_PROVIDER_ADD:
354 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 362 /* if we're already enabled, don't do it again */
363 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
364 break;
355 /* Always use CB2 mode, difference is masked 365 /* Always use CB2 mode, difference is masked
356 * in the CB driver. */ 366 * in the CB driver. */
357 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); 367 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
358 if (dca_add_requester(dev) == 0) { 368 if (dca_add_requester(dev) == 0) {
369 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
359 ixgbe_setup_dca(adapter); 370 ixgbe_setup_dca(adapter);
360 break; 371 break;
361 } 372 }
@@ -372,7 +383,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
372 return 0; 383 return 0;
373} 384}
374 385
375#endif /* CONFIG_DCA */ 386#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
376/** 387/**
377 * ixgbe_receive_skb - Send a completed packet up the stack 388 * ixgbe_receive_skb - Send a completed packet up the stack
378 * @adapter: board private structure 389 * @adapter: board private structure
@@ -382,8 +393,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
382 * @rx_desc: rx descriptor 393 * @rx_desc: rx descriptor
383 **/ 394 **/
384static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, 395static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
385 struct sk_buff *skb, u8 status, 396 struct sk_buff *skb, u8 status,
386 struct ixgbe_ring *ring, 397 struct ixgbe_ring *ring,
387 union ixgbe_adv_rx_desc *rx_desc) 398 union ixgbe_adv_rx_desc *rx_desc)
388{ 399{
389 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 400 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
@@ -420,14 +431,12 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
420 * @skb: skb currently being received and modified 431 * @skb: skb currently being received and modified
421 **/ 432 **/
422static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, 433static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
423 u32 status_err, 434 u32 status_err, struct sk_buff *skb)
424 struct sk_buff *skb)
425{ 435{
426 skb->ip_summed = CHECKSUM_NONE; 436 skb->ip_summed = CHECKSUM_NONE;
427 437
428 /* Ignore Checksum bit is set, or rx csum disabled */ 438 /* Rx csum disabled */
429 if ((status_err & IXGBE_RXD_STAT_IXSM) || 439 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
430 !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
431 return; 440 return;
432 441
433 /* if IP and error */ 442 /* if IP and error */
@@ -455,37 +464,44 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
455 * @adapter: address of board private structure 464 * @adapter: address of board private structure
456 **/ 465 **/
457static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 466static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
458 struct ixgbe_ring *rx_ring, 467 struct ixgbe_ring *rx_ring,
459 int cleaned_count) 468 int cleaned_count)
460{ 469{
461 struct net_device *netdev = adapter->netdev;
462 struct pci_dev *pdev = adapter->pdev; 470 struct pci_dev *pdev = adapter->pdev;
463 union ixgbe_adv_rx_desc *rx_desc; 471 union ixgbe_adv_rx_desc *rx_desc;
464 struct ixgbe_rx_buffer *rx_buffer_info; 472 struct ixgbe_rx_buffer *bi;
465 struct sk_buff *skb;
466 unsigned int i; 473 unsigned int i;
467 unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN; 474 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
468 475
469 i = rx_ring->next_to_use; 476 i = rx_ring->next_to_use;
470 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 477 bi = &rx_ring->rx_buffer_info[i];
471 478
472 while (cleaned_count--) { 479 while (cleaned_count--) {
473 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 480 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
474 481
475 if (!rx_buffer_info->page && 482 if (!bi->page_dma &&
476 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { 483 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
477 rx_buffer_info->page = alloc_page(GFP_ATOMIC); 484 if (!bi->page) {
478 if (!rx_buffer_info->page) { 485 bi->page = alloc_page(GFP_ATOMIC);
479 adapter->alloc_rx_page_failed++; 486 if (!bi->page) {
480 goto no_buffers; 487 adapter->alloc_rx_page_failed++;
488 goto no_buffers;
489 }
490 bi->page_offset = 0;
491 } else {
492 /* use a half page if we're re-using */
493 bi->page_offset ^= (PAGE_SIZE / 2);
481 } 494 }
482 rx_buffer_info->page_dma = 495
483 pci_map_page(pdev, rx_buffer_info->page, 496 bi->page_dma = pci_map_page(pdev, bi->page,
484 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); 497 bi->page_offset,
498 (PAGE_SIZE / 2),
499 PCI_DMA_FROMDEVICE);
485 } 500 }
486 501
487 if (!rx_buffer_info->skb) { 502 if (!bi->skb) {
488 skb = netdev_alloc_skb(netdev, bufsz); 503 struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
504 bufsz);
489 505
490 if (!skb) { 506 if (!skb) {
491 adapter->alloc_rx_buff_failed++; 507 adapter->alloc_rx_buff_failed++;
@@ -499,28 +515,25 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
499 */ 515 */
500 skb_reserve(skb, NET_IP_ALIGN); 516 skb_reserve(skb, NET_IP_ALIGN);
501 517
502 rx_buffer_info->skb = skb; 518 bi->skb = skb;
503 rx_buffer_info->dma = pci_map_single(pdev, skb->data, 519 bi->dma = pci_map_single(pdev, skb->data, bufsz,
504 bufsz, 520 PCI_DMA_FROMDEVICE);
505 PCI_DMA_FROMDEVICE);
506 } 521 }
507 /* Refresh the desc even if buffer_addrs didn't change because 522 /* Refresh the desc even if buffer_addrs didn't change because
508 * each write-back erases this info. */ 523 * each write-back erases this info. */
509 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 524 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
510 rx_desc->read.pkt_addr = 525 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
511 cpu_to_le64(rx_buffer_info->page_dma); 526 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
512 rx_desc->read.hdr_addr =
513 cpu_to_le64(rx_buffer_info->dma);
514 } else { 527 } else {
515 rx_desc->read.pkt_addr = 528 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
516 cpu_to_le64(rx_buffer_info->dma);
517 } 529 }
518 530
519 i++; 531 i++;
520 if (i == rx_ring->count) 532 if (i == rx_ring->count)
521 i = 0; 533 i = 0;
522 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 534 bi = &rx_ring->rx_buffer_info[i];
523 } 535 }
536
524no_buffers: 537no_buffers:
525 if (rx_ring->next_to_use != i) { 538 if (rx_ring->next_to_use != i) {
526 rx_ring->next_to_use = i; 539 rx_ring->next_to_use = i;
@@ -538,46 +551,54 @@ no_buffers:
538 } 551 }
539} 552}
540 553
554static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
555{
556 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
557}
558
559static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
560{
561 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
562}
563
541static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, 564static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
542 struct ixgbe_ring *rx_ring, 565 struct ixgbe_ring *rx_ring,
543 int *work_done, int work_to_do) 566 int *work_done, int work_to_do)
544{ 567{
545 struct net_device *netdev = adapter->netdev;
546 struct pci_dev *pdev = adapter->pdev; 568 struct pci_dev *pdev = adapter->pdev;
547 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 569 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
548 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 570 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
549 struct sk_buff *skb; 571 struct sk_buff *skb;
550 unsigned int i; 572 unsigned int i;
551 u32 upper_len, len, staterr; 573 u32 len, staterr;
552 u16 hdr_info; 574 u16 hdr_info;
553 bool cleaned = false; 575 bool cleaned = false;
554 int cleaned_count = 0; 576 int cleaned_count = 0;
555 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 577 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
556 578
557 i = rx_ring->next_to_clean; 579 i = rx_ring->next_to_clean;
558 upper_len = 0;
559 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 580 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
560 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 581 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
561 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 582 rx_buffer_info = &rx_ring->rx_buffer_info[i];
562 583
563 while (staterr & IXGBE_RXD_STAT_DD) { 584 while (staterr & IXGBE_RXD_STAT_DD) {
585 u32 upper_len = 0;
564 if (*work_done >= work_to_do) 586 if (*work_done >= work_to_do)
565 break; 587 break;
566 (*work_done)++; 588 (*work_done)++;
567 589
568 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 590 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
569 hdr_info = 591 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
570 le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info); 592 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
571 len = 593 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
572 ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
573 IXGBE_RXDADV_HDRBUFLEN_SHIFT);
574 if (hdr_info & IXGBE_RXDADV_SPH) 594 if (hdr_info & IXGBE_RXDADV_SPH)
575 adapter->rx_hdr_split++; 595 adapter->rx_hdr_split++;
576 if (len > IXGBE_RX_HDR_SIZE) 596 if (len > IXGBE_RX_HDR_SIZE)
577 len = IXGBE_RX_HDR_SIZE; 597 len = IXGBE_RX_HDR_SIZE;
578 upper_len = le16_to_cpu(rx_desc->wb.upper.length); 598 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
579 } else 599 } else {
580 len = le16_to_cpu(rx_desc->wb.upper.length); 600 len = le16_to_cpu(rx_desc->wb.upper.length);
601 }
581 602
582 cleaned = true; 603 cleaned = true;
583 skb = rx_buffer_info->skb; 604 skb = rx_buffer_info->skb;
@@ -586,18 +607,25 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
586 607
587 if (len && !skb_shinfo(skb)->nr_frags) { 608 if (len && !skb_shinfo(skb)->nr_frags) {
588 pci_unmap_single(pdev, rx_buffer_info->dma, 609 pci_unmap_single(pdev, rx_buffer_info->dma,
589 adapter->rx_buf_len + NET_IP_ALIGN, 610 rx_ring->rx_buf_len + NET_IP_ALIGN,
590 PCI_DMA_FROMDEVICE); 611 PCI_DMA_FROMDEVICE);
591 skb_put(skb, len); 612 skb_put(skb, len);
592 } 613 }
593 614
594 if (upper_len) { 615 if (upper_len) {
595 pci_unmap_page(pdev, rx_buffer_info->page_dma, 616 pci_unmap_page(pdev, rx_buffer_info->page_dma,
596 PAGE_SIZE, PCI_DMA_FROMDEVICE); 617 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
597 rx_buffer_info->page_dma = 0; 618 rx_buffer_info->page_dma = 0;
598 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 619 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
599 rx_buffer_info->page, 0, upper_len); 620 rx_buffer_info->page,
600 rx_buffer_info->page = NULL; 621 rx_buffer_info->page_offset,
622 upper_len);
623
624 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
625 (page_count(rx_buffer_info->page) != 1))
626 rx_buffer_info->page = NULL;
627 else
628 get_page(rx_buffer_info->page);
601 629
602 skb->len += upper_len; 630 skb->len += upper_len;
603 skb->data_len += upper_len; 631 skb->data_len += upper_len;
@@ -620,6 +648,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
620 rx_buffer_info->skb = next_buffer->skb; 648 rx_buffer_info->skb = next_buffer->skb;
621 rx_buffer_info->dma = next_buffer->dma; 649 rx_buffer_info->dma = next_buffer->dma;
622 next_buffer->skb = skb; 650 next_buffer->skb = skb;
651 next_buffer->dma = 0;
623 adapter->non_eop_descs++; 652 adapter->non_eop_descs++;
624 goto next_desc; 653 goto next_desc;
625 } 654 }
@@ -635,9 +664,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
635 total_rx_bytes += skb->len; 664 total_rx_bytes += skb->len;
636 total_rx_packets++; 665 total_rx_packets++;
637 666
638 skb->protocol = eth_type_trans(skb, netdev); 667 skb->protocol = eth_type_trans(skb, adapter->netdev);
639 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); 668 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
640 netdev->last_rx = jiffies; 669 adapter->netdev->last_rx = jiffies;
641 670
642next_desc: 671next_desc:
643 rx_desc->wb.upper.status_error = 0; 672 rx_desc->wb.upper.status_error = 0;
@@ -666,9 +695,6 @@ next_desc:
666 if (cleaned_count) 695 if (cleaned_count)
667 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 696 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
668 697
669 adapter->net_stats.rx_bytes += total_rx_bytes;
670 adapter->net_stats.rx_packets += total_rx_packets;
671
672 rx_ring->total_packets += total_rx_packets; 698 rx_ring->total_packets += total_rx_packets;
673 rx_ring->total_bytes += total_rx_bytes; 699 rx_ring->total_bytes += total_rx_bytes;
674 adapter->net_stats.rx_bytes += total_rx_bytes; 700 adapter->net_stats.rx_bytes += total_rx_bytes;
@@ -700,43 +726,43 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
700 q_vector = &adapter->q_vector[v_idx]; 726 q_vector = &adapter->q_vector[v_idx];
701 /* XXX for_each_bit(...) */ 727 /* XXX for_each_bit(...) */
702 r_idx = find_first_bit(q_vector->rxr_idx, 728 r_idx = find_first_bit(q_vector->rxr_idx,
703 adapter->num_rx_queues); 729 adapter->num_rx_queues);
704 730
705 for (i = 0; i < q_vector->rxr_count; i++) { 731 for (i = 0; i < q_vector->rxr_count; i++) {
706 j = adapter->rx_ring[r_idx].reg_idx; 732 j = adapter->rx_ring[r_idx].reg_idx;
707 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx); 733 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
708 r_idx = find_next_bit(q_vector->rxr_idx, 734 r_idx = find_next_bit(q_vector->rxr_idx,
709 adapter->num_rx_queues, 735 adapter->num_rx_queues,
710 r_idx + 1); 736 r_idx + 1);
711 } 737 }
712 r_idx = find_first_bit(q_vector->txr_idx, 738 r_idx = find_first_bit(q_vector->txr_idx,
713 adapter->num_tx_queues); 739 adapter->num_tx_queues);
714 740
715 for (i = 0; i < q_vector->txr_count; i++) { 741 for (i = 0; i < q_vector->txr_count; i++) {
716 j = adapter->tx_ring[r_idx].reg_idx; 742 j = adapter->tx_ring[r_idx].reg_idx;
717 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx); 743 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
718 r_idx = find_next_bit(q_vector->txr_idx, 744 r_idx = find_next_bit(q_vector->txr_idx,
719 adapter->num_tx_queues, 745 adapter->num_tx_queues,
720 r_idx + 1); 746 r_idx + 1);
721 } 747 }
722 748
723 /* if this is a tx only vector use half the irq (tx) rate */ 749 /* if this is a tx only vector halve the interrupt rate */
724 if (q_vector->txr_count && !q_vector->rxr_count) 750 if (q_vector->txr_count && !q_vector->rxr_count)
725 q_vector->eitr = adapter->tx_eitr; 751 q_vector->eitr = (adapter->eitr_param >> 1);
726 else 752 else
727 /* rx only or mixed */ 753 /* rx only */
728 q_vector->eitr = adapter->rx_eitr; 754 q_vector->eitr = adapter->eitr_param;
729 755
730 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 756 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
731 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); 757 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
732 } 758 }
733 759
734 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); 760 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
735 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 761 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
736 762
737 /* set up to autoclear timer, lsc, and the vectors */ 763 /* set up to autoclear timer, and the vectors */
738 mask = IXGBE_EIMS_ENABLE_MASK; 764 mask = IXGBE_EIMS_ENABLE_MASK;
739 mask &= ~IXGBE_EIMS_OTHER; 765 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
740 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 766 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
741} 767}
742 768
@@ -766,8 +792,8 @@ enum latency_range {
766 * parameter (see ixgbe_param.c) 792 * parameter (see ixgbe_param.c)
767 **/ 793 **/
768static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, 794static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
769 u32 eitr, u8 itr_setting, 795 u32 eitr, u8 itr_setting,
770 int packets, int bytes) 796 int packets, int bytes)
771{ 797{
772 unsigned int retval = itr_setting; 798 unsigned int retval = itr_setting;
773 u32 timepassed_us; 799 u32 timepassed_us;
@@ -814,40 +840,40 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
814 u32 new_itr; 840 u32 new_itr;
815 u8 current_itr, ret_itr; 841 u8 current_itr, ret_itr;
816 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) / 842 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
817 sizeof(struct ixgbe_q_vector); 843 sizeof(struct ixgbe_q_vector);
818 struct ixgbe_ring *rx_ring, *tx_ring; 844 struct ixgbe_ring *rx_ring, *tx_ring;
819 845
820 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 846 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
821 for (i = 0; i < q_vector->txr_count; i++) { 847 for (i = 0; i < q_vector->txr_count; i++) {
822 tx_ring = &(adapter->tx_ring[r_idx]); 848 tx_ring = &(adapter->tx_ring[r_idx]);
823 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 849 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
824 q_vector->tx_eitr, 850 q_vector->tx_itr,
825 tx_ring->total_packets, 851 tx_ring->total_packets,
826 tx_ring->total_bytes); 852 tx_ring->total_bytes);
827 /* if the result for this queue would decrease interrupt 853 /* if the result for this queue would decrease interrupt
828 * rate for this vector then use that result */ 854 * rate for this vector then use that result */
829 q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ? 855 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
830 q_vector->tx_eitr - 1 : ret_itr); 856 q_vector->tx_itr - 1 : ret_itr);
831 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 857 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
832 r_idx + 1); 858 r_idx + 1);
833 } 859 }
834 860
835 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 861 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
836 for (i = 0; i < q_vector->rxr_count; i++) { 862 for (i = 0; i < q_vector->rxr_count; i++) {
837 rx_ring = &(adapter->rx_ring[r_idx]); 863 rx_ring = &(adapter->rx_ring[r_idx]);
838 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 864 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
839 q_vector->rx_eitr, 865 q_vector->rx_itr,
840 rx_ring->total_packets, 866 rx_ring->total_packets,
841 rx_ring->total_bytes); 867 rx_ring->total_bytes);
842 /* if the result for this queue would decrease interrupt 868 /* if the result for this queue would decrease interrupt
843 * rate for this vector then use that result */ 869 * rate for this vector then use that result */
844 q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ? 870 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
845 q_vector->rx_eitr - 1 : ret_itr); 871 q_vector->rx_itr - 1 : ret_itr);
846 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 872 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
847 r_idx + 1); 873 r_idx + 1);
848 } 874 }
849 875
850 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); 876 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
851 877
852 switch (current_itr) { 878 switch (current_itr) {
853 /* counts and packets in update_itr are dependent on these numbers */ 879 /* counts and packets in update_itr are dependent on these numbers */
@@ -871,13 +897,27 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
871 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 897 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
872 /* must write high and low 16 bits to reset counter */ 898 /* must write high and low 16 bits to reset counter */
873 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, 899 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
874 itr_reg); 900 itr_reg);
875 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16); 901 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
876 } 902 }
877 903
878 return; 904 return;
879} 905}
880 906
907
908static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
909{
910 struct ixgbe_hw *hw = &adapter->hw;
911
912 adapter->lsc_int++;
913 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
914 adapter->link_check_timeout = jiffies;
915 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
916 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
917 schedule_work(&adapter->watchdog_task);
918 }
919}
920
881static irqreturn_t ixgbe_msix_lsc(int irq, void *data) 921static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
882{ 922{
883 struct net_device *netdev = data; 923 struct net_device *netdev = data;
@@ -885,11 +925,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
885 struct ixgbe_hw *hw = &adapter->hw; 925 struct ixgbe_hw *hw = &adapter->hw;
886 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 926 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
887 927
888 if (eicr & IXGBE_EICR_LSC) { 928 if (eicr & IXGBE_EICR_LSC)
889 adapter->lsc_int++; 929 ixgbe_check_lsc(adapter);
890 if (!test_bit(__IXGBE_DOWN, &adapter->state))
891 mod_timer(&adapter->watchdog_timer, jiffies);
892 }
893 930
894 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 931 if (!test_bit(__IXGBE_DOWN, &adapter->state))
895 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 932 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
@@ -901,7 +938,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
901{ 938{
902 struct ixgbe_q_vector *q_vector = data; 939 struct ixgbe_q_vector *q_vector = data;
903 struct ixgbe_adapter *adapter = q_vector->adapter; 940 struct ixgbe_adapter *adapter = q_vector->adapter;
904 struct ixgbe_ring *txr; 941 struct ixgbe_ring *tx_ring;
905 int i, r_idx; 942 int i, r_idx;
906 943
907 if (!q_vector->txr_count) 944 if (!q_vector->txr_count)
@@ -909,16 +946,16 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
909 946
910 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 947 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
911 for (i = 0; i < q_vector->txr_count; i++) { 948 for (i = 0; i < q_vector->txr_count; i++) {
912 txr = &(adapter->tx_ring[r_idx]); 949 tx_ring = &(adapter->tx_ring[r_idx]);
913#ifdef CONFIG_DCA 950#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
914 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 951 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
915 ixgbe_update_tx_dca(adapter, txr); 952 ixgbe_update_tx_dca(adapter, tx_ring);
916#endif 953#endif
917 txr->total_bytes = 0; 954 tx_ring->total_bytes = 0;
918 txr->total_packets = 0; 955 tx_ring->total_packets = 0;
919 ixgbe_clean_tx_irq(adapter, txr); 956 ixgbe_clean_tx_irq(adapter, tx_ring);
920 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 957 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
921 r_idx + 1); 958 r_idx + 1);
922 } 959 }
923 960
924 return IRQ_HANDLED; 961 return IRQ_HANDLED;
@@ -933,18 +970,26 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
933{ 970{
934 struct ixgbe_q_vector *q_vector = data; 971 struct ixgbe_q_vector *q_vector = data;
935 struct ixgbe_adapter *adapter = q_vector->adapter; 972 struct ixgbe_adapter *adapter = q_vector->adapter;
936 struct ixgbe_ring *rxr; 973 struct ixgbe_ring *rx_ring;
937 int r_idx; 974 int r_idx;
975 int i;
938 976
939 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 977 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
978 for (i = 0; i < q_vector->rxr_count; i++) {
979 rx_ring = &(adapter->rx_ring[r_idx]);
980 rx_ring->total_bytes = 0;
981 rx_ring->total_packets = 0;
982 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
983 r_idx + 1);
984 }
985
940 if (!q_vector->rxr_count) 986 if (!q_vector->rxr_count)
941 return IRQ_HANDLED; 987 return IRQ_HANDLED;
942 988
943 rxr = &(adapter->rx_ring[r_idx]); 989 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
990 rx_ring = &(adapter->rx_ring[r_idx]);
944 /* disable interrupts on this vector only */ 991 /* disable interrupts on this vector only */
945 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx); 992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
946 rxr->total_bytes = 0;
947 rxr->total_packets = 0;
948 netif_rx_schedule(adapter->netdev, &q_vector->napi); 993 netif_rx_schedule(adapter->netdev, &q_vector->napi);
949 994
950 return IRQ_HANDLED; 995 return IRQ_HANDLED;
@@ -963,39 +1008,90 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
963 * @napi: napi struct with our devices info in it 1008 * @napi: napi struct with our devices info in it
964 * @budget: amount of work driver is allowed to do this pass, in packets 1009 * @budget: amount of work driver is allowed to do this pass, in packets
965 * 1010 *
1011 * This function is optimized for cleaning one queue only on a single
1012 * q_vector!!!
966 **/ 1013 **/
967static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) 1014static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
968{ 1015{
969 struct ixgbe_q_vector *q_vector = 1016 struct ixgbe_q_vector *q_vector =
970 container_of(napi, struct ixgbe_q_vector, napi); 1017 container_of(napi, struct ixgbe_q_vector, napi);
971 struct ixgbe_adapter *adapter = q_vector->adapter; 1018 struct ixgbe_adapter *adapter = q_vector->adapter;
972 struct ixgbe_ring *rxr; 1019 struct ixgbe_ring *rx_ring = NULL;
973 int work_done = 0; 1020 int work_done = 0;
974 long r_idx; 1021 long r_idx;
975 1022
976 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1023 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
977 rxr = &(adapter->rx_ring[r_idx]); 1024 rx_ring = &(adapter->rx_ring[r_idx]);
978#ifdef CONFIG_DCA 1025#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
979 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1026 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
980 ixgbe_update_rx_dca(adapter, rxr); 1027 ixgbe_update_rx_dca(adapter, rx_ring);
981#endif 1028#endif
982 1029
983 ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget); 1030 ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
984 1031
985 /* If all Rx work done, exit the polling mode */ 1032 /* If all Rx work done, exit the polling mode */
986 if (work_done < budget) { 1033 if (work_done < budget) {
987 netif_rx_complete(adapter->netdev, napi); 1034 netif_rx_complete(adapter->netdev, napi);
988 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) 1035 if (adapter->itr_setting & 3)
989 ixgbe_set_itr_msix(q_vector); 1036 ixgbe_set_itr_msix(q_vector);
990 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1037 if (!test_bit(__IXGBE_DOWN, &adapter->state))
991 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx); 1038 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
992 } 1039 }
993 1040
994 return work_done; 1041 return work_done;
995} 1042}
996 1043
1044/**
1045 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
1046 * @napi: napi struct with our devices info in it
1047 * @budget: amount of work driver is allowed to do this pass, in packets
1048 *
1049 * This function will clean more than one rx queue associated with a
1050 * q_vector.
1051 **/
1052static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1053{
1054 struct ixgbe_q_vector *q_vector =
1055 container_of(napi, struct ixgbe_q_vector, napi);
1056 struct ixgbe_adapter *adapter = q_vector->adapter;
1057 struct ixgbe_ring *rx_ring = NULL;
1058 int work_done = 0, i;
1059 long r_idx;
1060 u16 enable_mask = 0;
1061
1062 /* attempt to distribute budget to each queue fairly, but don't allow
1063 * the budget to go below 1 because we'll exit polling */
1064 budget /= (q_vector->rxr_count ?: 1);
1065 budget = max(budget, 1);
1066 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1067 for (i = 0; i < q_vector->rxr_count; i++) {
1068 rx_ring = &(adapter->rx_ring[r_idx]);
1069#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
1070 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1071 ixgbe_update_rx_dca(adapter, rx_ring);
1072#endif
1073 ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
1074 enable_mask |= rx_ring->v_idx;
1075 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1076 r_idx + 1);
1077 }
1078
1079 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1080 rx_ring = &(adapter->rx_ring[r_idx]);
1081 /* If all Rx work done, exit the polling mode */
1082 if (work_done < budget) {
1083 netif_rx_complete(adapter->netdev, napi);
1084 if (adapter->itr_setting & 3)
1085 ixgbe_set_itr_msix(q_vector);
1086 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1087 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
1088 return 0;
1089 }
1090
1091 return work_done;
1092}
997static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, 1093static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
998 int r_idx) 1094 int r_idx)
999{ 1095{
1000 a->q_vector[v_idx].adapter = a; 1096 a->q_vector[v_idx].adapter = a;
1001 set_bit(r_idx, a->q_vector[v_idx].rxr_idx); 1097 set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
@@ -1004,7 +1100,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
1004} 1100}
1005 1101
1006static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 1102static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1007 int r_idx) 1103 int r_idx)
1008{ 1104{
1009 a->q_vector[v_idx].adapter = a; 1105 a->q_vector[v_idx].adapter = a;
1010 set_bit(r_idx, a->q_vector[v_idx].txr_idx); 1106 set_bit(r_idx, a->q_vector[v_idx].txr_idx);
@@ -1024,7 +1120,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1024 * mapping configurations in here. 1120 * mapping configurations in here.
1025 **/ 1121 **/
1026static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 1122static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
1027 int vectors) 1123 int vectors)
1028{ 1124{
1029 int v_start = 0; 1125 int v_start = 0;
1030 int rxr_idx = 0, txr_idx = 0; 1126 int rxr_idx = 0, txr_idx = 0;
@@ -1101,28 +1197,28 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1101 goto out; 1197 goto out;
1102 1198
1103#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 1199#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
1104 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 1200 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
1105 &ixgbe_msix_clean_many) 1201 &ixgbe_msix_clean_many)
1106 for (vector = 0; vector < q_vectors; vector++) { 1202 for (vector = 0; vector < q_vectors; vector++) {
1107 handler = SET_HANDLER(&adapter->q_vector[vector]); 1203 handler = SET_HANDLER(&adapter->q_vector[vector]);
1108 sprintf(adapter->name[vector], "%s:v%d-%s", 1204 sprintf(adapter->name[vector], "%s:v%d-%s",
1109 netdev->name, vector, 1205 netdev->name, vector,
1110 (handler == &ixgbe_msix_clean_rx) ? "Rx" : 1206 (handler == &ixgbe_msix_clean_rx) ? "Rx" :
1111 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx")); 1207 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
1112 err = request_irq(adapter->msix_entries[vector].vector, 1208 err = request_irq(adapter->msix_entries[vector].vector,
1113 handler, 0, adapter->name[vector], 1209 handler, 0, adapter->name[vector],
1114 &(adapter->q_vector[vector])); 1210 &(adapter->q_vector[vector]));
1115 if (err) { 1211 if (err) {
1116 DPRINTK(PROBE, ERR, 1212 DPRINTK(PROBE, ERR,
1117 "request_irq failed for MSIX interrupt " 1213 "request_irq failed for MSIX interrupt "
1118 "Error: %d\n", err); 1214 "Error: %d\n", err);
1119 goto free_queue_irqs; 1215 goto free_queue_irqs;
1120 } 1216 }
1121 } 1217 }
1122 1218
1123 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 1219 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
1124 err = request_irq(adapter->msix_entries[vector].vector, 1220 err = request_irq(adapter->msix_entries[vector].vector,
1125 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 1221 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
1126 if (err) { 1222 if (err) {
1127 DPRINTK(PROBE, ERR, 1223 DPRINTK(PROBE, ERR,
1128 "request_irq for msix_lsc failed: %d\n", err); 1224 "request_irq for msix_lsc failed: %d\n", err);
@@ -1134,7 +1230,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1134free_queue_irqs: 1230free_queue_irqs:
1135 for (i = vector - 1; i >= 0; i--) 1231 for (i = vector - 1; i >= 0; i--)
1136 free_irq(adapter->msix_entries[--vector].vector, 1232 free_irq(adapter->msix_entries[--vector].vector,
1137 &(adapter->q_vector[i])); 1233 &(adapter->q_vector[i]));
1138 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 1234 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1139 pci_disable_msix(adapter->pdev); 1235 pci_disable_msix(adapter->pdev);
1140 kfree(adapter->msix_entries); 1236 kfree(adapter->msix_entries);
@@ -1152,16 +1248,16 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1152 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; 1248 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
1153 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0]; 1249 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
1154 1250
1155 q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr, 1251 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
1156 q_vector->tx_eitr, 1252 q_vector->tx_itr,
1157 tx_ring->total_packets, 1253 tx_ring->total_packets,
1158 tx_ring->total_bytes); 1254 tx_ring->total_bytes);
1159 q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr, 1255 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
1160 q_vector->rx_eitr, 1256 q_vector->rx_itr,
1161 rx_ring->total_packets, 1257 rx_ring->total_packets,
1162 rx_ring->total_bytes); 1258 rx_ring->total_bytes);
1163 1259
1164 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); 1260 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
1165 1261
1166 switch (current_itr) { 1262 switch (current_itr) {
1167 /* counts and packets in update_itr are dependent on these numbers */ 1263 /* counts and packets in update_itr are dependent on these numbers */
@@ -1206,19 +1302,19 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
1206 struct ixgbe_hw *hw = &adapter->hw; 1302 struct ixgbe_hw *hw = &adapter->hw;
1207 u32 eicr; 1303 u32 eicr;
1208 1304
1209
1210 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read 1305 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
1211 * therefore no explict interrupt disable is necessary */ 1306 * therefore no explict interrupt disable is necessary */
1212 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 1307 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1213 if (!eicr) 1308 if (!eicr) {
1309 /* shared interrupt alert!
1310 * make sure interrupts are enabled because the read will
1311 * have disabled interrupts due to EIAM */
1312 ixgbe_irq_enable(adapter);
1214 return IRQ_NONE; /* Not our interrupt */ 1313 return IRQ_NONE; /* Not our interrupt */
1215
1216 if (eicr & IXGBE_EICR_LSC) {
1217 adapter->lsc_int++;
1218 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1219 mod_timer(&adapter->watchdog_timer, jiffies);
1220 } 1314 }
1221 1315
1316 if (eicr & IXGBE_EICR_LSC)
1317 ixgbe_check_lsc(adapter);
1222 1318
1223 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) { 1319 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
1224 adapter->tx_ring[0].total_packets = 0; 1320 adapter->tx_ring[0].total_packets = 0;
@@ -1261,10 +1357,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
1261 err = ixgbe_request_msix_irqs(adapter); 1357 err = ixgbe_request_msix_irqs(adapter);
1262 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1358 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1263 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, 1359 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
1264 netdev->name, netdev); 1360 netdev->name, netdev);
1265 } else { 1361 } else {
1266 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, 1362 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
1267 netdev->name, netdev); 1363 netdev->name, netdev);
1268 } 1364 }
1269 1365
1270 if (err) 1366 if (err)
@@ -1288,7 +1384,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1288 i--; 1384 i--;
1289 for (; i >= 0; i--) { 1385 for (; i >= 0; i--) {
1290 free_irq(adapter->msix_entries[i].vector, 1386 free_irq(adapter->msix_entries[i].vector,
1291 &(adapter->q_vector[i])); 1387 &(adapter->q_vector[i]));
1292 } 1388 }
1293 1389
1294 ixgbe_reset_q_vectors(adapter); 1390 ixgbe_reset_q_vectors(adapter);
@@ -1335,7 +1431,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1335 struct ixgbe_hw *hw = &adapter->hw; 1431 struct ixgbe_hw *hw = &adapter->hw;
1336 1432
1337 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 1433 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
1338 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr)); 1434 EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
1339 1435
1340 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); 1436 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
1341 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0); 1437 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
@@ -1347,26 +1443,31 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1347} 1443}
1348 1444
1349/** 1445/**
1350 * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset 1446 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
1351 * @adapter: board private structure 1447 * @adapter: board private structure
1352 * 1448 *
1353 * Configure the Tx unit of the MAC after a reset. 1449 * Configure the Tx unit of the MAC after a reset.
1354 **/ 1450 **/
1355static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) 1451static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1356{ 1452{
1357 u64 tdba; 1453 u64 tdba, tdwba;
1358 struct ixgbe_hw *hw = &adapter->hw; 1454 struct ixgbe_hw *hw = &adapter->hw;
1359 u32 i, j, tdlen, txctrl; 1455 u32 i, j, tdlen, txctrl;
1360 1456
1361 /* Setup the HW Tx Head and Tail descriptor pointers */ 1457 /* Setup the HW Tx Head and Tail descriptor pointers */
1362 for (i = 0; i < adapter->num_tx_queues; i++) { 1458 for (i = 0; i < adapter->num_tx_queues; i++) {
1363 j = adapter->tx_ring[i].reg_idx; 1459 struct ixgbe_ring *ring = &adapter->tx_ring[i];
1364 tdba = adapter->tx_ring[i].dma; 1460 j = ring->reg_idx;
1365 tdlen = adapter->tx_ring[i].count * 1461 tdba = ring->dma;
1366 sizeof(union ixgbe_adv_tx_desc); 1462 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1367 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 1463 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
1368 (tdba & DMA_32BIT_MASK)); 1464 (tdba & DMA_32BIT_MASK));
1369 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 1465 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
1466 tdwba = ring->dma +
1467 (ring->count * sizeof(union ixgbe_adv_tx_desc));
1468 tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1469 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
1470 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
1370 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); 1471 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1371 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 1472 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1372 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 1473 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
@@ -1375,20 +1476,66 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1375 /* Disable Tx Head Writeback RO bit, since this hoses 1476 /* Disable Tx Head Writeback RO bit, since this hoses
1376 * bookkeeping if things aren't delivered in order. 1477 * bookkeeping if things aren't delivered in order.
1377 */ 1478 */
1378 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); 1479 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
1379 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1480 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1380 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl); 1481 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
1381 } 1482 }
1382} 1483}
1383 1484
1384#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ 1485#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1385 (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) 1486
1487static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1488{
1489 struct ixgbe_ring *rx_ring;
1490 u32 srrctl;
1491 int queue0;
1492 unsigned long mask;
1493
1494 /* program one srrctl register per VMDq index */
1495 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
1496 long shift, len;
1497 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1498 len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
1499 shift = find_first_bit(&mask, len);
1500 queue0 = index & mask;
1501 index = (index & mask) >> shift;
1502 /* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
1503 } else {
1504 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1505 queue0 = index & mask;
1506 index = index & mask;
1507 }
1508
1509 rx_ring = &adapter->rx_ring[queue0];
1510
1511 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
1512
1513 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1514 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1515
1516 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1517 srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1518 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1519 srrctl |= ((IXGBE_RX_HDR_SIZE <<
1520 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1521 IXGBE_SRRCTL_BSIZEHDR_MASK);
1522 } else {
1523 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1524
1525 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1526 srrctl |= IXGBE_RXBUFFER_2048 >>
1527 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1528 else
1529 srrctl |= rx_ring->rx_buf_len >>
1530 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1531 }
1532 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
1533}
1386 1534
1387#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1388/** 1535/**
1389 * ixgbe_get_skb_hdr - helper function for LRO header processing 1536 * ixgbe_get_skb_hdr - helper function for LRO header processing
1390 * @skb: pointer to sk_buff to be added to LRO packet 1537 * @skb: pointer to sk_buff to be added to LRO packet
1391 * @iphdr: pointer to tcp header structure 1538 * @iphdr: pointer to ip header structure
1392 * @tcph: pointer to tcp header structure 1539 * @tcph: pointer to tcp header structure
1393 * @hdr_flags: pointer to header flags 1540 * @hdr_flags: pointer to header flags
1394 * @priv: private data 1541 * @priv: private data
@@ -1399,8 +1546,8 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1399 union ixgbe_adv_rx_desc *rx_desc = priv; 1546 union ixgbe_adv_rx_desc *rx_desc = priv;
1400 1547
1401 /* Verify that this is a valid IPv4 TCP packet */ 1548 /* Verify that this is a valid IPv4 TCP packet */
1402 if (!(rx_desc->wb.lower.lo_dword.pkt_info & 1549 if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
1403 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP))) 1550 (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
1404 return -1; 1551 return -1;
1405 1552
1406 /* Set network headers */ 1553 /* Set network headers */
@@ -1412,8 +1559,11 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1412 return 0; 1559 return 0;
1413} 1560}
1414 1561
1562#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1563 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1564
1415/** 1565/**
1416 * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset 1566 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
1417 * @adapter: board private structure 1567 * @adapter: board private structure
1418 * 1568 *
1419 * Configure the Rx unit of the MAC after a reset. 1569 * Configure the Rx unit of the MAC after a reset.
@@ -1426,25 +1576,26 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1426 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1576 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1427 int i, j; 1577 int i, j;
1428 u32 rdlen, rxctrl, rxcsum; 1578 u32 rdlen, rxctrl, rxcsum;
1429 u32 random[10]; 1579 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
1580 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
1581 0x6A3E67EA, 0x14364D17, 0x3BED200D};
1430 u32 fctrl, hlreg0; 1582 u32 fctrl, hlreg0;
1431 u32 pages; 1583 u32 pages;
1432 u32 reta = 0, mrqc, srrctl; 1584 u32 reta = 0, mrqc;
1585 u32 rdrxctl;
1586 int rx_buf_len;
1433 1587
1434 /* Decide whether to use packet split mode or not */ 1588 /* Decide whether to use packet split mode or not */
1435 if (netdev->mtu > ETH_DATA_LEN) 1589 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1436 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1437 else
1438 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1439 1590
1440 /* Set the RX buffer length according to the mode */ 1591 /* Set the RX buffer length according to the mode */
1441 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1592 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1442 adapter->rx_buf_len = IXGBE_RX_HDR_SIZE; 1593 rx_buf_len = IXGBE_RX_HDR_SIZE;
1443 } else { 1594 } else {
1444 if (netdev->mtu <= ETH_DATA_LEN) 1595 if (netdev->mtu <= ETH_DATA_LEN)
1445 adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1596 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1446 else 1597 else
1447 adapter->rx_buf_len = ALIGN(max_frame, 1024); 1598 rx_buf_len = ALIGN(max_frame, 1024);
1448 } 1599 }
1449 1600
1450 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 1601 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
@@ -1461,28 +1612,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1461 1612
1462 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 1613 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1463 1614
1464 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
1465 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1466 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1467
1468 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1469 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1470 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1471 srrctl |= ((IXGBE_RX_HDR_SIZE <<
1472 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1473 IXGBE_SRRCTL_BSIZEHDR_MASK);
1474 } else {
1475 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1476
1477 if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1478 srrctl |=
1479 IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1480 else
1481 srrctl |=
1482 adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1483 }
1484 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
1485
1486 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1615 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1487 /* disable receives while setting up the descriptors */ 1616 /* disable receives while setting up the descriptors */
1488 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1617 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -1492,25 +1621,43 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1492 * the Base and Length of the Rx Descriptor Ring */ 1621 * the Base and Length of the Rx Descriptor Ring */
1493 for (i = 0; i < adapter->num_rx_queues; i++) { 1622 for (i = 0; i < adapter->num_rx_queues; i++) {
1494 rdba = adapter->rx_ring[i].dma; 1623 rdba = adapter->rx_ring[i].dma;
1495 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK)); 1624 j = adapter->rx_ring[i].reg_idx;
1496 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32)); 1625 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
1497 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen); 1626 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
1498 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0); 1627 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
1499 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0); 1628 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
1500 adapter->rx_ring[i].head = IXGBE_RDH(i); 1629 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
1501 adapter->rx_ring[i].tail = IXGBE_RDT(i); 1630 adapter->rx_ring[i].head = IXGBE_RDH(j);
1502 } 1631 adapter->rx_ring[i].tail = IXGBE_RDT(j);
1503 1632 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1504 /* Intitial LRO Settings */ 1633 /* Intitial LRO Settings */
1505 adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE; 1634 adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
1506 adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS; 1635 adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
1507 adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr; 1636 adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
1508 adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID; 1637 adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
1509 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 1638 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1510 adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI; 1639 adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
1511 adapter->rx_ring[i].lro_mgr.dev = adapter->netdev; 1640 adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
1512 adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; 1641 adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1513 adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1642 adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1643
1644 ixgbe_configure_srrctl(adapter, j);
1645 }
1646
1647 /*
1648 * For VMDq support of different descriptor types or
1649 * buffer sizes through the use of multiple SRRCTL
1650 * registers, RDRXCTL.MVMEN must be set to 1
1651 *
1652 * also, the manual doesn't mention it clearly but DCA hints
1653 * will only use queue 0's tags unless this bit is set. Side
1654 * effects of setting this bit are only that SRRCTL must be
1655 * fully programmed [0..15]
1656 */
1657 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1658 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
1659 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1660
1514 1661
1515 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 1662 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1516 /* Fill out redirection table */ 1663 /* Fill out redirection table */
@@ -1525,22 +1672,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1525 } 1672 }
1526 1673
1527 /* Fill out hash function seeds */ 1674 /* Fill out hash function seeds */
1528 /* XXX use a random constant here to glue certain flows */
1529 get_random_bytes(&random[0], 40);
1530 for (i = 0; i < 10; i++) 1675 for (i = 0; i < 10; i++)
1531 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]); 1676 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
1532 1677
1533 mrqc = IXGBE_MRQC_RSSEN 1678 mrqc = IXGBE_MRQC_RSSEN
1534 /* Perform hash on these packet types */ 1679 /* Perform hash on these packet types */
1535 | IXGBE_MRQC_RSS_FIELD_IPV4 1680 | IXGBE_MRQC_RSS_FIELD_IPV4
1536 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 1681 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
1537 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP 1682 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
1538 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 1683 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
1539 | IXGBE_MRQC_RSS_FIELD_IPV6_EX 1684 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
1540 | IXGBE_MRQC_RSS_FIELD_IPV6 1685 | IXGBE_MRQC_RSS_FIELD_IPV6
1541 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP 1686 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
1542 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP 1687 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
1543 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 1688 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
1544 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 1689 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
1545 } 1690 }
1546 1691
@@ -1562,7 +1707,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1562} 1707}
1563 1708
1564static void ixgbe_vlan_rx_register(struct net_device *netdev, 1709static void ixgbe_vlan_rx_register(struct net_device *netdev,
1565 struct vlan_group *grp) 1710 struct vlan_group *grp)
1566{ 1711{
1567 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1712 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1568 u32 ctrl; 1713 u32 ctrl;
@@ -1586,14 +1731,16 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1586static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1731static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1587{ 1732{
1588 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1733 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1734 struct ixgbe_hw *hw = &adapter->hw;
1589 1735
1590 /* add VID to filter table */ 1736 /* add VID to filter table */
1591 ixgbe_set_vfta(&adapter->hw, vid, 0, true); 1737 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
1592} 1738}
1593 1739
1594static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1740static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1595{ 1741{
1596 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1742 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1743 struct ixgbe_hw *hw = &adapter->hw;
1597 1744
1598 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1745 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1599 ixgbe_irq_disable(adapter); 1746 ixgbe_irq_disable(adapter);
@@ -1604,7 +1751,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1604 ixgbe_irq_enable(adapter); 1751 ixgbe_irq_enable(adapter);
1605 1752
1606 /* remove VID from filter table */ 1753 /* remove VID from filter table */
1607 ixgbe_set_vfta(&adapter->hw, vid, 0, false); 1754 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
1608} 1755}
1609 1756
1610static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 1757static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
@@ -1621,23 +1768,37 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
1621 } 1768 }
1622} 1769}
1623 1770
1771static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
1772{
1773 struct dev_mc_list *mc_ptr;
1774 u8 *addr = *mc_addr_ptr;
1775 *vmdq = 0;
1776
1777 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
1778 if (mc_ptr->next)
1779 *mc_addr_ptr = mc_ptr->next->dmi_addr;
1780 else
1781 *mc_addr_ptr = NULL;
1782
1783 return addr;
1784}
1785
1624/** 1786/**
1625 * ixgbe_set_multi - Multicast and Promiscuous mode set 1787 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
1626 * @netdev: network interface device structure 1788 * @netdev: network interface device structure
1627 * 1789 *
1628 * The set_multi entry point is called whenever the multicast address 1790 * The set_rx_method entry point is called whenever the unicast/multicast
1629 * list or the network interface flags are updated. This routine is 1791 * address list or the network interface flags are updated. This routine is
1630 * responsible for configuring the hardware for proper multicast, 1792 * responsible for configuring the hardware for proper unicast, multicast and
1631 * promiscuous mode, and all-multi behavior. 1793 * promiscuous mode.
1632 **/ 1794 **/
1633static void ixgbe_set_multi(struct net_device *netdev) 1795static void ixgbe_set_rx_mode(struct net_device *netdev)
1634{ 1796{
1635 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1797 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1636 struct ixgbe_hw *hw = &adapter->hw; 1798 struct ixgbe_hw *hw = &adapter->hw;
1637 struct dev_mc_list *mc_ptr;
1638 u8 *mta_list;
1639 u32 fctrl, vlnctrl; 1799 u32 fctrl, vlnctrl;
1640 int i; 1800 u8 *addr_list = NULL;
1801 int addr_count = 0;
1641 1802
1642 /* Check for Promiscuous and All Multicast modes */ 1803 /* Check for Promiscuous and All Multicast modes */
1643 1804
@@ -1645,6 +1806,7 @@ static void ixgbe_set_multi(struct net_device *netdev)
1645 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1806 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1646 1807
1647 if (netdev->flags & IFF_PROMISC) { 1808 if (netdev->flags & IFF_PROMISC) {
1809 hw->addr_ctrl.user_set_promisc = 1;
1648 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1810 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1649 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1811 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1650 } else { 1812 } else {
@@ -1655,33 +1817,25 @@ static void ixgbe_set_multi(struct net_device *netdev)
1655 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1817 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1656 } 1818 }
1657 vlnctrl |= IXGBE_VLNCTRL_VFE; 1819 vlnctrl |= IXGBE_VLNCTRL_VFE;
1820 hw->addr_ctrl.user_set_promisc = 0;
1658 } 1821 }
1659 1822
1660 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1823 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1661 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1824 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1662 1825
1663 if (netdev->mc_count) { 1826 /* reprogram secondary unicast list */
1664 mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC); 1827 addr_count = netdev->uc_count;
1665 if (!mta_list) 1828 if (addr_count)
1666 return; 1829 addr_list = netdev->uc_list->dmi_addr;
1667 1830 hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
1668 /* Shared function expects packed array of only addresses. */ 1831 ixgbe_addr_list_itr);
1669 mc_ptr = netdev->mc_list; 1832
1670 1833 /* reprogram multicast list */
1671 for (i = 0; i < netdev->mc_count; i++) { 1834 addr_count = netdev->mc_count;
1672 if (!mc_ptr) 1835 if (addr_count)
1673 break; 1836 addr_list = netdev->mc_list->dmi_addr;
1674 memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr, 1837 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
1675 ETH_ALEN); 1838 ixgbe_addr_list_itr);
1676 mc_ptr = mc_ptr->next;
1677 }
1678
1679 ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
1680 kfree(mta_list);
1681 } else {
1682 ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
1683 }
1684
1685} 1839}
1686 1840
1687static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 1841static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -1695,10 +1849,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
1695 q_vectors = 1; 1849 q_vectors = 1;
1696 1850
1697 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1851 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1852 struct napi_struct *napi;
1698 q_vector = &adapter->q_vector[q_idx]; 1853 q_vector = &adapter->q_vector[q_idx];
1699 if (!q_vector->rxr_count) 1854 if (!q_vector->rxr_count)
1700 continue; 1855 continue;
1701 napi_enable(&q_vector->napi); 1856 napi = &q_vector->napi;
1857 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
1858 (q_vector->rxr_count > 1))
1859 napi->poll = &ixgbe_clean_rxonly_many;
1860
1861 napi_enable(napi);
1702 } 1862 }
1703} 1863}
1704 1864
@@ -1725,7 +1885,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
1725 struct net_device *netdev = adapter->netdev; 1885 struct net_device *netdev = adapter->netdev;
1726 int i; 1886 int i;
1727 1887
1728 ixgbe_set_multi(netdev); 1888 ixgbe_set_rx_mode(netdev);
1729 1889
1730 ixgbe_restore_vlan(adapter); 1890 ixgbe_restore_vlan(adapter);
1731 1891
@@ -1733,7 +1893,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
1733 ixgbe_configure_rx(adapter); 1893 ixgbe_configure_rx(adapter);
1734 for (i = 0; i < adapter->num_rx_queues; i++) 1894 for (i = 0; i < adapter->num_rx_queues; i++)
1735 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i], 1895 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
1736 (adapter->rx_ring[i].count - 1)); 1896 (adapter->rx_ring[i].count - 1));
1737} 1897}
1738 1898
1739static int ixgbe_up_complete(struct ixgbe_adapter *adapter) 1899static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -1751,7 +1911,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1751 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) { 1911 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
1752 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1912 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1753 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | 1913 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
1754 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); 1914 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
1755 } else { 1915 } else {
1756 /* MSI only */ 1916 /* MSI only */
1757 gpie = 0; 1917 gpie = 0;
@@ -1778,6 +1938,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1778 for (i = 0; i < adapter->num_tx_queues; i++) { 1938 for (i = 0; i < adapter->num_tx_queues; i++) {
1779 j = adapter->tx_ring[i].reg_idx; 1939 j = adapter->tx_ring[i].reg_idx;
1780 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 1940 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1941 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1942 txdctl |= (8 << 16);
1781 txdctl |= IXGBE_TXDCTL_ENABLE; 1943 txdctl |= IXGBE_TXDCTL_ENABLE;
1782 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 1944 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1783 } 1945 }
@@ -1812,6 +1974,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1812 1974
1813 /* bring the link up in the watchdog, this could race with our first 1975 /* bring the link up in the watchdog, this could race with our first
1814 * link up interrupt but shouldn't be a problem */ 1976 * link up interrupt but shouldn't be a problem */
1977 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1978 adapter->link_check_timeout = jiffies;
1815 mod_timer(&adapter->watchdog_timer, jiffies); 1979 mod_timer(&adapter->watchdog_timer, jiffies);
1816 return 0; 1980 return 0;
1817} 1981}
@@ -1836,58 +2000,22 @@ int ixgbe_up(struct ixgbe_adapter *adapter)
1836 2000
1837void ixgbe_reset(struct ixgbe_adapter *adapter) 2001void ixgbe_reset(struct ixgbe_adapter *adapter)
1838{ 2002{
1839 if (ixgbe_init_hw(&adapter->hw)) 2003 struct ixgbe_hw *hw = &adapter->hw;
1840 DPRINTK(PROBE, ERR, "Hardware Error\n"); 2004 if (hw->mac.ops.init_hw(hw))
2005 dev_err(&adapter->pdev->dev, "Hardware Error\n");
1841 2006
1842 /* reprogram the RAR[0] in case user changed it. */ 2007 /* reprogram the RAR[0] in case user changed it. */
1843 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 2008 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1844 2009
1845} 2010}
1846 2011
1847#ifdef CONFIG_PM
1848static int ixgbe_resume(struct pci_dev *pdev)
1849{
1850 struct net_device *netdev = pci_get_drvdata(pdev);
1851 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1852 u32 err;
1853
1854 pci_set_power_state(pdev, PCI_D0);
1855 pci_restore_state(pdev);
1856 err = pci_enable_device(pdev);
1857 if (err) {
1858 printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
1859 "suspend\n");
1860 return err;
1861 }
1862 pci_set_master(pdev);
1863
1864 pci_enable_wake(pdev, PCI_D3hot, 0);
1865 pci_enable_wake(pdev, PCI_D3cold, 0);
1866
1867 if (netif_running(netdev)) {
1868 err = ixgbe_request_irq(adapter);
1869 if (err)
1870 return err;
1871 }
1872
1873 ixgbe_reset(adapter);
1874
1875 if (netif_running(netdev))
1876 ixgbe_up(adapter);
1877
1878 netif_device_attach(netdev);
1879
1880 return 0;
1881}
1882#endif
1883
1884/** 2012/**
1885 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 2013 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
1886 * @adapter: board private structure 2014 * @adapter: board private structure
1887 * @rx_ring: ring to free buffers from 2015 * @rx_ring: ring to free buffers from
1888 **/ 2016 **/
1889static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 2017static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1890 struct ixgbe_ring *rx_ring) 2018 struct ixgbe_ring *rx_ring)
1891{ 2019{
1892 struct pci_dev *pdev = adapter->pdev; 2020 struct pci_dev *pdev = adapter->pdev;
1893 unsigned long size; 2021 unsigned long size;
@@ -1901,8 +2029,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1901 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 2029 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1902 if (rx_buffer_info->dma) { 2030 if (rx_buffer_info->dma) {
1903 pci_unmap_single(pdev, rx_buffer_info->dma, 2031 pci_unmap_single(pdev, rx_buffer_info->dma,
1904 adapter->rx_buf_len, 2032 rx_ring->rx_buf_len,
1905 PCI_DMA_FROMDEVICE); 2033 PCI_DMA_FROMDEVICE);
1906 rx_buffer_info->dma = 0; 2034 rx_buffer_info->dma = 0;
1907 } 2035 }
1908 if (rx_buffer_info->skb) { 2036 if (rx_buffer_info->skb) {
@@ -1911,12 +2039,12 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1911 } 2039 }
1912 if (!rx_buffer_info->page) 2040 if (!rx_buffer_info->page)
1913 continue; 2041 continue;
1914 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE, 2042 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
1915 PCI_DMA_FROMDEVICE); 2043 PCI_DMA_FROMDEVICE);
1916 rx_buffer_info->page_dma = 0; 2044 rx_buffer_info->page_dma = 0;
1917
1918 put_page(rx_buffer_info->page); 2045 put_page(rx_buffer_info->page);
1919 rx_buffer_info->page = NULL; 2046 rx_buffer_info->page = NULL;
2047 rx_buffer_info->page_offset = 0;
1920 } 2048 }
1921 2049
1922 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 2050 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
@@ -1938,7 +2066,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1938 * @tx_ring: ring to be cleaned 2066 * @tx_ring: ring to be cleaned
1939 **/ 2067 **/
1940static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 2068static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
1941 struct ixgbe_ring *tx_ring) 2069 struct ixgbe_ring *tx_ring)
1942{ 2070{
1943 struct ixgbe_tx_buffer *tx_buffer_info; 2071 struct ixgbe_tx_buffer *tx_buffer_info;
1944 unsigned long size; 2072 unsigned long size;
@@ -1991,75 +2119,64 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
1991void ixgbe_down(struct ixgbe_adapter *adapter) 2119void ixgbe_down(struct ixgbe_adapter *adapter)
1992{ 2120{
1993 struct net_device *netdev = adapter->netdev; 2121 struct net_device *netdev = adapter->netdev;
2122 struct ixgbe_hw *hw = &adapter->hw;
1994 u32 rxctrl; 2123 u32 rxctrl;
2124 u32 txdctl;
2125 int i, j;
1995 2126
1996 /* signal that we are down to the interrupt handler */ 2127 /* signal that we are down to the interrupt handler */
1997 set_bit(__IXGBE_DOWN, &adapter->state); 2128 set_bit(__IXGBE_DOWN, &adapter->state);
1998 2129
1999 /* disable receives */ 2130 /* disable receives */
2000 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 2131 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2001 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, 2132 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2002 rxctrl & ~IXGBE_RXCTRL_RXEN);
2003 2133
2004 netif_tx_disable(netdev); 2134 netif_tx_disable(netdev);
2005 2135
2006 /* disable transmits in the hardware */ 2136 IXGBE_WRITE_FLUSH(hw);
2007
2008 /* flush both disables */
2009 IXGBE_WRITE_FLUSH(&adapter->hw);
2010 msleep(10); 2137 msleep(10);
2011 2138
2139 netif_tx_stop_all_queues(netdev);
2140
2012 ixgbe_irq_disable(adapter); 2141 ixgbe_irq_disable(adapter);
2013 2142
2014 ixgbe_napi_disable_all(adapter); 2143 ixgbe_napi_disable_all(adapter);
2144
2015 del_timer_sync(&adapter->watchdog_timer); 2145 del_timer_sync(&adapter->watchdog_timer);
2146 cancel_work_sync(&adapter->watchdog_task);
2147
2148 /* disable transmits in the hardware now that interrupts are off */
2149 for (i = 0; i < adapter->num_tx_queues; i++) {
2150 j = adapter->tx_ring[i].reg_idx;
2151 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2152 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
2153 (txdctl & ~IXGBE_TXDCTL_ENABLE));
2154 }
2016 2155
2017 netif_carrier_off(netdev); 2156 netif_carrier_off(netdev);
2018 netif_tx_stop_all_queues(netdev);
2019 2157
2158#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2159 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2160 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
2161 dca_remove_requester(&adapter->pdev->dev);
2162 }
2163
2164#endif
2020 if (!pci_channel_offline(adapter->pdev)) 2165 if (!pci_channel_offline(adapter->pdev))
2021 ixgbe_reset(adapter); 2166 ixgbe_reset(adapter);
2022 ixgbe_clean_all_tx_rings(adapter); 2167 ixgbe_clean_all_tx_rings(adapter);
2023 ixgbe_clean_all_rx_rings(adapter); 2168 ixgbe_clean_all_rx_rings(adapter);
2024 2169
2025} 2170#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2026 2171 /* since we reset the hardware DCA settings were cleared */
2027static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) 2172 if (dca_add_requester(&adapter->pdev->dev) == 0) {
2028{ 2173 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
2029 struct net_device *netdev = pci_get_drvdata(pdev); 2174 /* always use CB2 mode, difference is masked
2030 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2175 * in the CB driver */
2031#ifdef CONFIG_PM 2176 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
2032 int retval = 0; 2177 ixgbe_setup_dca(adapter);
2033#endif
2034
2035 netif_device_detach(netdev);
2036
2037 if (netif_running(netdev)) {
2038 ixgbe_down(adapter);
2039 ixgbe_free_irq(adapter);
2040 } 2178 }
2041
2042#ifdef CONFIG_PM
2043 retval = pci_save_state(pdev);
2044 if (retval)
2045 return retval;
2046#endif 2179#endif
2047
2048 pci_enable_wake(pdev, PCI_D3hot, 0);
2049 pci_enable_wake(pdev, PCI_D3cold, 0);
2050
2051 ixgbe_release_hw_control(adapter);
2052
2053 pci_disable_device(pdev);
2054
2055 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2056
2057 return 0;
2058}
2059
2060static void ixgbe_shutdown(struct pci_dev *pdev)
2061{
2062 ixgbe_suspend(pdev, PMSG_SUSPEND);
2063} 2180}
2064 2181
2065/** 2182/**
@@ -2072,11 +2189,11 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
2072static int ixgbe_poll(struct napi_struct *napi, int budget) 2189static int ixgbe_poll(struct napi_struct *napi, int budget)
2073{ 2190{
2074 struct ixgbe_q_vector *q_vector = container_of(napi, 2191 struct ixgbe_q_vector *q_vector = container_of(napi,
2075 struct ixgbe_q_vector, napi); 2192 struct ixgbe_q_vector, napi);
2076 struct ixgbe_adapter *adapter = q_vector->adapter; 2193 struct ixgbe_adapter *adapter = q_vector->adapter;
2077 int tx_cleaned = 0, work_done = 0; 2194 int tx_cleaned, work_done = 0;
2078 2195
2079#ifdef CONFIG_DCA 2196#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2080 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 2197 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2081 ixgbe_update_tx_dca(adapter, adapter->tx_ring); 2198 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2082 ixgbe_update_rx_dca(adapter, adapter->rx_ring); 2199 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
@@ -2092,12 +2209,11 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
2092 /* If budget not fully consumed, exit the polling mode */ 2209 /* If budget not fully consumed, exit the polling mode */
2093 if (work_done < budget) { 2210 if (work_done < budget) {
2094 netif_rx_complete(adapter->netdev, napi); 2211 netif_rx_complete(adapter->netdev, napi);
2095 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) 2212 if (adapter->itr_setting & 3)
2096 ixgbe_set_itr(adapter); 2213 ixgbe_set_itr(adapter);
2097 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2214 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2098 ixgbe_irq_enable(adapter); 2215 ixgbe_irq_enable(adapter);
2099 } 2216 }
2100
2101 return work_done; 2217 return work_done;
2102} 2218}
2103 2219
@@ -2123,8 +2239,48 @@ static void ixgbe_reset_task(struct work_struct *work)
2123 ixgbe_reinit_locked(adapter); 2239 ixgbe_reinit_locked(adapter);
2124} 2240}
2125 2241
2242static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2243{
2244 int nrq = 1, ntq = 1;
2245 int feature_mask = 0, rss_i, rss_m;
2246
2247 /* Number of supported queues */
2248 switch (adapter->hw.mac.type) {
2249 case ixgbe_mac_82598EB:
2250 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2251 rss_m = 0;
2252 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2253
2254 switch (adapter->flags & feature_mask) {
2255 case (IXGBE_FLAG_RSS_ENABLED):
2256 rss_m = 0xF;
2257 nrq = rss_i;
2258 ntq = rss_i;
2259 break;
2260 case 0:
2261 default:
2262 rss_i = 0;
2263 rss_m = 0;
2264 nrq = 1;
2265 ntq = 1;
2266 break;
2267 }
2268
2269 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2270 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2271 break;
2272 default:
2273 nrq = 1;
2274 ntq = 1;
2275 break;
2276 }
2277
2278 adapter->num_rx_queues = nrq;
2279 adapter->num_tx_queues = ntq;
2280}
2281
2126static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 2282static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2127 int vectors) 2283 int vectors)
2128{ 2284{
2129 int err, vector_threshold; 2285 int err, vector_threshold;
2130 2286
@@ -2143,7 +2299,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2143 */ 2299 */
2144 while (vectors >= vector_threshold) { 2300 while (vectors >= vector_threshold) {
2145 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 2301 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2146 vectors); 2302 vectors);
2147 if (!err) /* Success in acquiring all requested vectors. */ 2303 if (!err) /* Success in acquiring all requested vectors. */
2148 break; 2304 break;
2149 else if (err < 0) 2305 else if (err < 0)
@@ -2162,54 +2318,13 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2162 kfree(adapter->msix_entries); 2318 kfree(adapter->msix_entries);
2163 adapter->msix_entries = NULL; 2319 adapter->msix_entries = NULL;
2164 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2320 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2165 adapter->num_tx_queues = 1; 2321 ixgbe_set_num_queues(adapter);
2166 adapter->num_rx_queues = 1;
2167 } else { 2322 } else {
2168 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ 2323 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
2169 adapter->num_msix_vectors = vectors; 2324 adapter->num_msix_vectors = vectors;
2170 } 2325 }
2171} 2326}
2172 2327
2173static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2174{
2175 int nrq, ntq;
2176 int feature_mask = 0, rss_i, rss_m;
2177
2178 /* Number of supported queues */
2179 switch (adapter->hw.mac.type) {
2180 case ixgbe_mac_82598EB:
2181 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2182 rss_m = 0;
2183 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2184
2185 switch (adapter->flags & feature_mask) {
2186 case (IXGBE_FLAG_RSS_ENABLED):
2187 rss_m = 0xF;
2188 nrq = rss_i;
2189 ntq = rss_i;
2190 break;
2191 case 0:
2192 default:
2193 rss_i = 0;
2194 rss_m = 0;
2195 nrq = 1;
2196 ntq = 1;
2197 break;
2198 }
2199
2200 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2201 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2202 break;
2203 default:
2204 nrq = 1;
2205 ntq = 1;
2206 break;
2207 }
2208
2209 adapter->num_rx_queues = nrq;
2210 adapter->num_tx_queues = ntq;
2211}
2212
2213/** 2328/**
2214 * ixgbe_cache_ring_register - Descriptor ring to register mapping 2329 * ixgbe_cache_ring_register - Descriptor ring to register mapping
2215 * @adapter: board private structure to initialize 2330 * @adapter: board private structure to initialize
@@ -2219,9 +2334,6 @@ static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2219 **/ 2334 **/
2220static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 2335static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2221{ 2336{
2222 /* TODO: Remove all uses of the indices in the cases where multiple
2223 * features are OR'd together, if the feature set makes sense.
2224 */
2225 int feature_mask = 0, rss_i; 2337 int feature_mask = 0, rss_i;
2226 int i, txr_idx, rxr_idx; 2338 int i, txr_idx, rxr_idx;
2227 2339
@@ -2262,21 +2374,22 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2262 int i; 2374 int i;
2263 2375
2264 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 2376 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
2265 sizeof(struct ixgbe_ring), GFP_KERNEL); 2377 sizeof(struct ixgbe_ring), GFP_KERNEL);
2266 if (!adapter->tx_ring) 2378 if (!adapter->tx_ring)
2267 goto err_tx_ring_allocation; 2379 goto err_tx_ring_allocation;
2268 2380
2269 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 2381 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
2270 sizeof(struct ixgbe_ring), GFP_KERNEL); 2382 sizeof(struct ixgbe_ring), GFP_KERNEL);
2271 if (!adapter->rx_ring) 2383 if (!adapter->rx_ring)
2272 goto err_rx_ring_allocation; 2384 goto err_rx_ring_allocation;
2273 2385
2274 for (i = 0; i < adapter->num_tx_queues; i++) { 2386 for (i = 0; i < adapter->num_tx_queues; i++) {
2275 adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD; 2387 adapter->tx_ring[i].count = adapter->tx_ring_count;
2276 adapter->tx_ring[i].queue_index = i; 2388 adapter->tx_ring[i].queue_index = i;
2277 } 2389 }
2390
2278 for (i = 0; i < adapter->num_rx_queues; i++) { 2391 for (i = 0; i < adapter->num_rx_queues; i++) {
2279 adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD; 2392 adapter->rx_ring[i].count = adapter->rx_ring_count;
2280 adapter->rx_ring[i].queue_index = i; 2393 adapter->rx_ring[i].queue_index = i;
2281 } 2394 }
2282 2395
@@ -2298,7 +2411,7 @@ err_tx_ring_allocation:
2298 * capabilities of the hardware and the kernel. 2411 * capabilities of the hardware and the kernel.
2299 **/ 2412 **/
2300static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter 2413static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2301 *adapter) 2414 *adapter)
2302{ 2415{
2303 int err = 0; 2416 int err = 0;
2304 int vector, v_budget; 2417 int vector, v_budget;
@@ -2310,7 +2423,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2310 * (roughly) twice the number of vectors as there are CPU's. 2423 * (roughly) twice the number of vectors as there are CPU's.
2311 */ 2424 */
2312 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 2425 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2313 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 2426 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2314 2427
2315 /* 2428 /*
2316 * At the same time, hardware can only support a maximum of 2429 * At the same time, hardware can only support a maximum of
@@ -2324,7 +2437,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2324 /* A failure in MSI-X entry allocation isn't fatal, but it does 2437 /* A failure in MSI-X entry allocation isn't fatal, but it does
2325 * mean we disable MSI-X capabilities of the adapter. */ 2438 * mean we disable MSI-X capabilities of the adapter. */
2326 adapter->msix_entries = kcalloc(v_budget, 2439 adapter->msix_entries = kcalloc(v_budget,
2327 sizeof(struct msix_entry), GFP_KERNEL); 2440 sizeof(struct msix_entry), GFP_KERNEL);
2328 if (!adapter->msix_entries) { 2441 if (!adapter->msix_entries) {
2329 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2442 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2330 ixgbe_set_num_queues(adapter); 2443 ixgbe_set_num_queues(adapter);
@@ -2333,7 +2446,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2333 err = ixgbe_alloc_queues(adapter); 2446 err = ixgbe_alloc_queues(adapter);
2334 if (err) { 2447 if (err) {
2335 DPRINTK(PROBE, ERR, "Unable to allocate memory " 2448 DPRINTK(PROBE, ERR, "Unable to allocate memory "
2336 "for queues\n"); 2449 "for queues\n");
2337 goto out; 2450 goto out;
2338 } 2451 }
2339 2452
@@ -2354,7 +2467,7 @@ try_msi:
2354 adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 2467 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
2355 } else { 2468 } else {
2356 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " 2469 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
2357 "falling back to legacy. Error: %d\n", err); 2470 "falling back to legacy. Error: %d\n", err);
2358 /* reset err */ 2471 /* reset err */
2359 err = 0; 2472 err = 0;
2360 } 2473 }
@@ -2410,9 +2523,9 @@ static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2410 } 2523 }
2411 2524
2412 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " 2525 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
2413 "Tx Queue count = %u\n", 2526 "Tx Queue count = %u\n",
2414 (adapter->num_rx_queues > 1) ? "Enabled" : 2527 (adapter->num_rx_queues > 1) ? "Enabled" :
2415 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2528 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2416 2529
2417 set_bit(__IXGBE_DOWN, &adapter->state); 2530 set_bit(__IXGBE_DOWN, &adapter->state);
2418 2531
@@ -2439,33 +2552,44 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2439 struct pci_dev *pdev = adapter->pdev; 2552 struct pci_dev *pdev = adapter->pdev;
2440 unsigned int rss; 2553 unsigned int rss;
2441 2554
2555 /* PCI config space info */
2556
2557 hw->vendor_id = pdev->vendor;
2558 hw->device_id = pdev->device;
2559 hw->revision_id = pdev->revision;
2560 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2561 hw->subsystem_device_id = pdev->subsystem_device;
2562
2442 /* Set capability flags */ 2563 /* Set capability flags */
2443 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); 2564 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2444 adapter->ring_feature[RING_F_RSS].indices = rss; 2565 adapter->ring_feature[RING_F_RSS].indices = rss;
2445 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 2566 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2446 2567
2447 /* Enable Dynamic interrupt throttling by default */
2448 adapter->rx_eitr = 1;
2449 adapter->tx_eitr = 1;
2450
2451 /* default flow control settings */ 2568 /* default flow control settings */
2452 hw->fc.original_type = ixgbe_fc_full; 2569 hw->fc.original_type = ixgbe_fc_none;
2453 hw->fc.type = ixgbe_fc_full; 2570 hw->fc.type = ixgbe_fc_none;
2571 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
2572 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
2573 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
2574 hw->fc.send_xon = true;
2454 2575
2455 /* select 10G link by default */ 2576 /* select 10G link by default */
2456 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; 2577 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
2457 if (hw->mac.ops.reset(hw)) { 2578
2458 dev_err(&pdev->dev, "HW Init failed\n"); 2579 /* enable itr by default in dynamic mode */
2459 return -EIO; 2580 adapter->itr_setting = 1;
2460 } 2581 adapter->eitr_param = 20000;
2461 if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true, 2582
2462 false)) { 2583 /* set defaults for eitr in MegaBytes */
2463 dev_err(&pdev->dev, "Link Speed setup failed\n"); 2584 adapter->eitr_low = 10;
2464 return -EIO; 2585 adapter->eitr_high = 20;
2465 } 2586
2587 /* set default ring sizes */
2588 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
2589 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
2466 2590
2467 /* initialize eeprom parameters */ 2591 /* initialize eeprom parameters */
2468 if (ixgbe_init_eeprom(hw)) { 2592 if (ixgbe_init_eeprom_params_generic(hw)) {
2469 dev_err(&pdev->dev, "EEPROM initialization failed\n"); 2593 dev_err(&pdev->dev, "EEPROM initialization failed\n");
2470 return -EIO; 2594 return -EIO;
2471 } 2595 }
@@ -2481,105 +2605,157 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2481/** 2605/**
2482 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) 2606 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
2483 * @adapter: board private structure 2607 * @adapter: board private structure
2484 * @txdr: tx descriptor ring (for a specific queue) to setup 2608 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2485 * 2609 *
2486 * Return 0 on success, negative on failure 2610 * Return 0 on success, negative on failure
2487 **/ 2611 **/
2488int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 2612int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
2489 struct ixgbe_ring *txdr) 2613 struct ixgbe_ring *tx_ring)
2490{ 2614{
2491 struct pci_dev *pdev = adapter->pdev; 2615 struct pci_dev *pdev = adapter->pdev;
2492 int size; 2616 int size;
2493 2617
2494 size = sizeof(struct ixgbe_tx_buffer) * txdr->count; 2618 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2495 txdr->tx_buffer_info = vmalloc(size); 2619 tx_ring->tx_buffer_info = vmalloc(size);
2496 if (!txdr->tx_buffer_info) { 2620 if (!tx_ring->tx_buffer_info)
2497 DPRINTK(PROBE, ERR, 2621 goto err;
2498 "Unable to allocate memory for the transmit descriptor ring\n"); 2622 memset(tx_ring->tx_buffer_info, 0, size);
2499 return -ENOMEM;
2500 }
2501 memset(txdr->tx_buffer_info, 0, size);
2502 2623
2503 /* round up to nearest 4K */ 2624 /* round up to nearest 4K */
2504 txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc); 2625 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
2505 txdr->size = ALIGN(txdr->size, 4096); 2626 sizeof(u32);
2506 2627 tx_ring->size = ALIGN(tx_ring->size, 4096);
2507 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
2508 if (!txdr->desc) {
2509 vfree(txdr->tx_buffer_info);
2510 DPRINTK(PROBE, ERR,
2511 "Memory allocation failed for the tx desc ring\n");
2512 return -ENOMEM;
2513 }
2514 2628
2515 txdr->next_to_use = 0; 2629 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2516 txdr->next_to_clean = 0; 2630 &tx_ring->dma);
2517 txdr->work_limit = txdr->count; 2631 if (!tx_ring->desc)
2632 goto err;
2518 2633
2634 tx_ring->next_to_use = 0;
2635 tx_ring->next_to_clean = 0;
2636 tx_ring->work_limit = tx_ring->count;
2519 return 0; 2637 return 0;
2638
2639err:
2640 vfree(tx_ring->tx_buffer_info);
2641 tx_ring->tx_buffer_info = NULL;
2642 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
2643 "descriptor ring\n");
2644 return -ENOMEM;
2645}
2646
2647/**
2648 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
2649 * @adapter: board private structure
2650 *
2651 * If this function returns with an error, then it's possible one or
2652 * more of the rings is populated (while the rest are not). It is the
2653 * callers duty to clean those orphaned rings.
2654 *
2655 * Return 0 on success, negative on failure
2656 **/
2657static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
2658{
2659 int i, err = 0;
2660
2661 for (i = 0; i < adapter->num_tx_queues; i++) {
2662 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2663 if (!err)
2664 continue;
2665 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
2666 break;
2667 }
2668
2669 return err;
2520} 2670}
2521 2671
2522/** 2672/**
2523 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 2673 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
2524 * @adapter: board private structure 2674 * @adapter: board private structure
2525 * @rxdr: rx descriptor ring (for a specific queue) to setup 2675 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2526 * 2676 *
2527 * Returns 0 on success, negative on failure 2677 * Returns 0 on success, negative on failure
2528 **/ 2678 **/
2529int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 2679int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2530 struct ixgbe_ring *rxdr) 2680 struct ixgbe_ring *rx_ring)
2531{ 2681{
2532 struct pci_dev *pdev = adapter->pdev; 2682 struct pci_dev *pdev = adapter->pdev;
2533 int size; 2683 int size;
2534 2684
2535 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS; 2685 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
2536 rxdr->lro_mgr.lro_arr = vmalloc(size); 2686 rx_ring->lro_mgr.lro_arr = vmalloc(size);
2537 if (!rxdr->lro_mgr.lro_arr) 2687 if (!rx_ring->lro_mgr.lro_arr)
2538 return -ENOMEM; 2688 return -ENOMEM;
2539 memset(rxdr->lro_mgr.lro_arr, 0, size); 2689 memset(rx_ring->lro_mgr.lro_arr, 0, size);
2540 2690
2541 size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; 2691 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2542 rxdr->rx_buffer_info = vmalloc(size); 2692 rx_ring->rx_buffer_info = vmalloc(size);
2543 if (!rxdr->rx_buffer_info) { 2693 if (!rx_ring->rx_buffer_info) {
2544 DPRINTK(PROBE, ERR, 2694 DPRINTK(PROBE, ERR,
2545 "vmalloc allocation failed for the rx desc ring\n"); 2695 "vmalloc allocation failed for the rx desc ring\n");
2546 goto alloc_failed; 2696 goto alloc_failed;
2547 } 2697 }
2548 memset(rxdr->rx_buffer_info, 0, size); 2698 memset(rx_ring->rx_buffer_info, 0, size);
2549 2699
2550 /* Round up to nearest 4K */ 2700 /* Round up to nearest 4K */
2551 rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc); 2701 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2552 rxdr->size = ALIGN(rxdr->size, 4096); 2702 rx_ring->size = ALIGN(rx_ring->size, 4096);
2553 2703
2554 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 2704 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
2555 2705
2556 if (!rxdr->desc) { 2706 if (!rx_ring->desc) {
2557 DPRINTK(PROBE, ERR, 2707 DPRINTK(PROBE, ERR,
2558 "Memory allocation failed for the rx desc ring\n"); 2708 "Memory allocation failed for the rx desc ring\n");
2559 vfree(rxdr->rx_buffer_info); 2709 vfree(rx_ring->rx_buffer_info);
2560 goto alloc_failed; 2710 goto alloc_failed;
2561 } 2711 }
2562 2712
2563 rxdr->next_to_clean = 0; 2713 rx_ring->next_to_clean = 0;
2564 rxdr->next_to_use = 0; 2714 rx_ring->next_to_use = 0;
2565 2715
2566 return 0; 2716 return 0;
2567 2717
2568alloc_failed: 2718alloc_failed:
2569 vfree(rxdr->lro_mgr.lro_arr); 2719 vfree(rx_ring->lro_mgr.lro_arr);
2570 rxdr->lro_mgr.lro_arr = NULL; 2720 rx_ring->lro_mgr.lro_arr = NULL;
2571 return -ENOMEM; 2721 return -ENOMEM;
2572} 2722}
2573 2723
2574/** 2724/**
2725 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
2726 * @adapter: board private structure
2727 *
2728 * If this function returns with an error, then it's possible one or
2729 * more of the rings is populated (while the rest are not). It is the
2730 * callers duty to clean those orphaned rings.
2731 *
2732 * Return 0 on success, negative on failure
2733 **/
2734
2735static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
2736{
2737 int i, err = 0;
2738
2739 for (i = 0; i < adapter->num_rx_queues; i++) {
2740 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2741 if (!err)
2742 continue;
2743 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
2744 break;
2745 }
2746
2747 return err;
2748}
2749
2750/**
2575 * ixgbe_free_tx_resources - Free Tx Resources per Queue 2751 * ixgbe_free_tx_resources - Free Tx Resources per Queue
2576 * @adapter: board private structure 2752 * @adapter: board private structure
2577 * @tx_ring: Tx descriptor ring for a specific queue 2753 * @tx_ring: Tx descriptor ring for a specific queue
2578 * 2754 *
2579 * Free all transmit software resources 2755 * Free all transmit software resources
2580 **/ 2756 **/
2581static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 2757void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
2582 struct ixgbe_ring *tx_ring) 2758 struct ixgbe_ring *tx_ring)
2583{ 2759{
2584 struct pci_dev *pdev = adapter->pdev; 2760 struct pci_dev *pdev = adapter->pdev;
2585 2761
@@ -2614,8 +2790,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
2614 * 2790 *
2615 * Free all receive software resources 2791 * Free all receive software resources
2616 **/ 2792 **/
2617static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 2793void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
2618 struct ixgbe_ring *rx_ring) 2794 struct ixgbe_ring *rx_ring)
2619{ 2795{
2620 struct pci_dev *pdev = adapter->pdev; 2796 struct pci_dev *pdev = adapter->pdev;
2621 2797
@@ -2647,59 +2823,6 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
2647} 2823}
2648 2824
2649/** 2825/**
2650 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
2651 * @adapter: board private structure
2652 *
2653 * If this function returns with an error, then it's possible one or
2654 * more of the rings is populated (while the rest are not). It is the
2655 * callers duty to clean those orphaned rings.
2656 *
2657 * Return 0 on success, negative on failure
2658 **/
2659static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
2660{
2661 int i, err = 0;
2662
2663 for (i = 0; i < adapter->num_tx_queues; i++) {
2664 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2665 if (err) {
2666 DPRINTK(PROBE, ERR,
2667 "Allocation for Tx Queue %u failed\n", i);
2668 break;
2669 }
2670 }
2671
2672 return err;
2673}
2674
2675/**
2676 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
2677 * @adapter: board private structure
2678 *
2679 * If this function returns with an error, then it's possible one or
2680 * more of the rings is populated (while the rest are not). It is the
2681 * callers duty to clean those orphaned rings.
2682 *
2683 * Return 0 on success, negative on failure
2684 **/
2685
2686static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
2687{
2688 int i, err = 0;
2689
2690 for (i = 0; i < adapter->num_rx_queues; i++) {
2691 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2692 if (err) {
2693 DPRINTK(PROBE, ERR,
2694 "Allocation for Rx Queue %u failed\n", i);
2695 break;
2696 }
2697 }
2698
2699 return err;
2700}
2701
2702/**
2703 * ixgbe_change_mtu - Change the Maximum Transfer Unit 2826 * ixgbe_change_mtu - Change the Maximum Transfer Unit
2704 * @netdev: network interface device structure 2827 * @netdev: network interface device structure
2705 * @new_mtu: new value for maximum frame size 2828 * @new_mtu: new value for maximum frame size
@@ -2711,12 +2834,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
2711 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2834 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2712 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2835 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2713 2836
2714 if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) || 2837 /* MTU < 68 is an error and causes problems on some kernels */
2715 (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 2838 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
2716 return -EINVAL; 2839 return -EINVAL;
2717 2840
2718 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", 2841 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
2719 netdev->mtu, new_mtu); 2842 netdev->mtu, new_mtu);
2720 /* must set new MTU before calling down or up */ 2843 /* must set new MTU before calling down or up */
2721 netdev->mtu = new_mtu; 2844 netdev->mtu = new_mtu;
2722 2845
@@ -2811,6 +2934,135 @@ static int ixgbe_close(struct net_device *netdev)
2811} 2934}
2812 2935
2813/** 2936/**
2937 * ixgbe_napi_add_all - prep napi structs for use
2938 * @adapter: private struct
2939 * helper function to napi_add each possible q_vector->napi
2940 */
2941static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
2942{
2943 int q_idx, q_vectors;
2944 int (*poll)(struct napi_struct *, int);
2945
2946 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2947 poll = &ixgbe_clean_rxonly;
2948 /* Only enable as many vectors as we have rx queues. */
2949 q_vectors = adapter->num_rx_queues;
2950 } else {
2951 poll = &ixgbe_poll;
2952 /* only one q_vector for legacy modes */
2953 q_vectors = 1;
2954 }
2955
2956 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2957 struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
2958 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
2959 }
2960}
2961
2962static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
2963{
2964 int q_idx;
2965 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2966
2967 /* legacy and MSI only use one vector */
2968 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2969 q_vectors = 1;
2970
2971 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2972 struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
2973 if (!q_vector->rxr_count)
2974 continue;
2975 netif_napi_del(&q_vector->napi);
2976 }
2977}
2978
2979#ifdef CONFIG_PM
2980static int ixgbe_resume(struct pci_dev *pdev)
2981{
2982 struct net_device *netdev = pci_get_drvdata(pdev);
2983 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2984 u32 err;
2985
2986 pci_set_power_state(pdev, PCI_D0);
2987 pci_restore_state(pdev);
2988 err = pci_enable_device(pdev);
2989 if (err) {
2990 printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
2991 "suspend\n");
2992 return err;
2993 }
2994 pci_set_master(pdev);
2995
2996 pci_enable_wake(pdev, PCI_D3hot, 0);
2997 pci_enable_wake(pdev, PCI_D3cold, 0);
2998
2999 err = ixgbe_init_interrupt_scheme(adapter);
3000 if (err) {
3001 printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
3002 "device\n");
3003 return err;
3004 }
3005
3006 ixgbe_napi_add_all(adapter);
3007 ixgbe_reset(adapter);
3008
3009 if (netif_running(netdev)) {
3010 err = ixgbe_open(adapter->netdev);
3011 if (err)
3012 return err;
3013 }
3014
3015 netif_device_attach(netdev);
3016
3017 return 0;
3018}
3019
3020#endif /* CONFIG_PM */
3021static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
3022{
3023 struct net_device *netdev = pci_get_drvdata(pdev);
3024 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3025#ifdef CONFIG_PM
3026 int retval = 0;
3027#endif
3028
3029 netif_device_detach(netdev);
3030
3031 if (netif_running(netdev)) {
3032 ixgbe_down(adapter);
3033 ixgbe_free_irq(adapter);
3034 ixgbe_free_all_tx_resources(adapter);
3035 ixgbe_free_all_rx_resources(adapter);
3036 }
3037 ixgbe_reset_interrupt_capability(adapter);
3038 ixgbe_napi_del_all(adapter);
3039 kfree(adapter->tx_ring);
3040 kfree(adapter->rx_ring);
3041
3042#ifdef CONFIG_PM
3043 retval = pci_save_state(pdev);
3044 if (retval)
3045 return retval;
3046#endif
3047
3048 pci_enable_wake(pdev, PCI_D3hot, 0);
3049 pci_enable_wake(pdev, PCI_D3cold, 0);
3050
3051 ixgbe_release_hw_control(adapter);
3052
3053 pci_disable_device(pdev);
3054
3055 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3056
3057 return 0;
3058}
3059
3060static void ixgbe_shutdown(struct pci_dev *pdev)
3061{
3062 ixgbe_suspend(pdev, PMSG_SUSPEND);
3063}
3064
3065/**
2814 * ixgbe_update_stats - Update the board statistics counters. 3066 * ixgbe_update_stats - Update the board statistics counters.
2815 * @adapter: board private structure 3067 * @adapter: board private structure
2816 **/ 3068 **/
@@ -2883,7 +3135,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
2883 3135
2884 /* Rx Errors */ 3136 /* Rx Errors */
2885 adapter->net_stats.rx_errors = adapter->stats.crcerrs + 3137 adapter->net_stats.rx_errors = adapter->stats.crcerrs +
2886 adapter->stats.rlec; 3138 adapter->stats.rlec;
2887 adapter->net_stats.rx_dropped = 0; 3139 adapter->net_stats.rx_dropped = 0;
2888 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 3140 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2889 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3141 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
@@ -2897,27 +3149,74 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
2897static void ixgbe_watchdog(unsigned long data) 3149static void ixgbe_watchdog(unsigned long data)
2898{ 3150{
2899 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; 3151 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
2900 struct net_device *netdev = adapter->netdev; 3152 struct ixgbe_hw *hw = &adapter->hw;
2901 bool link_up; 3153
2902 u32 link_speed = 0; 3154 /* Do the watchdog outside of interrupt context due to the lovely
3155 * delays that some of the newer hardware requires */
3156 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
3157 /* Cause software interrupt to ensure rx rings are cleaned */
3158 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3159 u32 eics =
3160 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
3161 IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
3162 } else {
3163 /* For legacy and MSI interrupts don't set any bits that
3164 * are enabled for EIAM, because this operation would
3165 * set *both* EIMS and EICS for any bit in EIAM */
3166 IXGBE_WRITE_REG(hw, IXGBE_EICS,
3167 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
3168 }
3169 /* Reset the timer */
3170 mod_timer(&adapter->watchdog_timer,
3171 round_jiffies(jiffies + 2 * HZ));
3172 }
2903 3173
2904 adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up); 3174 schedule_work(&adapter->watchdog_task);
3175}
3176
3177/**
3178 * ixgbe_watchdog_task - worker thread to bring link up
3179 * @work: pointer to work_struct containing our data
3180 **/
3181static void ixgbe_watchdog_task(struct work_struct *work)
3182{
3183 struct ixgbe_adapter *adapter = container_of(work,
3184 struct ixgbe_adapter,
3185 watchdog_task);
3186 struct net_device *netdev = adapter->netdev;
3187 struct ixgbe_hw *hw = &adapter->hw;
3188 u32 link_speed = adapter->link_speed;
3189 bool link_up = adapter->link_up;
3190
3191 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
3192
3193 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3194 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3195 if (link_up ||
3196 time_after(jiffies, (adapter->link_check_timeout +
3197 IXGBE_TRY_LINK_TIMEOUT))) {
3198 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
3199 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3200 }
3201 adapter->link_up = link_up;
3202 adapter->link_speed = link_speed;
3203 }
2905 3204
2906 if (link_up) { 3205 if (link_up) {
2907 if (!netif_carrier_ok(netdev)) { 3206 if (!netif_carrier_ok(netdev)) {
2908 u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3207 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2909 u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS); 3208 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
2910#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) 3209#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
2911#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) 3210#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
2912 DPRINTK(LINK, INFO, "NIC Link is Up %s, " 3211 DPRINTK(LINK, INFO, "NIC Link is Up %s, "
2913 "Flow Control: %s\n", 3212 "Flow Control: %s\n",
2914 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 3213 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
2915 "10 Gbps" : 3214 "10 Gbps" :
2916 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 3215 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
2917 "1 Gbps" : "unknown speed")), 3216 "1 Gbps" : "unknown speed")),
2918 ((FLOW_RX && FLOW_TX) ? "RX/TX" : 3217 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
2919 (FLOW_RX ? "RX" : 3218 (FLOW_RX ? "RX" :
2920 (FLOW_TX ? "TX" : "None")))); 3219 (FLOW_TX ? "TX" : "None"))));
2921 3220
2922 netif_carrier_on(netdev); 3221 netif_carrier_on(netdev);
2923 netif_tx_wake_all_queues(netdev); 3222 netif_tx_wake_all_queues(netdev);
@@ -2926,6 +3225,8 @@ static void ixgbe_watchdog(unsigned long data)
2926 adapter->detect_tx_hung = true; 3225 adapter->detect_tx_hung = true;
2927 } 3226 }
2928 } else { 3227 } else {
3228 adapter->link_up = false;
3229 adapter->link_speed = 0;
2929 if (netif_carrier_ok(netdev)) { 3230 if (netif_carrier_ok(netdev)) {
2930 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 3231 DPRINTK(LINK, INFO, "NIC Link is Down\n");
2931 netif_carrier_off(netdev); 3232 netif_carrier_off(netdev);
@@ -2934,36 +3235,19 @@ static void ixgbe_watchdog(unsigned long data)
2934 } 3235 }
2935 3236
2936 ixgbe_update_stats(adapter); 3237 ixgbe_update_stats(adapter);
2937 3238 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2938 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2939 /* Cause software interrupt to ensure rx rings are cleaned */
2940 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2941 u32 eics =
2942 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
2943 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
2944 } else {
2945 /* for legacy and MSI interrupts don't set any bits that
2946 * are enabled for EIAM, because this operation would
2947 * set *both* EIMS and EICS for any bit in EIAM */
2948 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
2949 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
2950 }
2951 /* Reset the timer */
2952 mod_timer(&adapter->watchdog_timer,
2953 round_jiffies(jiffies + 2 * HZ));
2954 }
2955} 3239}
2956 3240
2957static int ixgbe_tso(struct ixgbe_adapter *adapter, 3241static int ixgbe_tso(struct ixgbe_adapter *adapter,
2958 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 3242 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
2959 u32 tx_flags, u8 *hdr_len) 3243 u32 tx_flags, u8 *hdr_len)
2960{ 3244{
2961 struct ixgbe_adv_tx_context_desc *context_desc; 3245 struct ixgbe_adv_tx_context_desc *context_desc;
2962 unsigned int i; 3246 unsigned int i;
2963 int err; 3247 int err;
2964 struct ixgbe_tx_buffer *tx_buffer_info; 3248 struct ixgbe_tx_buffer *tx_buffer_info;
2965 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 3249 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
2966 u32 mss_l4len_idx = 0, l4len; 3250 u32 mss_l4len_idx, l4len;
2967 3251
2968 if (skb_is_gso(skb)) { 3252 if (skb_is_gso(skb)) {
2969 if (skb_header_cloned(skb)) { 3253 if (skb_header_cloned(skb)) {
@@ -2979,16 +3263,16 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
2979 iph->tot_len = 0; 3263 iph->tot_len = 0;
2980 iph->check = 0; 3264 iph->check = 0;
2981 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 3265 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2982 iph->daddr, 0, 3266 iph->daddr, 0,
2983 IPPROTO_TCP, 3267 IPPROTO_TCP,
2984 0); 3268 0);
2985 adapter->hw_tso_ctxt++; 3269 adapter->hw_tso_ctxt++;
2986 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 3270 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2987 ipv6_hdr(skb)->payload_len = 0; 3271 ipv6_hdr(skb)->payload_len = 0;
2988 tcp_hdr(skb)->check = 3272 tcp_hdr(skb)->check =
2989 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3273 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2990 &ipv6_hdr(skb)->daddr, 3274 &ipv6_hdr(skb)->daddr,
2991 0, IPPROTO_TCP, 0); 3275 0, IPPROTO_TCP, 0);
2992 adapter->hw_tso6_ctxt++; 3276 adapter->hw_tso6_ctxt++;
2993 } 3277 }
2994 3278
@@ -3002,7 +3286,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
3002 vlan_macip_lens |= 3286 vlan_macip_lens |=
3003 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 3287 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3004 vlan_macip_lens |= ((skb_network_offset(skb)) << 3288 vlan_macip_lens |= ((skb_network_offset(skb)) <<
3005 IXGBE_ADVTXD_MACLEN_SHIFT); 3289 IXGBE_ADVTXD_MACLEN_SHIFT);
3006 *hdr_len += skb_network_offset(skb); 3290 *hdr_len += skb_network_offset(skb);
3007 vlan_macip_lens |= 3291 vlan_macip_lens |=
3008 (skb_transport_header(skb) - skb_network_header(skb)); 3292 (skb_transport_header(skb) - skb_network_header(skb));
@@ -3012,8 +3296,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
3012 context_desc->seqnum_seed = 0; 3296 context_desc->seqnum_seed = 0;
3013 3297
3014 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 3298 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3015 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 3299 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
3016 IXGBE_ADVTXD_DTYP_CTXT); 3300 IXGBE_ADVTXD_DTYP_CTXT);
3017 3301
3018 if (skb->protocol == htons(ETH_P_IP)) 3302 if (skb->protocol == htons(ETH_P_IP))
3019 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 3303 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -3021,9 +3305,11 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
3021 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 3305 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3022 3306
3023 /* MSS L4LEN IDX */ 3307 /* MSS L4LEN IDX */
3024 mss_l4len_idx |= 3308 mss_l4len_idx =
3025 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); 3309 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
3026 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); 3310 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
3311 /* use index 1 for TSO */
3312 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3027 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 3313 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3028 3314
3029 tx_buffer_info->time_stamp = jiffies; 3315 tx_buffer_info->time_stamp = jiffies;
@@ -3040,8 +3326,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
3040} 3326}
3041 3327
3042static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 3328static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3043 struct ixgbe_ring *tx_ring, 3329 struct ixgbe_ring *tx_ring,
3044 struct sk_buff *skb, u32 tx_flags) 3330 struct sk_buff *skb, u32 tx_flags)
3045{ 3331{
3046 struct ixgbe_adv_tx_context_desc *context_desc; 3332 struct ixgbe_adv_tx_context_desc *context_desc;
3047 unsigned int i; 3333 unsigned int i;
@@ -3058,16 +3344,16 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3058 vlan_macip_lens |= 3344 vlan_macip_lens |=
3059 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 3345 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3060 vlan_macip_lens |= (skb_network_offset(skb) << 3346 vlan_macip_lens |= (skb_network_offset(skb) <<
3061 IXGBE_ADVTXD_MACLEN_SHIFT); 3347 IXGBE_ADVTXD_MACLEN_SHIFT);
3062 if (skb->ip_summed == CHECKSUM_PARTIAL) 3348 if (skb->ip_summed == CHECKSUM_PARTIAL)
3063 vlan_macip_lens |= (skb_transport_header(skb) - 3349 vlan_macip_lens |= (skb_transport_header(skb) -
3064 skb_network_header(skb)); 3350 skb_network_header(skb));
3065 3351
3066 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 3352 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3067 context_desc->seqnum_seed = 0; 3353 context_desc->seqnum_seed = 0;
3068 3354
3069 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 3355 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3070 IXGBE_ADVTXD_DTYP_CTXT); 3356 IXGBE_ADVTXD_DTYP_CTXT);
3071 3357
3072 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3358 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3073 switch (skb->protocol) { 3359 switch (skb->protocol) {
@@ -3075,16 +3361,14 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3075 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 3361 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3076 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 3362 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3077 type_tucmd_mlhl |= 3363 type_tucmd_mlhl |=
3078 IXGBE_ADVTXD_TUCMD_L4T_TCP; 3364 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3079 break; 3365 break;
3080
3081 case __constant_htons(ETH_P_IPV6): 3366 case __constant_htons(ETH_P_IPV6):
3082 /* XXX what about other V6 headers?? */ 3367 /* XXX what about other V6 headers?? */
3083 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3368 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3084 type_tucmd_mlhl |= 3369 type_tucmd_mlhl |=
3085 IXGBE_ADVTXD_TUCMD_L4T_TCP; 3370 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3086 break; 3371 break;
3087
3088 default: 3372 default:
3089 if (unlikely(net_ratelimit())) { 3373 if (unlikely(net_ratelimit())) {
3090 DPRINTK(PROBE, WARNING, 3374 DPRINTK(PROBE, WARNING,
@@ -3096,10 +3380,12 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3096 } 3380 }
3097 3381
3098 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 3382 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3383 /* use index zero for tx checksum offload */
3099 context_desc->mss_l4len_idx = 0; 3384 context_desc->mss_l4len_idx = 0;
3100 3385
3101 tx_buffer_info->time_stamp = jiffies; 3386 tx_buffer_info->time_stamp = jiffies;
3102 tx_buffer_info->next_to_watch = i; 3387 tx_buffer_info->next_to_watch = i;
3388
3103 adapter->hw_csum_tx_good++; 3389 adapter->hw_csum_tx_good++;
3104 i++; 3390 i++;
3105 if (i == tx_ring->count) 3391 if (i == tx_ring->count)
@@ -3108,12 +3394,13 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3108 3394
3109 return true; 3395 return true;
3110 } 3396 }
3397
3111 return false; 3398 return false;
3112} 3399}
3113 3400
3114static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 3401static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3115 struct ixgbe_ring *tx_ring, 3402 struct ixgbe_ring *tx_ring,
3116 struct sk_buff *skb, unsigned int first) 3403 struct sk_buff *skb, unsigned int first)
3117{ 3404{
3118 struct ixgbe_tx_buffer *tx_buffer_info; 3405 struct ixgbe_tx_buffer *tx_buffer_info;
3119 unsigned int len = skb->len; 3406 unsigned int len = skb->len;
@@ -3131,8 +3418,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3131 3418
3132 tx_buffer_info->length = size; 3419 tx_buffer_info->length = size;
3133 tx_buffer_info->dma = pci_map_single(adapter->pdev, 3420 tx_buffer_info->dma = pci_map_single(adapter->pdev,
3134 skb->data + offset, 3421 skb->data + offset,
3135 size, PCI_DMA_TODEVICE); 3422 size, PCI_DMA_TODEVICE);
3136 tx_buffer_info->time_stamp = jiffies; 3423 tx_buffer_info->time_stamp = jiffies;
3137 tx_buffer_info->next_to_watch = i; 3424 tx_buffer_info->next_to_watch = i;
3138 3425
@@ -3157,9 +3444,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3157 3444
3158 tx_buffer_info->length = size; 3445 tx_buffer_info->length = size;
3159 tx_buffer_info->dma = pci_map_page(adapter->pdev, 3446 tx_buffer_info->dma = pci_map_page(adapter->pdev,
3160 frag->page, 3447 frag->page,
3161 offset, 3448 offset,
3162 size, PCI_DMA_TODEVICE); 3449 size,
3450 PCI_DMA_TODEVICE);
3163 tx_buffer_info->time_stamp = jiffies; 3451 tx_buffer_info->time_stamp = jiffies;
3164 tx_buffer_info->next_to_watch = i; 3452 tx_buffer_info->next_to_watch = i;
3165 3453
@@ -3182,8 +3470,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3182} 3470}
3183 3471
3184static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 3472static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3185 struct ixgbe_ring *tx_ring, 3473 struct ixgbe_ring *tx_ring,
3186 int tx_flags, int count, u32 paylen, u8 hdr_len) 3474 int tx_flags, int count, u32 paylen, u8 hdr_len)
3187{ 3475{
3188 union ixgbe_adv_tx_desc *tx_desc = NULL; 3476 union ixgbe_adv_tx_desc *tx_desc = NULL;
3189 struct ixgbe_tx_buffer *tx_buffer_info; 3477 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -3202,15 +3490,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3202 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 3490 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3203 3491
3204 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3492 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3205 IXGBE_ADVTXD_POPTS_SHIFT; 3493 IXGBE_ADVTXD_POPTS_SHIFT;
3206 3494
3495 /* use index 1 context for tso */
3496 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3207 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3497 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3208 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 3498 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3209 IXGBE_ADVTXD_POPTS_SHIFT; 3499 IXGBE_ADVTXD_POPTS_SHIFT;
3210 3500
3211 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3501 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3212 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3502 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3213 IXGBE_ADVTXD_POPTS_SHIFT; 3503 IXGBE_ADVTXD_POPTS_SHIFT;
3214 3504
3215 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 3505 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3216 3506
@@ -3220,9 +3510,8 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3220 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 3510 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3221 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 3511 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3222 tx_desc->read.cmd_type_len = 3512 tx_desc->read.cmd_type_len =
3223 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 3513 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3224 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3514 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3225
3226 i++; 3515 i++;
3227 if (i == tx_ring->count) 3516 if (i == tx_ring->count)
3228 i = 0; 3517 i = 0;
@@ -3243,7 +3532,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3243} 3532}
3244 3533
3245static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 3534static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
3246 struct ixgbe_ring *tx_ring, int size) 3535 struct ixgbe_ring *tx_ring, int size)
3247{ 3536{
3248 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3537 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3249 3538
@@ -3259,61 +3548,52 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
3259 return -EBUSY; 3548 return -EBUSY;
3260 3549
3261 /* A reprieve! - use start_queue because it doesn't call schedule */ 3550 /* A reprieve! - use start_queue because it doesn't call schedule */
3262 netif_wake_subqueue(netdev, tx_ring->queue_index); 3551 netif_start_subqueue(netdev, tx_ring->queue_index);
3263 ++adapter->restart_queue; 3552 ++adapter->restart_queue;
3264 return 0; 3553 return 0;
3265} 3554}
3266 3555
3267static int ixgbe_maybe_stop_tx(struct net_device *netdev, 3556static int ixgbe_maybe_stop_tx(struct net_device *netdev,
3268 struct ixgbe_ring *tx_ring, int size) 3557 struct ixgbe_ring *tx_ring, int size)
3269{ 3558{
3270 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 3559 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3271 return 0; 3560 return 0;
3272 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); 3561 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
3273} 3562}
3274 3563
3275
3276static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3564static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3277{ 3565{
3278 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3566 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3279 struct ixgbe_ring *tx_ring; 3567 struct ixgbe_ring *tx_ring;
3280 unsigned int len = skb->len;
3281 unsigned int first; 3568 unsigned int first;
3282 unsigned int tx_flags = 0; 3569 unsigned int tx_flags = 0;
3283 u8 hdr_len = 0; 3570 u8 hdr_len = 0;
3284 int r_idx = 0, tso; 3571 int r_idx = 0, tso;
3285 unsigned int mss = 0;
3286 int count = 0; 3572 int count = 0;
3287 unsigned int f; 3573 unsigned int f;
3288 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 3574
3289 len -= skb->data_len;
3290 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; 3575 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
3291 tx_ring = &adapter->tx_ring[r_idx]; 3576 tx_ring = &adapter->tx_ring[r_idx];
3292 3577
3293 3578 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3294 if (skb->len <= 0) { 3579 tx_flags |= vlan_tx_tag_get(skb);
3295 dev_kfree_skb(skb); 3580 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3296 return NETDEV_TX_OK; 3581 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3297 } 3582 }
3298 mss = skb_shinfo(skb)->gso_size; 3583 /* three things can cause us to need a context descriptor */
3299 3584 if (skb_is_gso(skb) ||
3300 if (mss) 3585 (skb->ip_summed == CHECKSUM_PARTIAL) ||
3301 count++; 3586 (tx_flags & IXGBE_TX_FLAGS_VLAN))
3302 else if (skb->ip_summed == CHECKSUM_PARTIAL)
3303 count++; 3587 count++;
3304 3588
3305 count += TXD_USE_COUNT(len); 3589 count += TXD_USE_COUNT(skb_headlen(skb));
3306 for (f = 0; f < nr_frags; f++) 3590 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3307 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3591 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3308 3592
3309 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { 3593 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
3310 adapter->tx_busy++; 3594 adapter->tx_busy++;
3311 return NETDEV_TX_BUSY; 3595 return NETDEV_TX_BUSY;
3312 } 3596 }
3313 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3314 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3315 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
3316 }
3317 3597
3318 if (skb->protocol == htons(ETH_P_IP)) 3598 if (skb->protocol == htons(ETH_P_IP))
3319 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3599 tx_flags |= IXGBE_TX_FLAGS_IPV4;
@@ -3327,12 +3607,12 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3327 if (tso) 3607 if (tso)
3328 tx_flags |= IXGBE_TX_FLAGS_TSO; 3608 tx_flags |= IXGBE_TX_FLAGS_TSO;
3329 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 3609 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3330 (skb->ip_summed == CHECKSUM_PARTIAL)) 3610 (skb->ip_summed == CHECKSUM_PARTIAL))
3331 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3611 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3332 3612
3333 ixgbe_tx_queue(adapter, tx_ring, tx_flags, 3613 ixgbe_tx_queue(adapter, tx_ring, tx_flags,
3334 ixgbe_tx_map(adapter, tx_ring, skb, first), 3614 ixgbe_tx_map(adapter, tx_ring, skb, first),
3335 skb->len, hdr_len); 3615 skb->len, hdr_len);
3336 3616
3337 netdev->trans_start = jiffies; 3617 netdev->trans_start = jiffies;
3338 3618
@@ -3366,15 +3646,16 @@ static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
3366static int ixgbe_set_mac(struct net_device *netdev, void *p) 3646static int ixgbe_set_mac(struct net_device *netdev, void *p)
3367{ 3647{
3368 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3648 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3649 struct ixgbe_hw *hw = &adapter->hw;
3369 struct sockaddr *addr = p; 3650 struct sockaddr *addr = p;
3370 3651
3371 if (!is_valid_ether_addr(addr->sa_data)) 3652 if (!is_valid_ether_addr(addr->sa_data))
3372 return -EADDRNOTAVAIL; 3653 return -EADDRNOTAVAIL;
3373 3654
3374 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3655 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3375 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 3656 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3376 3657
3377 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 3658 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3378 3659
3379 return 0; 3660 return 0;
3380} 3661}
@@ -3398,28 +3679,19 @@ static void ixgbe_netpoll(struct net_device *netdev)
3398#endif 3679#endif
3399 3680
3400/** 3681/**
3401 * ixgbe_napi_add_all - prep napi structs for use 3682 * ixgbe_link_config - set up initial link with default speed and duplex
3402 * @adapter: private struct 3683 * @hw: pointer to private hardware struct
3403 * helper function to napi_add each possible q_vector->napi 3684 *
3404 */ 3685 * Returns 0 on success, negative on failure
3405static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) 3686 **/
3687static int ixgbe_link_config(struct ixgbe_hw *hw)
3406{ 3688{
3407 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3689 u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;
3408 int (*poll)(struct napi_struct *, int);
3409 3690
3410 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3691 /* must always autoneg for both 1G and 10G link */
3411 poll = &ixgbe_clean_rxonly; 3692 hw->mac.autoneg = true;
3412 } else {
3413 poll = &ixgbe_poll;
3414 /* only one q_vector for legacy modes */
3415 q_vectors = 1;
3416 }
3417 3693
3418 for (i = 0; i < q_vectors; i++) { 3694 return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
3419 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
3420 netif_napi_add(adapter->netdev, &q_vector->napi,
3421 (*poll), 64);
3422 }
3423} 3695}
3424 3696
3425/** 3697/**
@@ -3434,17 +3706,16 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
3434 * and a hardware reset occur. 3706 * and a hardware reset occur.
3435 **/ 3707 **/
3436static int __devinit ixgbe_probe(struct pci_dev *pdev, 3708static int __devinit ixgbe_probe(struct pci_dev *pdev,
3437 const struct pci_device_id *ent) 3709 const struct pci_device_id *ent)
3438{ 3710{
3439 struct net_device *netdev; 3711 struct net_device *netdev;
3440 struct ixgbe_adapter *adapter = NULL; 3712 struct ixgbe_adapter *adapter = NULL;
3441 struct ixgbe_hw *hw; 3713 struct ixgbe_hw *hw;
3442 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 3714 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
3443 unsigned long mmio_start, mmio_len;
3444 static int cards_found; 3715 static int cards_found;
3445 int i, err, pci_using_dac; 3716 int i, err, pci_using_dac;
3446 u16 link_status, link_speed, link_width; 3717 u16 link_status, link_speed, link_width;
3447 u32 part_num; 3718 u32 part_num, eec;
3448 3719
3449 err = pci_enable_device(pdev); 3720 err = pci_enable_device(pdev);
3450 if (err) 3721 if (err)
@@ -3459,7 +3730,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3459 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3730 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3460 if (err) { 3731 if (err) {
3461 dev_err(&pdev->dev, "No usable DMA " 3732 dev_err(&pdev->dev, "No usable DMA "
3462 "configuration, aborting\n"); 3733 "configuration, aborting\n");
3463 goto err_dma; 3734 goto err_dma;
3464 } 3735 }
3465 } 3736 }
@@ -3492,10 +3763,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3492 hw->back = adapter; 3763 hw->back = adapter;
3493 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3764 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3494 3765
3495 mmio_start = pci_resource_start(pdev, 0); 3766 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3496 mmio_len = pci_resource_len(pdev, 0); 3767 pci_resource_len(pdev, 0));
3497
3498 hw->hw_addr = ioremap(mmio_start, mmio_len);
3499 if (!hw->hw_addr) { 3768 if (!hw->hw_addr) {
3500 err = -EIO; 3769 err = -EIO;
3501 goto err_ioremap; 3770 goto err_ioremap;
@@ -3510,7 +3779,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3510 netdev->stop = &ixgbe_close; 3779 netdev->stop = &ixgbe_close;
3511 netdev->hard_start_xmit = &ixgbe_xmit_frame; 3780 netdev->hard_start_xmit = &ixgbe_xmit_frame;
3512 netdev->get_stats = &ixgbe_get_stats; 3781 netdev->get_stats = &ixgbe_get_stats;
3513 netdev->set_multicast_list = &ixgbe_set_multi; 3782 netdev->set_rx_mode = &ixgbe_set_rx_mode;
3783 netdev->set_multicast_list = &ixgbe_set_rx_mode;
3514 netdev->set_mac_address = &ixgbe_set_mac; 3784 netdev->set_mac_address = &ixgbe_set_mac;
3515 netdev->change_mtu = &ixgbe_change_mtu; 3785 netdev->change_mtu = &ixgbe_change_mtu;
3516 ixgbe_set_ethtool_ops(netdev); 3786 ixgbe_set_ethtool_ops(netdev);
@@ -3524,22 +3794,23 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3524#endif 3794#endif
3525 strcpy(netdev->name, pci_name(pdev)); 3795 strcpy(netdev->name, pci_name(pdev));
3526 3796
3527 netdev->mem_start = mmio_start;
3528 netdev->mem_end = mmio_start + mmio_len;
3529
3530 adapter->bd_number = cards_found; 3797 adapter->bd_number = cards_found;
3531 3798
3532 /* PCI config space info */
3533 hw->vendor_id = pdev->vendor;
3534 hw->device_id = pdev->device;
3535 hw->revision_id = pdev->revision;
3536 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3537 hw->subsystem_device_id = pdev->subsystem_device;
3538
3539 /* Setup hw api */ 3799 /* Setup hw api */
3540 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3800 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3541 hw->mac.type = ii->mac; 3801 hw->mac.type = ii->mac;
3542 3802
3803 /* EEPROM */
3804 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
3805 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
3806 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
3807 if (!(eec & (1 << 8)))
3808 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
3809
3810 /* PHY */
3811 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
3812 /* phy->sfp_type = ixgbe_sfp_type_unknown; */
3813
3543 err = ii->get_invariants(hw); 3814 err = ii->get_invariants(hw);
3544 if (err) 3815 if (err)
3545 goto err_hw_init; 3816 goto err_hw_init;
@@ -3549,26 +3820,34 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3549 if (err) 3820 if (err)
3550 goto err_sw_init; 3821 goto err_sw_init;
3551 3822
3823 /* reset_hw fills in the perm_addr as well */
3824 err = hw->mac.ops.reset_hw(hw);
3825 if (err) {
3826 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
3827 goto err_sw_init;
3828 }
3829
3552 netdev->features = NETIF_F_SG | 3830 netdev->features = NETIF_F_SG |
3553 NETIF_F_HW_CSUM | 3831 NETIF_F_IP_CSUM |
3554 NETIF_F_HW_VLAN_TX | 3832 NETIF_F_HW_VLAN_TX |
3555 NETIF_F_HW_VLAN_RX | 3833 NETIF_F_HW_VLAN_RX |
3556 NETIF_F_HW_VLAN_FILTER; 3834 NETIF_F_HW_VLAN_FILTER;
3557 3835
3558 netdev->features |= NETIF_F_LRO; 3836 netdev->features |= NETIF_F_IPV6_CSUM;
3559 netdev->features |= NETIF_F_TSO; 3837 netdev->features |= NETIF_F_TSO;
3560 netdev->features |= NETIF_F_TSO6; 3838 netdev->features |= NETIF_F_TSO6;
3839 netdev->features |= NETIF_F_LRO;
3561 3840
3562 netdev->vlan_features |= NETIF_F_TSO; 3841 netdev->vlan_features |= NETIF_F_TSO;
3563 netdev->vlan_features |= NETIF_F_TSO6; 3842 netdev->vlan_features |= NETIF_F_TSO6;
3564 netdev->vlan_features |= NETIF_F_HW_CSUM; 3843 netdev->vlan_features |= NETIF_F_IP_CSUM;
3565 netdev->vlan_features |= NETIF_F_SG; 3844 netdev->vlan_features |= NETIF_F_SG;
3566 3845
3567 if (pci_using_dac) 3846 if (pci_using_dac)
3568 netdev->features |= NETIF_F_HIGHDMA; 3847 netdev->features |= NETIF_F_HIGHDMA;
3569 3848
3570 /* make sure the EEPROM is good */ 3849 /* make sure the EEPROM is good */
3571 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 3850 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
3572 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); 3851 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
3573 err = -EIO; 3852 err = -EIO;
3574 goto err_eeprom; 3853 goto err_eeprom;
@@ -3577,7 +3856,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3577 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); 3856 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
3578 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); 3857 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
3579 3858
3580 if (ixgbe_validate_mac_addr(netdev->dev_addr)) { 3859 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
3860 dev_err(&pdev->dev, "invalid MAC address\n");
3581 err = -EIO; 3861 err = -EIO;
3582 goto err_eeprom; 3862 goto err_eeprom;
3583 } 3863 }
@@ -3587,13 +3867,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3587 adapter->watchdog_timer.data = (unsigned long)adapter; 3867 adapter->watchdog_timer.data = (unsigned long)adapter;
3588 3868
3589 INIT_WORK(&adapter->reset_task, ixgbe_reset_task); 3869 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
3590 3870 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
3591 /* initialize default flow control settings */
3592 hw->fc.original_type = ixgbe_fc_full;
3593 hw->fc.type = ixgbe_fc_full;
3594 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
3595 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
3596 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
3597 3871
3598 err = ixgbe_init_interrupt_scheme(adapter); 3872 err = ixgbe_init_interrupt_scheme(adapter);
3599 if (err) 3873 if (err)
@@ -3604,32 +3878,39 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3604 link_speed = link_status & IXGBE_PCI_LINK_SPEED; 3878 link_speed = link_status & IXGBE_PCI_LINK_SPEED;
3605 link_width = link_status & IXGBE_PCI_LINK_WIDTH; 3879 link_width = link_status & IXGBE_PCI_LINK_WIDTH;
3606 dev_info(&pdev->dev, "(PCI Express:%s:%s) " 3880 dev_info(&pdev->dev, "(PCI Express:%s:%s) "
3607 "%02x:%02x:%02x:%02x:%02x:%02x\n", 3881 "%02x:%02x:%02x:%02x:%02x:%02x\n",
3608 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : 3882 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
3609 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : 3883 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
3610 "Unknown"), 3884 "Unknown"),
3611 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : 3885 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
3612 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : 3886 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
3613 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : 3887 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
3614 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : 3888 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
3615 "Unknown"), 3889 "Unknown"),
3616 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], 3890 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
3617 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); 3891 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
3618 ixgbe_read_part_num(hw, &part_num); 3892 ixgbe_read_pba_num_generic(hw, &part_num);
3619 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 3893 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3620 hw->mac.type, hw->phy.type, 3894 hw->mac.type, hw->phy.type,
3621 (part_num >> 8), (part_num & 0xff)); 3895 (part_num >> 8), (part_num & 0xff));
3622 3896
3623 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) { 3897 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
3624 dev_warn(&pdev->dev, "PCI-Express bandwidth available for " 3898 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
3625 "this card is not sufficient for optimal " 3899 "this card is not sufficient for optimal "
3626 "performance.\n"); 3900 "performance.\n");
3627 dev_warn(&pdev->dev, "For optimal performance a x8 " 3901 dev_warn(&pdev->dev, "For optimal performance a x8 "
3628 "PCI-Express slot is required.\n"); 3902 "PCI-Express slot is required.\n");
3629 } 3903 }
3630 3904
3631 /* reset the hardware with the new settings */ 3905 /* reset the hardware with the new settings */
3632 ixgbe_start_hw(hw); 3906 hw->mac.ops.start_hw(hw);
3907
3908 /* link_config depends on start_hw being called at least once */
3909 err = ixgbe_link_config(hw);
3910 if (err) {
3911 dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
3912 goto err_register;
3913 }
3633 3914
3634 netif_carrier_off(netdev); 3915 netif_carrier_off(netdev);
3635 netif_tx_stop_all_queues(netdev); 3916 netif_tx_stop_all_queues(netdev);
@@ -3641,7 +3922,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3641 if (err) 3922 if (err)
3642 goto err_register; 3923 goto err_register;
3643 3924
3644#ifdef CONFIG_DCA 3925#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3645 if (dca_add_requester(&pdev->dev) == 0) { 3926 if (dca_add_requester(&pdev->dev) == 0) {
3646 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 3927 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
3647 /* always use CB2 mode, difference is masked 3928 /* always use CB2 mode, difference is masked
@@ -3691,7 +3972,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3691 3972
3692 flush_scheduled_work(); 3973 flush_scheduled_work();
3693 3974
3694#ifdef CONFIG_DCA 3975#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3695 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 3976 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3696 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; 3977 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
3697 dca_remove_requester(&pdev->dev); 3978 dca_remove_requester(&pdev->dev);
@@ -3709,6 +3990,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3709 pci_release_regions(pdev); 3990 pci_release_regions(pdev);
3710 3991
3711 DPRINTK(PROBE, INFO, "complete\n"); 3992 DPRINTK(PROBE, INFO, "complete\n");
3993 ixgbe_napi_del_all(adapter);
3712 kfree(adapter->tx_ring); 3994 kfree(adapter->tx_ring);
3713 kfree(adapter->rx_ring); 3995 kfree(adapter->rx_ring);
3714 3996
@@ -3726,7 +4008,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3726 * this device has been detected. 4008 * this device has been detected.
3727 */ 4009 */
3728static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 4010static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
3729 pci_channel_state_t state) 4011 pci_channel_state_t state)
3730{ 4012{
3731 struct net_device *netdev = pci_get_drvdata(pdev); 4013 struct net_device *netdev = pci_get_drvdata(pdev);
3732 struct ixgbe_adapter *adapter = netdev->priv; 4014 struct ixgbe_adapter *adapter = netdev->priv;
@@ -3737,7 +4019,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
3737 ixgbe_down(adapter); 4019 ixgbe_down(adapter);
3738 pci_disable_device(pdev); 4020 pci_disable_device(pdev);
3739 4021
3740 /* Request a slot slot reset. */ 4022 /* Request a slot reset. */
3741 return PCI_ERS_RESULT_NEED_RESET; 4023 return PCI_ERS_RESULT_NEED_RESET;
3742} 4024}
3743 4025
@@ -3754,7 +4036,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
3754 4036
3755 if (pci_enable_device(pdev)) { 4037 if (pci_enable_device(pdev)) {
3756 DPRINTK(PROBE, ERR, 4038 DPRINTK(PROBE, ERR,
3757 "Cannot re-enable PCI device after reset.\n"); 4039 "Cannot re-enable PCI device after reset.\n");
3758 return PCI_ERS_RESULT_DISCONNECT; 4040 return PCI_ERS_RESULT_DISCONNECT;
3759 } 4041 }
3760 pci_set_master(pdev); 4042 pci_set_master(pdev);
@@ -3788,7 +4070,6 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
3788 } 4070 }
3789 4071
3790 netif_device_attach(netdev); 4072 netif_device_attach(netdev);
3791
3792} 4073}
3793 4074
3794static struct pci_error_handlers ixgbe_err_handler = { 4075static struct pci_error_handlers ixgbe_err_handler = {
@@ -3824,13 +4105,14 @@ static int __init ixgbe_init_module(void)
3824 4105
3825 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); 4106 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
3826 4107
3827#ifdef CONFIG_DCA 4108#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3828 dca_register_notify(&dca_notifier); 4109 dca_register_notify(&dca_notifier);
3829 4110
3830#endif 4111#endif
3831 ret = pci_register_driver(&ixgbe_driver); 4112 ret = pci_register_driver(&ixgbe_driver);
3832 return ret; 4113 return ret;
3833} 4114}
4115
3834module_init(ixgbe_init_module); 4116module_init(ixgbe_init_module);
3835 4117
3836/** 4118/**
@@ -3841,24 +4123,24 @@ module_init(ixgbe_init_module);
3841 **/ 4123 **/
3842static void __exit ixgbe_exit_module(void) 4124static void __exit ixgbe_exit_module(void)
3843{ 4125{
3844#ifdef CONFIG_DCA 4126#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3845 dca_unregister_notify(&dca_notifier); 4127 dca_unregister_notify(&dca_notifier);
3846#endif 4128#endif
3847 pci_unregister_driver(&ixgbe_driver); 4129 pci_unregister_driver(&ixgbe_driver);
3848} 4130}
3849 4131
3850#ifdef CONFIG_DCA 4132#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3851static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, 4133static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
3852 void *p) 4134 void *p)
3853{ 4135{
3854 int ret_val; 4136 int ret_val;
3855 4137
3856 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, 4138 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
3857 __ixgbe_notify_dca); 4139 __ixgbe_notify_dca);
3858 4140
3859 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 4141 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3860} 4142}
3861#endif /* CONFIG_DCA */ 4143#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
3862 4144
3863module_exit(ixgbe_exit_module); 4145module_exit(ixgbe_exit_module);
3864 4146
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 8002931ae823..764035a8c9a1 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -33,32 +32,36 @@
33#include "ixgbe_common.h" 32#include "ixgbe_common.h"
34#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
35 34
35static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
36static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); 36static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
37static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); 37static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
38static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
39static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
40 u32 device_type, u16 phy_data);
41 38
42/** 39/**
43 * ixgbe_identify_phy - Get physical layer module 40 * ixgbe_identify_phy_generic - Get physical layer module
44 * @hw: pointer to hardware structure 41 * @hw: pointer to hardware structure
45 * 42 *
46 * Determines the physical layer module found on the current adapter. 43 * Determines the physical layer module found on the current adapter.
47 **/ 44 **/
48s32 ixgbe_identify_phy(struct ixgbe_hw *hw) 45s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
49{ 46{
50 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 47 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
51 u32 phy_addr; 48 u32 phy_addr;
52 49
53 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 50 if (hw->phy.type == ixgbe_phy_unknown) {
54 if (ixgbe_validate_phy_addr(hw, phy_addr)) { 51 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
55 hw->phy.addr = phy_addr; 52 if (ixgbe_validate_phy_addr(hw, phy_addr)) {
56 ixgbe_get_phy_id(hw); 53 hw->phy.addr = phy_addr;
57 hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); 54 ixgbe_get_phy_id(hw);
58 status = 0; 55 hw->phy.type =
59 break; 56 ixgbe_get_phy_type_from_id(hw->phy.id);
57 status = 0;
58 break;
59 }
60 } 60 }
61 } else {
62 status = 0;
61 } 63 }
64
62 return status; 65 return status;
63} 66}
64 67
@@ -73,10 +76,8 @@ static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
73 bool valid = false; 76 bool valid = false;
74 77
75 hw->phy.addr = phy_addr; 78 hw->phy.addr = phy_addr;
76 ixgbe_read_phy_reg(hw, 79 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
77 IXGBE_MDIO_PHY_ID_HIGH, 80 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
78 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
79 &phy_id);
80 81
81 if (phy_id != 0xFFFF && phy_id != 0x0) 82 if (phy_id != 0xFFFF && phy_id != 0x0)
82 valid = true; 83 valid = true;
@@ -95,21 +96,18 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
95 u16 phy_id_high = 0; 96 u16 phy_id_high = 0;
96 u16 phy_id_low = 0; 97 u16 phy_id_low = 0;
97 98
98 status = ixgbe_read_phy_reg(hw, 99 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
99 IXGBE_MDIO_PHY_ID_HIGH, 100 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
100 IXGBE_MDIO_PMA_PMD_DEV_TYPE, 101 &phy_id_high);
101 &phy_id_high);
102 102
103 if (status == 0) { 103 if (status == 0) {
104 hw->phy.id = (u32)(phy_id_high << 16); 104 hw->phy.id = (u32)(phy_id_high << 16);
105 status = ixgbe_read_phy_reg(hw, 105 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
106 IXGBE_MDIO_PHY_ID_LOW, 106 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
107 IXGBE_MDIO_PMA_PMD_DEV_TYPE, 107 &phy_id_low);
108 &phy_id_low);
109 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); 108 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
110 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); 109 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
111 } 110 }
112
113 return status; 111 return status;
114} 112}
115 113
@@ -123,9 +121,6 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
123 enum ixgbe_phy_type phy_type; 121 enum ixgbe_phy_type phy_type;
124 122
125 switch (phy_id) { 123 switch (phy_id) {
126 case TN1010_PHY_ID:
127 phy_type = ixgbe_phy_tn;
128 break;
129 case QT2022_PHY_ID: 124 case QT2022_PHY_ID:
130 phy_type = ixgbe_phy_qt; 125 phy_type = ixgbe_phy_qt;
131 break; 126 break;
@@ -138,32 +133,31 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
138} 133}
139 134
140/** 135/**
141 * ixgbe_reset_phy - Performs a PHY reset 136 * ixgbe_reset_phy_generic - Performs a PHY reset
142 * @hw: pointer to hardware structure 137 * @hw: pointer to hardware structure
143 **/ 138 **/
144s32 ixgbe_reset_phy(struct ixgbe_hw *hw) 139s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
145{ 140{
146 /* 141 /*
147 * Perform soft PHY reset to the PHY_XS. 142 * Perform soft PHY reset to the PHY_XS.
148 * This will cause a soft reset to the PHY 143 * This will cause a soft reset to the PHY
149 */ 144 */
150 return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, 145 return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
151 IXGBE_MDIO_PHY_XS_DEV_TYPE, 146 IXGBE_MDIO_PHY_XS_DEV_TYPE,
152 IXGBE_MDIO_PHY_XS_RESET); 147 IXGBE_MDIO_PHY_XS_RESET);
153} 148}
154 149
155/** 150/**
156 * ixgbe_read_phy_reg - Reads a value from a specified PHY register 151 * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
157 * @hw: pointer to hardware structure 152 * @hw: pointer to hardware structure
158 * @reg_addr: 32 bit address of PHY register to read 153 * @reg_addr: 32 bit address of PHY register to read
159 * @phy_data: Pointer to read data from PHY register 154 * @phy_data: Pointer to read data from PHY register
160 **/ 155 **/
161s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 156s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
162 u32 device_type, u16 *phy_data) 157 u32 device_type, u16 *phy_data)
163{ 158{
164 u32 command; 159 u32 command;
165 u32 i; 160 u32 i;
166 u32 timeout = 10;
167 u32 data; 161 u32 data;
168 s32 status = 0; 162 s32 status = 0;
169 u16 gssr; 163 u16 gssr;
@@ -179,9 +173,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
179 if (status == 0) { 173 if (status == 0) {
180 /* Setup and write the address cycle command */ 174 /* Setup and write the address cycle command */
181 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 175 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
182 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 176 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
183 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 177 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
184 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); 178 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
185 179
186 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 180 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
187 181
@@ -190,7 +184,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
190 * The MDI Command bit will clear when the operation is 184 * The MDI Command bit will clear when the operation is
191 * complete 185 * complete
192 */ 186 */
193 for (i = 0; i < timeout; i++) { 187 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
194 udelay(10); 188 udelay(10);
195 189
196 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 190 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -210,9 +204,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
210 * command 204 * command
211 */ 205 */
212 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 206 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
213 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 207 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
214 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 208 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
215 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); 209 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
216 210
217 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 211 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
218 212
@@ -221,7 +215,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
221 * completed. The MDI Command bit will clear when the 215 * completed. The MDI Command bit will clear when the
222 * operation is complete 216 * operation is complete
223 */ 217 */
224 for (i = 0; i < timeout; i++) { 218 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
225 udelay(10); 219 udelay(10);
226 220
227 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 221 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -231,8 +225,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
231 } 225 }
232 226
233 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { 227 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
234 hw_dbg(hw, 228 hw_dbg(hw, "PHY read command didn't complete\n");
235 "PHY read command didn't complete\n");
236 status = IXGBE_ERR_PHY; 229 status = IXGBE_ERR_PHY;
237 } else { 230 } else {
238 /* 231 /*
@@ -247,22 +240,22 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
247 240
248 ixgbe_release_swfw_sync(hw, gssr); 241 ixgbe_release_swfw_sync(hw, gssr);
249 } 242 }
243
250 return status; 244 return status;
251} 245}
252 246
253/** 247/**
254 * ixgbe_write_phy_reg - Writes a value to specified PHY register 248 * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
255 * @hw: pointer to hardware structure 249 * @hw: pointer to hardware structure
256 * @reg_addr: 32 bit PHY register to write 250 * @reg_addr: 32 bit PHY register to write
257 * @device_type: 5 bit device type 251 * @device_type: 5 bit device type
258 * @phy_data: Data to write to the PHY register 252 * @phy_data: Data to write to the PHY register
259 **/ 253 **/
260static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 254s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
261 u32 device_type, u16 phy_data) 255 u32 device_type, u16 phy_data)
262{ 256{
263 u32 command; 257 u32 command;
264 u32 i; 258 u32 i;
265 u32 timeout = 10;
266 s32 status = 0; 259 s32 status = 0;
267 u16 gssr; 260 u16 gssr;
268 261
@@ -280,9 +273,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
280 273
281 /* Setup and write the address cycle command */ 274 /* Setup and write the address cycle command */
282 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 275 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
283 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 276 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
284 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 277 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
285 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); 278 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
286 279
287 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 280 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
288 281
@@ -291,19 +284,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
291 * The MDI Command bit will clear when the operation is 284 * The MDI Command bit will clear when the operation is
292 * complete 285 * complete
293 */ 286 */
294 for (i = 0; i < timeout; i++) { 287 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
295 udelay(10); 288 udelay(10);
296 289
297 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 290 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
298 291
299 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { 292 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
300 hw_dbg(hw, "PHY address cmd didn't complete\n");
301 break; 293 break;
302 }
303 } 294 }
304 295
305 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) 296 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
297 hw_dbg(hw, "PHY address cmd didn't complete\n");
306 status = IXGBE_ERR_PHY; 298 status = IXGBE_ERR_PHY;
299 }
307 300
308 if (status == 0) { 301 if (status == 0) {
309 /* 302 /*
@@ -311,9 +304,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
311 * command 304 * command
312 */ 305 */
313 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 306 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
314 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 307 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
315 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 308 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
316 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); 309 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
317 310
318 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 311 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
319 312
@@ -322,20 +315,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
322 * completed. The MDI Command bit will clear when the 315 * completed. The MDI Command bit will clear when the
323 * operation is complete 316 * operation is complete
324 */ 317 */
325 for (i = 0; i < timeout; i++) { 318 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
326 udelay(10); 319 udelay(10);
327 320
328 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 321 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
329 322
330 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { 323 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
331 hw_dbg(hw, "PHY write command did not "
332 "complete.\n");
333 break; 324 break;
334 }
335 } 325 }
336 326
337 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) 327 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
328 hw_dbg(hw, "PHY address cmd didn't complete\n");
338 status = IXGBE_ERR_PHY; 329 status = IXGBE_ERR_PHY;
330 }
339 } 331 }
340 332
341 ixgbe_release_swfw_sync(hw, gssr); 333 ixgbe_release_swfw_sync(hw, gssr);
@@ -345,67 +337,54 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
345} 337}
346 338
347/** 339/**
348 * ixgbe_setup_tnx_phy_link - Set and restart autoneg 340 * ixgbe_setup_phy_link_generic - Set and restart autoneg
349 * @hw: pointer to hardware structure 341 * @hw: pointer to hardware structure
350 * 342 *
351 * Restart autonegotiation and PHY and waits for completion. 343 * Restart autonegotiation and PHY and waits for completion.
352 **/ 344 **/
353s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw) 345s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
354{ 346{
355 s32 status = IXGBE_NOT_IMPLEMENTED; 347 s32 status = IXGBE_NOT_IMPLEMENTED;
356 u32 time_out; 348 u32 time_out;
357 u32 max_time_out = 10; 349 u32 max_time_out = 10;
358 u16 autoneg_speed_selection_register = 0x10; 350 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
359 u16 autoneg_restart_mask = 0x0200;
360 u16 autoneg_complete_mask = 0x0020;
361 u16 autoneg_reg = 0;
362 351
363 /* 352 /*
364 * Set advertisement settings in PHY based on autoneg_advertised 353 * Set advertisement settings in PHY based on autoneg_advertised
365 * settings. If autoneg_advertised = 0, then advertise default values 354 * settings. If autoneg_advertised = 0, then advertise default values
366 * txn devices cannot be "forced" to a autoneg 10G and fail. But can 355 * tnx devices cannot be "forced" to a autoneg 10G and fail. But can
367 * for a 1G. 356 * for a 1G.
368 */ 357 */
369 ixgbe_read_phy_reg(hw, 358 hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
370 autoneg_speed_selection_register, 359 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
371 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
372 &autoneg_reg);
373 360
374 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) 361 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
375 autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ 362 autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
376 else 363 else
377 autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ 364 autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
378 365
379 ixgbe_write_phy_reg(hw, 366 hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
380 autoneg_speed_selection_register, 367 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
381 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
382 autoneg_reg);
383
384 368
385 /* Restart PHY autonegotiation and wait for completion */ 369 /* Restart PHY autonegotiation and wait for completion */
386 ixgbe_read_phy_reg(hw, 370 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
387 IXGBE_MDIO_AUTO_NEG_CONTROL, 371 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
388 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
389 &autoneg_reg);
390 372
391 autoneg_reg |= autoneg_restart_mask; 373 autoneg_reg |= IXGBE_MII_RESTART;
392 374
393 ixgbe_write_phy_reg(hw, 375 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
394 IXGBE_MDIO_AUTO_NEG_CONTROL, 376 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
395 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
396 autoneg_reg);
397 377
398 /* Wait for autonegotiation to finish */ 378 /* Wait for autonegotiation to finish */
399 for (time_out = 0; time_out < max_time_out; time_out++) { 379 for (time_out = 0; time_out < max_time_out; time_out++) {
400 udelay(10); 380 udelay(10);
401 /* Restart PHY autonegotiation and wait for completion */ 381 /* Restart PHY autonegotiation and wait for completion */
402 status = ixgbe_read_phy_reg(hw, 382 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
403 IXGBE_MDIO_AUTO_NEG_STATUS, 383 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
404 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 384 &autoneg_reg);
405 &autoneg_reg);
406 385
407 autoneg_reg &= autoneg_complete_mask; 386 autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
408 if (autoneg_reg == autoneg_complete_mask) { 387 if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
409 status = 0; 388 status = 0;
410 break; 389 break;
411 } 390 }
@@ -418,64 +397,17 @@ s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
418} 397}
419 398
420/** 399/**
421 * ixgbe_check_tnx_phy_link - Determine link and speed status 400 * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
422 * @hw: pointer to hardware structure
423 *
424 * Reads the VS1 register to determine if link is up and the current speed for
425 * the PHY.
426 **/
427s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed,
428 bool *link_up)
429{
430 s32 status = 0;
431 u32 time_out;
432 u32 max_time_out = 10;
433 u16 phy_link = 0;
434 u16 phy_speed = 0;
435 u16 phy_data = 0;
436
437 /* Initialize speed and link to default case */
438 *link_up = false;
439 *speed = IXGBE_LINK_SPEED_10GB_FULL;
440
441 /*
442 * Check current speed and link status of the PHY register.
443 * This is a vendor specific register and may have to
444 * be changed for other copper PHYs.
445 */
446 for (time_out = 0; time_out < max_time_out; time_out++) {
447 udelay(10);
448 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
449 *link_up = true;
450 if (phy_speed ==
451 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
452 *speed = IXGBE_LINK_SPEED_1GB_FULL;
453 break;
454 } else {
455 status = ixgbe_read_phy_reg(hw,
456 IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
457 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
458 &phy_data);
459 phy_link = phy_data &
460 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
461 phy_speed = phy_data &
462 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
463 }
464 }
465
466 return status;
467}
468
469/**
470 * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities
471 * @hw: pointer to hardware structure 401 * @hw: pointer to hardware structure
472 * @speed: new link speed 402 * @speed: new link speed
473 * @autoneg: true if autonegotiation enabled 403 * @autoneg: true if autonegotiation enabled
474 **/ 404 **/
475s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, 405s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
476 bool autoneg, 406 ixgbe_link_speed speed,
477 bool autoneg_wait_to_complete) 407 bool autoneg,
408 bool autoneg_wait_to_complete)
478{ 409{
410
479 /* 411 /*
480 * Clear autoneg_advertised and set new values based on input link 412 * Clear autoneg_advertised and set new values based on input link
481 * speed. 413 * speed.
@@ -484,11 +416,13 @@ s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
484 416
485 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 417 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
486 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 418 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
419
487 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 420 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
488 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 421 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
489 422
490 /* Setup link based on the new speed settings */ 423 /* Setup link based on the new speed settings */
491 ixgbe_setup_tnx_phy_link(hw); 424 hw->phy.ops.setup_link(hw);
492 425
493 return 0; 426 return 0;
494} 427}
428
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index aa3ea72e678e..9bfe3f2b1d8f 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -30,20 +29,52 @@
30#define _IXGBE_PHY_H_ 29#define _IXGBE_PHY_H_
31 30
32#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
33 33
34s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); 34/* EEPROM byte offsets */
35s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); 35#define IXGBE_SFF_IDENTIFIER 0x0
36s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, 36#define IXGBE_SFF_IDENTIFIER_SFP 0x3
37 bool autoneg_wait_to_complete); 37#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
38s32 ixgbe_identify_phy(struct ixgbe_hw *hw); 38#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
39s32 ixgbe_reset_phy(struct ixgbe_hw *hw); 39#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
40s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 40#define IXGBE_SFF_1GBE_COMP_CODES 0x6
41 u32 device_type, u16 *phy_data); 41#define IXGBE_SFF_10GBE_COMP_CODES 0x3
42 42#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
43/* PHY specific */ 43
44s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw); 44/* Bitmasks */
45s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); 45#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80
46s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, 46#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
47 bool autoneg_wait_to_complete); 47#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
48#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
49#define IXGBE_I2C_EEPROM_READ_MASK 0x100
50#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
51#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
52#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
53#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
54#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
55
56/* Bit-shift macros */
57#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 12
58#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 8
59#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 4
60
61/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
62#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
63#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
64#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
65
66
67s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
68s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
69s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
70s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
71 u32 device_type, u16 *phy_data);
72s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
73 u32 device_type, u16 phy_data);
74s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
75s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
76 ixgbe_link_speed speed,
77 bool autoneg,
78 bool autoneg_wait_to_complete);
48 79
49#endif /* _IXGBE_PHY_H_ */ 80#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index c0282a223df3..c6f8fa1c4e59 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -37,9 +36,9 @@
37/* Device IDs */ 36/* Device IDs */
38#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 37#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
39#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 38#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
40#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8
41#define IXGBE_DEV_ID_82598EB_CX4 0x10DD 39#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
42#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC 40#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
41#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
43 42
44/* General Registers */ 43/* General Registers */
45#define IXGBE_CTRL 0x00000 44#define IXGBE_CTRL 0x00000
@@ -70,11 +69,11 @@
70#define IXGBE_EIMC 0x00888 69#define IXGBE_EIMC 0x00888
71#define IXGBE_EIAC 0x00810 70#define IXGBE_EIAC 0x00810
72#define IXGBE_EIAM 0x00890 71#define IXGBE_EIAM 0x00890
73#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */ 72#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4)))
74#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ 73#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
75#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ 74#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
76#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ 75#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
77#define IXGBE_PBACL 0x11068 76#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
78#define IXGBE_GPIE 0x00898 77#define IXGBE_GPIE 0x00898
79 78
80/* Flow Control Registers */ 79/* Flow Control Registers */
@@ -86,20 +85,33 @@
86#define IXGBE_TFCS 0x0CE00 85#define IXGBE_TFCS 0x0CE00
87 86
88/* Receive DMA Registers */ 87/* Receive DMA Registers */
89#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/ 88#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40)))
90#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40)) 89#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40)))
91#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40)) 90#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40)))
92#define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40)) 91#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40)))
93#define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40)) 92#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40)))
94#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40)) 93#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40)))
95#define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40)) 94/*
96#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4)) 95 * Split and Replication Receive Control Registers
97 /* array of 16 (0x02100-0x0213C) */ 96 * 00-15 : 0x02100 + n*4
98#define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4)) 97 * 16-64 : 0x01014 + n*0x40
99 /* array of 16 (0x02200-0x0223C) */ 98 * 64-127: 0x0D014 + (n-64)*0x40
100#define IXGBE_RDRXCTL 0x02F00 99 */
100#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
101 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
102 (0x0D014 + ((_i - 64) * 0x40))))
103/*
104 * Rx DCA Control Register:
105 * 00-15 : 0x02200 + n*4
106 * 16-64 : 0x0100C + n*0x40
107 * 64-127: 0x0D00C + (n-64)*0x40
108 */
109#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
110 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
111 (0x0D00C + ((_i - 64) * 0x40))))
112#define IXGBE_RDRXCTL 0x02F00
101#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) 113#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
102 /* 8 of these 0x03C00 - 0x03C1C */ 114 /* 8 of these 0x03C00 - 0x03C1C */
103#define IXGBE_RXCTRL 0x03000 115#define IXGBE_RXCTRL 0x03000
104#define IXGBE_DROPEN 0x03D04 116#define IXGBE_DROPEN 0x03D04
105#define IXGBE_RXPBSIZE_SHIFT 10 117#define IXGBE_RXPBSIZE_SHIFT 10
@@ -107,29 +119,32 @@
107/* Receive Registers */ 119/* Receive Registers */
108#define IXGBE_RXCSUM 0x05000 120#define IXGBE_RXCSUM 0x05000
109#define IXGBE_RFCTL 0x05008 121#define IXGBE_RFCTL 0x05008
122#define IXGBE_DRECCCTL 0x02F08
123#define IXGBE_DRECCCTL_DISABLE 0
124/* Multicast Table Array - 128 entries */
110#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) 125#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
111 /* Multicast Table Array - 128 entries */ 126#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8)))
112#define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */ 127#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8)))
113#define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */ 128/* Packet split receive type */
114#define IXGBE_PSRTYPE 0x05480 129#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4)))
115 /* 0x5480-0x54BC Packet split receive type */ 130/* array of 4096 1-bit vlan filters */
116#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) 131#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
117 /* array of 4096 1-bit vlan filters */ 132/*array of 4096 4-bit vlan vmdq indices */
118#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) 133#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
119 /*array of 4096 4-bit vlan vmdq indicies */
120#define IXGBE_FCTRL 0x05080 134#define IXGBE_FCTRL 0x05080
121#define IXGBE_VLNCTRL 0x05088 135#define IXGBE_VLNCTRL 0x05088
122#define IXGBE_MCSTCTRL 0x05090 136#define IXGBE_MCSTCTRL 0x05090
123#define IXGBE_MRQC 0x05818 137#define IXGBE_MRQC 0x05818
124#define IXGBE_VMD_CTL 0x0581C
125#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ 138#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
126#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ 139#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
127#define IXGBE_IMIRVP 0x05AC0 140#define IXGBE_IMIRVP 0x05AC0
141#define IXGBE_VMD_CTL 0x0581C
128#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ 142#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
129#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ 143#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
130 144
145
131/* Transmit DMA registers */ 146/* Transmit DMA registers */
132#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/ 147#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
133#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) 148#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
134#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) 149#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
135#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) 150#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
@@ -138,11 +153,10 @@
138#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) 153#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
139#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) 154#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
140#define IXGBE_DTXCTL 0x07E00 155#define IXGBE_DTXCTL 0x07E00
141#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) 156
142 /* there are 16 of these (0-15) */ 157#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
143#define IXGBE_TIPG 0x0CB00 158#define IXGBE_TIPG 0x0CB00
144#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) 159#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
145 /* there are 8 of these */
146#define IXGBE_MNGTXMAP 0x0CD10 160#define IXGBE_MNGTXMAP 0x0CD10
147#define IXGBE_TIPG_FIBER_DEFAULT 3 161#define IXGBE_TIPG_FIBER_DEFAULT 3
148#define IXGBE_TXPBSIZE_SHIFT 10 162#define IXGBE_TXPBSIZE_SHIFT 10
@@ -154,6 +168,7 @@
154#define IXGBE_IPAV 0x05838 168#define IXGBE_IPAV 0x05838
155#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ 169#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
156#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ 170#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
171
157#define IXGBE_WUPL 0x05900 172#define IXGBE_WUPL 0x05900
158#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ 173#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
159#define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */ 174#define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */
@@ -170,6 +185,8 @@
170#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ 185#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
171#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ 186#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
172 187
188
189
173/* Stats registers */ 190/* Stats registers */
174#define IXGBE_CRCERRS 0x04000 191#define IXGBE_CRCERRS 0x04000
175#define IXGBE_ILLERRC 0x04004 192#define IXGBE_ILLERRC 0x04004
@@ -224,7 +241,7 @@
224#define IXGBE_XEC 0x04120 241#define IXGBE_XEC 0x04120
225 242
226#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */ 243#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */
227#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */ 244#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4)))
228 245
229#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ 246#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
230#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ 247#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
@@ -275,23 +292,17 @@
275#define IXGBE_DCA_CTRL 0x11074 292#define IXGBE_DCA_CTRL 0x11074
276 293
277/* Diagnostic Registers */ 294/* Diagnostic Registers */
278#define IXGBE_RDSTATCTL 0x02C20 295#define IXGBE_RDSTATCTL 0x02C20
279#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ 296#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
280#define IXGBE_RDHMPN 0x02F08 297#define IXGBE_RDHMPN 0x02F08
281#define IXGBE_RIC_DW0 0x02F10 298#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
282#define IXGBE_RIC_DW1 0x02F14 299#define IXGBE_RDPROBE 0x02F20
283#define IXGBE_RIC_DW2 0x02F18 300#define IXGBE_TDSTATCTL 0x07C20
284#define IXGBE_RIC_DW3 0x02F1C 301#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
285#define IXGBE_RDPROBE 0x02F20 302#define IXGBE_TDHMPN 0x07F08
286#define IXGBE_TDSTATCTL 0x07C20 303#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
287#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ 304#define IXGBE_TDPROBE 0x07F20
288#define IXGBE_TDHMPN 0x07F08 305#define IXGBE_TXBUFCTRL 0x0C600
289#define IXGBE_TIC_DW0 0x07F10
290#define IXGBE_TIC_DW1 0x07F14
291#define IXGBE_TIC_DW2 0x07F18
292#define IXGBE_TIC_DW3 0x07F1C
293#define IXGBE_TDPROBE 0x07F20
294#define IXGBE_TXBUFCTRL 0x0C600
295#define IXGBE_TXBUFDATA0 0x0C610 306#define IXGBE_TXBUFDATA0 0x0C610
296#define IXGBE_TXBUFDATA1 0x0C614 307#define IXGBE_TXBUFDATA1 0x0C614
297#define IXGBE_TXBUFDATA2 0x0C618 308#define IXGBE_TXBUFDATA2 0x0C618
@@ -356,12 +367,10 @@
356#define IXGBE_ANLP2 0x042B4 367#define IXGBE_ANLP2 0x042B4
357#define IXGBE_ATLASCTL 0x04800 368#define IXGBE_ATLASCTL 0x04800
358 369
359/* RSCCTL Bit Masks */ 370/* RDRXCTL Bit Masks */
360#define IXGBE_RSCCTL_RSCEN 0x01 371#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
361#define IXGBE_RSCCTL_MAXDESC_1 0x00 372#define IXGBE_RDRXCTL_MVMEN 0x00000020
362#define IXGBE_RSCCTL_MAXDESC_4 0x04 373#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
363#define IXGBE_RSCCTL_MAXDESC_8 0x08
364#define IXGBE_RSCCTL_MAXDESC_16 0x0C
365 374
366/* CTRL Bit Masks */ 375/* CTRL Bit Masks */
367#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ 376#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
@@ -394,7 +403,7 @@
394 403
395#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ 404#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
396#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ 405#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
397#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */ 406#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
398#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ 407#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
399 408
400/* MSCA Bit Masks */ 409/* MSCA Bit Masks */
@@ -418,10 +427,10 @@
418#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ 427#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
419 428
420/* MSRWD bit masks */ 429/* MSRWD bit masks */
421#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF 430#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
422#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 431#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
423#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 432#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
424#define IXGBE_MSRWD_READ_DATA_SHIFT 16 433#define IXGBE_MSRWD_READ_DATA_SHIFT 16
425 434
426/* Atlas registers */ 435/* Atlas registers */
427#define IXGBE_ATLAS_PDN_LPBK 0x24 436#define IXGBE_ATLAS_PDN_LPBK 0x24
@@ -436,6 +445,7 @@
436#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 445#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
437#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 446#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
438 447
448
439/* Device Type definitions for new protocol MDIO commands */ 449/* Device Type definitions for new protocol MDIO commands */
440#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 450#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
441#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 451#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
@@ -443,6 +453,8 @@
443#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 453#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
444#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ 454#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
445 455
456#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
457
446#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ 458#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
447#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ 459#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
448#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ 460#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
@@ -456,23 +468,39 @@
456#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ 468#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
457#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ 469#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
458#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ 470#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
459#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */ 471#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
460#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ 472#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
461#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ 473#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
462 474
475#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Address Reg */
476#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
477#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
478
479/* MII clause 22/28 definitions */
480#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
481
482#define IXGBE_MII_SPEED_SELECTION_REG 0x10
483#define IXGBE_MII_RESTART 0x200
484#define IXGBE_MII_AUTONEG_COMPLETE 0x20
485#define IXGBE_MII_AUTONEG_REG 0x0
486
463#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 487#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
464#define IXGBE_MAX_PHY_ADDR 32 488#define IXGBE_MAX_PHY_ADDR 32
465 489
466/* PHY IDs*/ 490/* PHY IDs*/
467#define TN1010_PHY_ID 0x00A19410
468#define QT2022_PHY_ID 0x0043A400 491#define QT2022_PHY_ID 0x0043A400
469 492
493/* PHY Types */
494#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
495
470/* General purpose Interrupt Enable */ 496/* General purpose Interrupt Enable */
471#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ 497#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
472#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ 498#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
473#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ 499#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
474#define IXGBE_GPIE_EIAME 0x40000000 500#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
475#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 501#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
502#define IXGBE_GPIE_EIAME 0x40000000
503#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
476 504
477/* Transmit Flow Control status */ 505/* Transmit Flow Control status */
478#define IXGBE_TFCS_TXOFF 0x00000001 506#define IXGBE_TFCS_TXOFF 0x00000001
@@ -533,7 +561,7 @@
533#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ 561#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
534 562
535/* RMCS Bit Masks */ 563/* RMCS Bit Masks */
536#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */ 564#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */
537/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ 565/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
538#define IXGBE_RMCS_RAC 0x00000004 566#define IXGBE_RMCS_RAC 0x00000004
539#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ 567#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
@@ -541,12 +569,15 @@
541#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */ 569#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */
542#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ 570#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
543 571
572
544/* Interrupt register bitmasks */ 573/* Interrupt register bitmasks */
545 574
546/* Extended Interrupt Cause Read */ 575/* Extended Interrupt Cause Read */
547#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ 576#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
548#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ 577#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
549#define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */ 578#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
579#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
580#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
550#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ 581#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
551#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ 582#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
552#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ 583#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
@@ -554,11 +585,12 @@
554 585
555/* Extended Interrupt Cause Set */ 586/* Extended Interrupt Cause Set */
556#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ 587#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
557#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ 588#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
558#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ 589#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
559#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 590#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
560#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ 591#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
561#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ 592#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
593#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
562#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ 594#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
563#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 595#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
564 596
@@ -566,7 +598,9 @@
566#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ 598#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
567#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ 599#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
568#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 600#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
569#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ 601#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
602#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
603#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
570#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ 604#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
571#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ 605#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
572#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 606#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
@@ -575,18 +609,20 @@
575#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ 609#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
576#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ 610#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
577#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 611#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
578#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ 612#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
579#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */ 613#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
614#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
615#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
580#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ 616#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
581#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 617#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
582 618
583#define IXGBE_EIMS_ENABLE_MASK (\ 619#define IXGBE_EIMS_ENABLE_MASK ( \
584 IXGBE_EIMS_RTX_QUEUE | \ 620 IXGBE_EIMS_RTX_QUEUE | \
585 IXGBE_EIMS_LSC | \ 621 IXGBE_EIMS_LSC | \
586 IXGBE_EIMS_TCP_TIMER | \ 622 IXGBE_EIMS_TCP_TIMER | \
587 IXGBE_EIMS_OTHER) 623 IXGBE_EIMS_OTHER)
588 624
589/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */ 625/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
590#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ 626#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
591#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ 627#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
592#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ 628#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -623,6 +659,7 @@
623#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ 659#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
624#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ 660#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
625 661
662
626#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ 663#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
627 664
628/* STATUS Bit Masks */ 665/* STATUS Bit Masks */
@@ -670,16 +707,16 @@
670#define IXGBE_AUTOC_AN_RESTART 0x00001000 707#define IXGBE_AUTOC_AN_RESTART 0x00001000
671#define IXGBE_AUTOC_FLU 0x00000001 708#define IXGBE_AUTOC_FLU 0x00000001
672#define IXGBE_AUTOC_LMS_SHIFT 13 709#define IXGBE_AUTOC_LMS_SHIFT 13
673#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) 710#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
674#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) 711#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
675#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) 712#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
676#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) 713#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
677#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) 714#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
678#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) 715#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
679#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) 716#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
680 717
681#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200 718#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
682#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180 719#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
683#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 720#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
684#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 721#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
685#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) 722#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
@@ -705,6 +742,7 @@
705#define IXGBE_LINKS_TL_FAULT 0x00001000 742#define IXGBE_LINKS_TL_FAULT 0x00001000
706#define IXGBE_LINKS_SIGNAL 0x00000F00 743#define IXGBE_LINKS_SIGNAL 0x00000F00
707 744
745#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
708#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 746#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
709 747
710/* SW Semaphore Register bitmasks */ 748/* SW Semaphore Register bitmasks */
@@ -759,6 +797,11 @@
759#define IXGBE_PBANUM0_PTR 0x15 797#define IXGBE_PBANUM0_PTR 0x15
760#define IXGBE_PBANUM1_PTR 0x16 798#define IXGBE_PBANUM1_PTR 0x16
761 799
800/* Legacy EEPROM word offsets */
801#define IXGBE_ISCSI_BOOT_CAPS 0x0033
802#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
803#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
804
762/* EEPROM Commands - SPI */ 805/* EEPROM Commands - SPI */
763#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ 806#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
764#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 807#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
@@ -766,7 +809,7 @@
766#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ 809#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
767#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ 810#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
768#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ 811#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
769/* EEPROM reset Write Enbale latch */ 812/* EEPROM reset Write Enable latch */
770#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 813#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
771#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ 814#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
772#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ 815#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
@@ -805,26 +848,20 @@
805/* Number of 100 microseconds we wait for PCI Express master disable */ 848/* Number of 100 microseconds we wait for PCI Express master disable */
806#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 849#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
807 850
808/* PHY Types */
809#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
810
811/* Check whether address is multicast. This is little-endian specific check.*/ 851/* Check whether address is multicast. This is little-endian specific check.*/
812#define IXGBE_IS_MULTICAST(Address) \ 852#define IXGBE_IS_MULTICAST(Address) \
813 (bool)(((u8 *)(Address))[0] & ((u8)0x01)) 853 (bool)(((u8 *)(Address))[0] & ((u8)0x01))
814 854
815/* Check whether an address is broadcast. */ 855/* Check whether an address is broadcast. */
816#define IXGBE_IS_BROADCAST(Address) \ 856#define IXGBE_IS_BROADCAST(Address) \
817 ((((u8 *)(Address))[0] == ((u8)0xff)) && \ 857 ((((u8 *)(Address))[0] == ((u8)0xff)) && \
818 (((u8 *)(Address))[1] == ((u8)0xff))) 858 (((u8 *)(Address))[1] == ((u8)0xff)))
819 859
820/* RAH */ 860/* RAH */
821#define IXGBE_RAH_VIND_MASK 0x003C0000 861#define IXGBE_RAH_VIND_MASK 0x003C0000
822#define IXGBE_RAH_VIND_SHIFT 18 862#define IXGBE_RAH_VIND_SHIFT 18
823#define IXGBE_RAH_AV 0x80000000 863#define IXGBE_RAH_AV 0x80000000
824 864#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
825/* Filters */
826#define IXGBE_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
827#define IXGBE_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
828 865
829/* Header split receive */ 866/* Header split receive */
830#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 867#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
@@ -853,7 +890,7 @@
853#define IXGBE_MAX_FRAME_SZ 0x40040000 890#define IXGBE_MAX_FRAME_SZ 0x40040000
854 891
855#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ 892#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
856#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. # write-back enable */ 893#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
857 894
858/* Receive Config masks */ 895/* Receive Config masks */
859#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ 896#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
@@ -866,7 +903,7 @@
866#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ 903#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
867#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ 904#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
868#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ 905#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
869/* Receive Priority Flow Control Enbale */ 906/* Receive Priority Flow Control Enable */
870#define IXGBE_FCTRL_RPFCE 0x00004000 907#define IXGBE_FCTRL_RPFCE 0x00004000
871#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ 908#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
872 909
@@ -896,9 +933,8 @@
896/* Receive Descriptor bit definitions */ 933/* Receive Descriptor bit definitions */
897#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ 934#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
898#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ 935#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
899#define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */
900#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 936#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
901#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ 937#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
902#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ 938#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
903#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 939#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
904#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 940#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
@@ -914,7 +950,7 @@
914#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ 950#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
915#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ 951#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
916#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ 952#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
917#define IXGBE_RXDADV_HBO 0x00800000 953#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
918#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ 954#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
919#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ 955#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
920#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ 956#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
@@ -928,15 +964,17 @@
928#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ 964#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
929#define IXGBE_RXD_CFI_SHIFT 12 965#define IXGBE_RXD_CFI_SHIFT 12
930 966
967
931/* SRRCTL bit definitions */ 968/* SRRCTL bit definitions */
932#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ 969#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
933#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F 970#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
934#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 971#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
935#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 972#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
936#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 973#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
937#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 974#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
938#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 975#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
939#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 976#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
977#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
940 978
941#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 979#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
942#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF 980#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
@@ -970,21 +1008,20 @@
970#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ 1008#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
971#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ 1009#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
972#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ 1010#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
973
974/* Masks to determine if packets should be dropped due to frame errors */ 1011/* Masks to determine if packets should be dropped due to frame errors */
975#define IXGBE_RXD_ERR_FRAME_ERR_MASK (\ 1012#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
976 IXGBE_RXD_ERR_CE | \ 1013 IXGBE_RXD_ERR_CE | \
977 IXGBE_RXD_ERR_LE | \ 1014 IXGBE_RXD_ERR_LE | \
978 IXGBE_RXD_ERR_PE | \ 1015 IXGBE_RXD_ERR_PE | \
979 IXGBE_RXD_ERR_OSE | \ 1016 IXGBE_RXD_ERR_OSE | \
980 IXGBE_RXD_ERR_USE) 1017 IXGBE_RXD_ERR_USE)
981 1018
982#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\ 1019#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
983 IXGBE_RXDADV_ERR_CE | \ 1020 IXGBE_RXDADV_ERR_CE | \
984 IXGBE_RXDADV_ERR_LE | \ 1021 IXGBE_RXDADV_ERR_LE | \
985 IXGBE_RXDADV_ERR_PE | \ 1022 IXGBE_RXDADV_ERR_PE | \
986 IXGBE_RXDADV_ERR_OSE | \ 1023 IXGBE_RXDADV_ERR_OSE | \
987 IXGBE_RXDADV_ERR_USE) 1024 IXGBE_RXDADV_ERR_USE)
988 1025
989/* Multicast bit mask */ 1026/* Multicast bit mask */
990#define IXGBE_MCSTCTRL_MFE 0x4 1027#define IXGBE_MCSTCTRL_MFE 0x4
@@ -1000,6 +1037,7 @@
1000#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ 1037#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
1001#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 1038#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
1002 1039
1040
1003/* Transmit Descriptor - Legacy */ 1041/* Transmit Descriptor - Legacy */
1004struct ixgbe_legacy_tx_desc { 1042struct ixgbe_legacy_tx_desc {
1005 u64 buffer_addr; /* Address of the descriptor's data buffer */ 1043 u64 buffer_addr; /* Address of the descriptor's data buffer */
@@ -1007,15 +1045,15 @@ struct ixgbe_legacy_tx_desc {
1007 __le32 data; 1045 __le32 data;
1008 struct { 1046 struct {
1009 __le16 length; /* Data buffer length */ 1047 __le16 length; /* Data buffer length */
1010 u8 cso; /* Checksum offset */ 1048 u8 cso; /* Checksum offset */
1011 u8 cmd; /* Descriptor control */ 1049 u8 cmd; /* Descriptor control */
1012 } flags; 1050 } flags;
1013 } lower; 1051 } lower;
1014 union { 1052 union {
1015 __le32 data; 1053 __le32 data;
1016 struct { 1054 struct {
1017 u8 status; /* Descriptor status */ 1055 u8 status; /* Descriptor status */
1018 u8 css; /* Checksum start */ 1056 u8 css; /* Checksum start */
1019 __le16 vlan; 1057 __le16 vlan;
1020 } fields; 1058 } fields;
1021 } upper; 1059 } upper;
@@ -1024,7 +1062,7 @@ struct ixgbe_legacy_tx_desc {
1024/* Transmit Descriptor - Advanced */ 1062/* Transmit Descriptor - Advanced */
1025union ixgbe_adv_tx_desc { 1063union ixgbe_adv_tx_desc {
1026 struct { 1064 struct {
1027 __le64 buffer_addr; /* Address of descriptor's data buf */ 1065 __le64 buffer_addr; /* Address of descriptor's data buf */
1028 __le32 cmd_type_len; 1066 __le32 cmd_type_len;
1029 __le32 olinfo_status; 1067 __le32 olinfo_status;
1030 } read; 1068 } read;
@@ -1039,9 +1077,9 @@ union ixgbe_adv_tx_desc {
1039struct ixgbe_legacy_rx_desc { 1077struct ixgbe_legacy_rx_desc {
1040 __le64 buffer_addr; /* Address of the descriptor's data buffer */ 1078 __le64 buffer_addr; /* Address of the descriptor's data buffer */
1041 __le16 length; /* Length of data DMAed into data buffer */ 1079 __le16 length; /* Length of data DMAed into data buffer */
1042 u16 csum; /* Packet checksum */ 1080 __le16 csum; /* Packet checksum */
1043 u8 status; /* Descriptor status */ 1081 u8 status; /* Descriptor status */
1044 u8 errors; /* Descriptor Errors */ 1082 u8 errors; /* Descriptor Errors */
1045 __le16 vlan; 1083 __le16 vlan;
1046}; 1084};
1047 1085
@@ -1053,15 +1091,18 @@ union ixgbe_adv_rx_desc {
1053 } read; 1091 } read;
1054 struct { 1092 struct {
1055 struct { 1093 struct {
1056 struct { 1094 union {
1057 __le16 pkt_info; /* RSS type, Packet type */ 1095 __le32 data;
1058 __le16 hdr_info; /* Split Header, header len */ 1096 struct {
1097 __le16 pkt_info; /* RSS, Pkt type */
1098 __le16 hdr_info; /* Splithdr, hdrlen */
1099 } hs_rss;
1059 } lo_dword; 1100 } lo_dword;
1060 union { 1101 union {
1061 __le32 rss; /* RSS Hash */ 1102 __le32 rss; /* RSS Hash */
1062 struct { 1103 struct {
1063 __le16 ip_id; /* IP id */ 1104 __le16 ip_id; /* IP id */
1064 u16 csum; /* Packet Checksum */ 1105 __le16 csum; /* Packet Checksum */
1065 } csum_ip; 1106 } csum_ip;
1066 } hi_dword; 1107 } hi_dword;
1067 } lower; 1108 } lower;
@@ -1082,49 +1123,69 @@ struct ixgbe_adv_tx_context_desc {
1082}; 1123};
1083 1124
1084/* Adv Transmit Descriptor Config Masks */ 1125/* Adv Transmit Descriptor Config Masks */
1085#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */ 1126#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
1086#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ 1127#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
1087#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ 1128#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
1088#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ 1129#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
1089#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ 1130#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
1090#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ 1131#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
1091#define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */
1092#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ 1132#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
1093#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ 1133#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
1094#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ 1134#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
1095#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ 1135#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
1096#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ 1136#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
1097#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ 1137#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
1098#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */ 1138#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
1099#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ 1139#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
1100#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ 1140#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
1141#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
1101#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ 1142#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
1102#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ 1143#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
1103 IXGBE_ADVTXD_POPTS_SHIFT) 1144 IXGBE_ADVTXD_POPTS_SHIFT)
1104#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ 1145#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
1105 IXGBE_ADVTXD_POPTS_SHIFT) 1146 IXGBE_ADVTXD_POPTS_SHIFT)
1106#define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */ 1147#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
1107#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ 1148#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
1108#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ 1149#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
1109#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ 1150#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
1110#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/ 1151#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
1111#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ 1152#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
1112#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ 1153#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
1113#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ 1154#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
1114#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ 1155#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
1115#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ 1156#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
1116#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ 1157#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
1117#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ 1158#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
1118#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ 1159#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
1119#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */ 1160#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/
1120#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ 1161#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
1121#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ 1162#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
1122 1163
1164/* Autonegotiation advertised speeds */
1165typedef u32 ixgbe_autoneg_advertised;
1123/* Link speed */ 1166/* Link speed */
1167typedef u32 ixgbe_link_speed;
1124#define IXGBE_LINK_SPEED_UNKNOWN 0 1168#define IXGBE_LINK_SPEED_UNKNOWN 0
1125#define IXGBE_LINK_SPEED_100_FULL 0x0008 1169#define IXGBE_LINK_SPEED_100_FULL 0x0008
1126#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 1170#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
1127#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 1171#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
1172#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
1173 IXGBE_LINK_SPEED_10GB_FULL)
1174
1175/* Physical layer type */
1176typedef u32 ixgbe_physical_layer;
1177#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
1178#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
1179#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
1180#define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004
1181#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
1182#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
1183#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
1184#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
1185#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
1186#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
1187#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
1188#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
1128 1189
1129 1190
1130enum ixgbe_eeprom_type { 1191enum ixgbe_eeprom_type {
@@ -1141,16 +1202,38 @@ enum ixgbe_mac_type {
1141 1202
1142enum ixgbe_phy_type { 1203enum ixgbe_phy_type {
1143 ixgbe_phy_unknown = 0, 1204 ixgbe_phy_unknown = 0,
1144 ixgbe_phy_tn,
1145 ixgbe_phy_qt, 1205 ixgbe_phy_qt,
1146 ixgbe_phy_xaui 1206 ixgbe_phy_xaui,
1207 ixgbe_phy_tw_tyco,
1208 ixgbe_phy_tw_unknown,
1209 ixgbe_phy_sfp_avago,
1210 ixgbe_phy_sfp_ftl,
1211 ixgbe_phy_sfp_unknown,
1212 ixgbe_phy_generic
1213};
1214
1215/*
1216 * SFP+ module type IDs:
1217 *
1218 * ID Module Type
1219 * =============
1220 * 0 SFP_DA_CU
1221 * 1 SFP_SR
1222 * 2 SFP_LR
1223 */
1224enum ixgbe_sfp_type {
1225 ixgbe_sfp_type_da_cu = 0,
1226 ixgbe_sfp_type_sr = 1,
1227 ixgbe_sfp_type_lr = 2,
1228 ixgbe_sfp_type_unknown = 0xFFFF
1147}; 1229};
1148 1230
1149enum ixgbe_media_type { 1231enum ixgbe_media_type {
1150 ixgbe_media_type_unknown = 0, 1232 ixgbe_media_type_unknown = 0,
1151 ixgbe_media_type_fiber, 1233 ixgbe_media_type_fiber,
1152 ixgbe_media_type_copper, 1234 ixgbe_media_type_copper,
1153 ixgbe_media_type_backplane 1235 ixgbe_media_type_backplane,
1236 ixgbe_media_type_virtual
1154}; 1237};
1155 1238
1156/* Flow Control Settings */ 1239/* Flow Control Settings */
@@ -1167,6 +1250,8 @@ struct ixgbe_addr_filter_info {
1167 u32 rar_used_count; 1250 u32 rar_used_count;
1168 u32 mc_addr_in_rar_count; 1251 u32 mc_addr_in_rar_count;
1169 u32 mta_in_use; 1252 u32 mta_in_use;
1253 u32 overflow_promisc;
1254 bool user_set_promisc;
1170}; 1255};
1171 1256
1172/* Flow control parameters */ 1257/* Flow control parameters */
@@ -1242,57 +1327,118 @@ struct ixgbe_hw_stats {
1242/* forward declaration */ 1327/* forward declaration */
1243struct ixgbe_hw; 1328struct ixgbe_hw;
1244 1329
1330/* iterator type for walking multicast address lists */
1331typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
1332 u32 *vmdq);
1333
1334/* Function pointer table */
1335struct ixgbe_eeprom_operations {
1336 s32 (*init_params)(struct ixgbe_hw *);
1337 s32 (*read)(struct ixgbe_hw *, u16, u16 *);
1338 s32 (*write)(struct ixgbe_hw *, u16, u16);
1339 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
1340 s32 (*update_checksum)(struct ixgbe_hw *);
1341};
1342
1245struct ixgbe_mac_operations { 1343struct ixgbe_mac_operations {
1246 s32 (*reset)(struct ixgbe_hw *); 1344 s32 (*init_hw)(struct ixgbe_hw *);
1345 s32 (*reset_hw)(struct ixgbe_hw *);
1346 s32 (*start_hw)(struct ixgbe_hw *);
1347 s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
1247 enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); 1348 enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
1349 s32 (*get_supported_physical_layer)(struct ixgbe_hw *);
1350 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
1351 s32 (*stop_adapter)(struct ixgbe_hw *);
1352 s32 (*get_bus_info)(struct ixgbe_hw *);
1353 s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
1354 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
1355
1356 /* Link */
1248 s32 (*setup_link)(struct ixgbe_hw *); 1357 s32 (*setup_link)(struct ixgbe_hw *);
1249 s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *); 1358 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
1250 s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); 1359 bool);
1251 s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *); 1360 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
1361 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
1362 bool *);
1363
1364 /* LED */
1365 s32 (*led_on)(struct ixgbe_hw *, u32);
1366 s32 (*led_off)(struct ixgbe_hw *, u32);
1367 s32 (*blink_led_start)(struct ixgbe_hw *, u32);
1368 s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
1369
1370 /* RAR, Multicast, VLAN */
1371 s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
1372 s32 (*clear_rar)(struct ixgbe_hw *, u32);
1373 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
1374 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
1375 s32 (*init_rx_addrs)(struct ixgbe_hw *);
1376 s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
1377 ixgbe_mc_addr_itr);
1378 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
1379 ixgbe_mc_addr_itr);
1380 s32 (*enable_mc)(struct ixgbe_hw *);
1381 s32 (*disable_mc)(struct ixgbe_hw *);
1382 s32 (*clear_vfta)(struct ixgbe_hw *);
1383 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
1384 s32 (*init_uta_tables)(struct ixgbe_hw *);
1385
1386 /* Flow Control */
1387 s32 (*setup_fc)(struct ixgbe_hw *, s32);
1252}; 1388};
1253 1389
1254struct ixgbe_phy_operations { 1390struct ixgbe_phy_operations {
1391 s32 (*identify)(struct ixgbe_hw *);
1392 s32 (*identify_sfp)(struct ixgbe_hw *);
1393 s32 (*reset)(struct ixgbe_hw *);
1394 s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
1395 s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
1255 s32 (*setup_link)(struct ixgbe_hw *); 1396 s32 (*setup_link)(struct ixgbe_hw *);
1256 s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *); 1397 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
1257 s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); 1398 bool);
1258}; 1399 s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
1259 1400 s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
1260struct ixgbe_mac_info { 1401 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
1261 struct ixgbe_mac_operations ops; 1402 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
1262 enum ixgbe_mac_type type;
1263 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
1264 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
1265 s32 mc_filter_type;
1266 u32 num_rx_queues;
1267 u32 num_tx_queues;
1268 u32 num_rx_addrs;
1269 u32 link_attach_type;
1270 u32 link_mode_select;
1271 bool link_settings_loaded;
1272}; 1403};
1273 1404
1274struct ixgbe_eeprom_info { 1405struct ixgbe_eeprom_info {
1275 enum ixgbe_eeprom_type type; 1406 struct ixgbe_eeprom_operations ops;
1276 u16 word_size; 1407 enum ixgbe_eeprom_type type;
1277 u16 address_bits; 1408 u32 semaphore_delay;
1409 u16 word_size;
1410 u16 address_bits;
1278}; 1411};
1279 1412
1280struct ixgbe_phy_info { 1413struct ixgbe_mac_info {
1281 struct ixgbe_phy_operations ops; 1414 struct ixgbe_mac_operations ops;
1282 1415 enum ixgbe_mac_type type;
1283 enum ixgbe_phy_type type; 1416 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
1284 u32 addr; 1417 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
1285 u32 id; 1418 s32 mc_filter_type;
1286 u32 revision; 1419 u32 mcft_size;
1287 enum ixgbe_media_type media_type; 1420 u32 vft_size;
1288 u32 autoneg_advertised; 1421 u32 num_rar_entries;
1289 bool autoneg_wait_to_complete; 1422 u32 max_tx_queues;
1423 u32 max_rx_queues;
1424 u32 link_attach_type;
1425 u32 link_mode_select;
1426 bool link_settings_loaded;
1427 bool autoneg;
1428 bool autoneg_failed;
1290}; 1429};
1291 1430
1292struct ixgbe_info { 1431struct ixgbe_phy_info {
1293 enum ixgbe_mac_type mac; 1432 struct ixgbe_phy_operations ops;
1294 s32 (*get_invariants)(struct ixgbe_hw *); 1433 enum ixgbe_phy_type type;
1295 struct ixgbe_mac_operations *mac_ops; 1434 u32 addr;
1435 u32 id;
1436 enum ixgbe_sfp_type sfp_type;
1437 u32 revision;
1438 enum ixgbe_media_type media_type;
1439 bool reset_disable;
1440 ixgbe_autoneg_advertised autoneg_advertised;
1441 bool autoneg_wait_to_complete;
1296}; 1442};
1297 1443
1298struct ixgbe_hw { 1444struct ixgbe_hw {
@@ -1311,6 +1457,15 @@ struct ixgbe_hw {
1311 bool adapter_stopped; 1457 bool adapter_stopped;
1312}; 1458};
1313 1459
1460struct ixgbe_info {
1461 enum ixgbe_mac_type mac;
1462 s32 (*get_invariants)(struct ixgbe_hw *);
1463 struct ixgbe_mac_operations *mac_ops;
1464 struct ixgbe_eeprom_operations *eeprom_ops;
1465 struct ixgbe_phy_operations *phy_ops;
1466};
1467
1468
1314/* Error Codes */ 1469/* Error Codes */
1315#define IXGBE_ERR_EEPROM -1 1470#define IXGBE_ERR_EEPROM -1
1316#define IXGBE_ERR_EEPROM_CHECKSUM -2 1471#define IXGBE_ERR_EEPROM_CHECKSUM -2
@@ -1329,6 +1484,8 @@ struct ixgbe_hw {
1329#define IXGBE_ERR_RESET_FAILED -15 1484#define IXGBE_ERR_RESET_FAILED -15
1330#define IXGBE_ERR_SWFW_SYNC -16 1485#define IXGBE_ERR_SWFW_SYNC -16
1331#define IXGBE_ERR_PHY_ADDR_INVALID -17 1486#define IXGBE_ERR_PHY_ADDR_INVALID -17
1487#define IXGBE_ERR_I2C -18
1488#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
1332#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 1489#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
1333 1490
1334#endif /* _IXGBE_TYPE_H_ */ 1491#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
new file mode 100644
index 000000000000..f292df557544
--- /dev/null
+++ b/drivers/net/jme.c
@@ -0,0 +1,3019 @@
1/*
2 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
3 *
4 * Copyright 2008 JMicron Technology Corporation
5 * http://www.jmicron.com/
6 *
7 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24#include <linux/version.h>
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/pci.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/ethtool.h>
31#include <linux/mii.h>
32#include <linux/crc32.h>
33#include <linux/delay.h>
34#include <linux/spinlock.h>
35#include <linux/in.h>
36#include <linux/ip.h>
37#include <linux/ipv6.h>
38#include <linux/tcp.h>
39#include <linux/udp.h>
40#include <linux/if_vlan.h>
41#include "jme.h"
42
/*
 * Module parameters controlling the pseudo hot-plug feature.
 * -1 (the default) means "not set by the user"; the probe code
 * presumably falls back to BIOS/chip defaults — TODO confirm at
 * the point these are consumed.
 */
static int force_pseudohp = -1;
static int no_pseudohp = -1;
static int no_extplug = -1;
module_param(force_pseudohp, int, 0);
MODULE_PARM_DESC(force_pseudohp,
	"Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
	"Do not use external plug signal for pseudo hot-plug.");
54
/*
 * Read a PHY register over the SMI/MDIO interface.
 * Issues a read request and busy-waits (up to JME_PHY_TIMEOUT * 50
 * iterations of 20us) for the controller to clear SMI_OP_REQ.
 * Returns the register value, or 0 on timeout.
 * MII_BMSR is read twice ("again") — presumably to refresh latched
 * status bits; confirm against the PHY datasheet.
 */
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val, again = (reg == MII_BMSR) ? 1 : 0;

read_again:
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg);
		return 0;
	}

	if (again--)
		goto read_again;

	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}
84
85static void
86jme_mdio_write(struct net_device *netdev,
87 int phy, int reg, int val)
88{
89 struct jme_adapter *jme = netdev_priv(netdev);
90 int i;
91
92 jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
93 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
94 smi_phy_addr(phy) | smi_reg_addr(reg));
95
96 wmb();
97 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
98 udelay(20);
99 if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
100 break;
101 }
102
103 if (i == 0)
104 jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
105
106 return;
107}
108
109static inline void
110jme_reset_phy_processor(struct jme_adapter *jme)
111{
112 u32 val;
113
114 jme_mdio_write(jme->dev,
115 jme->mii_if.phy_id,
116 MII_ADVERTISE, ADVERTISE_ALL |
117 ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
118
119 if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
120 jme_mdio_write(jme->dev,
121 jme->mii_if.phy_id,
122 MII_CTRL1000,
123 ADVERTISE_1000FULL | ADVERTISE_1000HALF);
124
125 val = jme_mdio_read(jme->dev,
126 jme->mii_if.phy_id,
127 MII_BMCR);
128
129 jme_mdio_write(jme->dev,
130 jme->mii_if.phy_id,
131 MII_BMCR, val | BMCR_RESET);
132
133 return;
134}
135
/*
 * Program one wake-on-LAN pattern slot (frame number fnr): first the
 * CRC of the expected frame, then the per-dword byte mask. Each data
 * write goes through the WFOI (offset/index) register followed by the
 * WFODP (data port) register, with write barriers to keep the pairs
 * ordered.
 */
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
		       u32 *mask, u32 crc, int fnr)
{
	int i;

	/*
	 * Setup CRC pattern
	 */
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
				(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}
162
/*
 * Soft-reset the MAC: pulse GHC_SWRST, zero all TX/RX ring base
 * address/count registers and the multicast hash, clear every
 * wake-up frame slot (mask all-zero, CRC 0xCDCDCDCD), and reprogram
 * GPREG0/GPREG1. FPGA-based parts additionally get GPREG0_LNKINTPOLL
 * (presumably link-interrupt polling for the FPGA emulation — TODO
 * confirm).
 */
static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
	u32 crc = 0xCDCDCDCD;
	u32 gpreg0;
	int i;

	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);

	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
	jwrite32(jme, JME_RXQDC, 0x00000000);
	jwrite32(jme, JME_RXNDA, 0x00000000);
	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
	jwrite32(jme, JME_TXQDC, 0x00000000);
	jwrite32(jme, JME_TXNDA, 0x00000000);

	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
	if (jme->fpgaver)
		gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
	else
		gpreg0 = GPREG0_DEFAULT;
	jwrite32(jme, JME_GPREG0, gpreg0);
	jwrite32(jme, JME_GPREG1, 0);
}
195
/*
 * Clear the cached speed/duplex bits in reg_ghc and flush the result
 * to the GHC register (used before link renegotiation).
 */
static inline void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
}
202
/*
 * Leave any power-management sleep state: write PMCS (upper bits set
 * to clear latched wake status, low bits from the cached reg_pmcs),
 * force the PCI device to D0 and disable wake from D0.
 */
static inline void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
	pci_set_power_state(jme->pdev, PCI_D0);
	pci_enable_wake(jme->pdev, PCI_D0, false);
}
210
/*
 * Trigger a reload of the configuration EEPROM, if one is detected
 * (SMBCSR_EEPROMD). Waits up to JME_EEPROM_RELOAD_TIMEOUT ms for the
 * RELOAD bit to self-clear.
 * Returns 0 on success (or when no EEPROM is present), -EIO on
 * timeout.
 */
static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			jeprintk(jme->pdev, "eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}
240
241static void
242jme_load_macaddr(struct net_device *netdev)
243{
244 struct jme_adapter *jme = netdev_priv(netdev);
245 unsigned char macaddr[6];
246 u32 val;
247
248 spin_lock_bh(&jme->macaddr_lock);
249 val = jread32(jme, JME_RXUMA_LO);
250 macaddr[0] = (val >> 0) & 0xFF;
251 macaddr[1] = (val >> 8) & 0xFF;
252 macaddr[2] = (val >> 16) & 0xFF;
253 macaddr[3] = (val >> 24) & 0xFF;
254 val = jread32(jme, JME_RXUMA_HI);
255 macaddr[4] = (val >> 0) & 0xFF;
256 macaddr[5] = (val >> 8) & 0xFF;
257 memcpy(netdev->dev_addr, macaddr, 6);
258 spin_unlock_bh(&jme->macaddr_lock);
259}
260
261static inline void
262jme_set_rx_pcc(struct jme_adapter *jme, int p)
263{
264 switch (p) {
265 case PCC_OFF:
266 jwrite32(jme, JME_PCCRX0,
267 ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
268 ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
269 break;
270 case PCC_P1:
271 jwrite32(jme, JME_PCCRX0,
272 ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
273 ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
274 break;
275 case PCC_P2:
276 jwrite32(jme, JME_PCCRX0,
277 ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
278 ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
279 break;
280 case PCC_P3:
281 jwrite32(jme, JME_PCCRX0,
282 ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
283 ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
284 break;
285 default:
286 break;
287 }
288 wmb();
289
290 if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
291 msg_rx_status(jme, "Switched to PCC_P%d\n", p);
292}
293
294static void
295jme_start_irq(struct jme_adapter *jme)
296{
297 register struct dynpcc_info *dpi = &(jme->dpi);
298
299 jme_set_rx_pcc(jme, PCC_P1);
300 dpi->cur = PCC_P1;
301 dpi->attempt = PCC_P1;
302 dpi->cnt = 0;
303
304 jwrite32(jme, JME_PCCTX,
305 ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
306 ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
307 PCCTXQ0_EN
308 );
309
310 /*
311 * Enable Interrupts
312 */
313 jwrite32(jme, JME_IENS, INTR_ENABLE);
314}
315
/*
 * Mask all device interrupts (flushed write via jwrite32f).
 */
static inline void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);
}
324
/*
 * Enable posting of the status shadow block: program the 32-byte
 * aligned DMA address of the shadow area with the POSTEN bit set.
 */
static inline void
jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme,
		 JME_SHBA_LO,
		((u32)jme->shadow_dma & ~((u32)0x1F)) | SHBA_POSTEN);
}
332
/*
 * Disable status shadow posting by clearing the shadow base address.
 */
static inline void
jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}
338
/*
 * Build a PHY_LINK_* status word from PHY registers (used on FPGA
 * parts instead of the JME_PHY_LINK register). Register 17 is a
 * vendor-specific status register — presumably laid out to match the
 * PHY_LINK_* bits; confirm against the PHY datasheet. The
 * autoneg-complete bit is merged in from BMSR.
 */
static u32
jme_linkstat_from_phy(struct jme_adapter *jme)
{
	u32 phylink, bmsr;

	phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
	bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
	if (bmsr & BMSR_ANCOMP)
		phylink |= PHY_LINK_AUTONEG_COMPLETE;

	return phylink;
}
351
/*
 * Write PHY FIFO setting "A" (vendor register 27 = 0x0004); applied
 * on buggy JMC250 revisions at 10/1000 Mbps — see jme_check_link().
 */
static inline void
jme_set_phyfifoa(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}
357
/*
 * Write PHY FIFO setting "B" (vendor register 27 = 0x0000); applied
 * on buggy JMC250 revisions at 100 Mbps — see jme_check_link().
 */
static inline void
jme_set_phyfifob(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}
363
/*
 * Evaluate the current link state and, unless @testonly, reprogram
 * the MAC (GHC speed/duplex bits, TXMCS/TXTRHD half-duplex settings)
 * and update the carrier state to match.
 *
 * Returns 1 if the link state is unchanged from jme->phylink, else 0.
 * With @testonly set, no hardware or carrier state is modified.
 */
static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
	char linkmsg[64];
	int rc = 0;

	linkmsg[0] = '\0';

	/* FPGA parts report link via PHY regs, others via JME_PHY_LINK */
	if (jme->fpgaver)
		phylink = jme_linkstat_from_phy(jme);
	else
		phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If we did not enable AN
			 * Speed/Duplex Info should be obtained from SMI
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);

			/* BMCR encodes 1000M as SPEED1000 && !SPEED100 */
			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					PHY_LINK_DUPLEX : 0;

			strcat(linkmsg, "Forced: ");
		} else {
			/*
			 * Keep polling for speed/duplex resolve complete
			 */
			while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
				--cnt) {

				udelay(1);

				if (jme->fpgaver)
					phylink = jme_linkstat_from_phy(jme);
				else
					phylink = jread32(jme, JME_PHY_LINK);
			}
			if (!cnt)
				jeprintk(jme->pdev,
					"Waiting speed resolve timeout.\n");

			strcat(linkmsg, "ANed: ");
		}

		/* No change since last check: nothing to reprogram */
		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if (testonly)
			goto out;

		jme->phylink = phylink;

		ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
				GHC_SPEED_100M |
				GHC_SPEED_1000M |
				GHC_DPX);
		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc |= GHC_SPEED_10M;
			strcat(linkmsg, "10 Mbps, ");
			if (is_buggy250(jme->pdev->device, jme->chiprev))
				jme_set_phyfifoa(jme);
			break;
		case PHY_LINK_SPEED_100M:
			ghc |= GHC_SPEED_100M;
			strcat(linkmsg, "100 Mbps, ");
			if (is_buggy250(jme->pdev->device, jme->chiprev))
				jme_set_phyfifob(jme);
			break;
		case PHY_LINK_SPEED_1000M:
			ghc |= GHC_SPEED_1000M;
			strcat(linkmsg, "1000 Mbps, ");
			if (is_buggy250(jme->pdev->device, jme->chiprev))
				jme_set_phyfifoa(jme);
			break;
		default:
			break;
		}
		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");

		if (phylink & PHY_LINK_MDI_STAT)
			strcat(linkmsg, "MDI-X");
		else
			strcat(linkmsg, "MDI");

		/* Half duplex additionally needs backoff/carrier-sense/
		 * collision handling and TX retry thresholds */
		if (phylink & PHY_LINK_DUPLEX) {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		} else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		jme->reg_ghc = ghc;
		jwrite32(jme, JME_GHC, ghc);

		msg_link(jme, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	} else {
		if (testonly)
			goto out;

		msg_link(jme, "Link is down.\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}
499
/*
 * Allocate and initialize the TX descriptor ring (queue 0 only).
 * The coherent allocation is over-sized so the descriptor area can be
 * aligned to RING_DESC_ALIGN (16 bytes); desc/dma hold the aligned
 * CPU/bus addresses while alloc/dmaalloc keep the raw ones for
 * freeing. Returns 0 or -ENOMEM.
 */
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				   &(txring->dmaalloc),
				   GFP_ATOMIC);

	if (!txring->alloc) {
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	txring->desc		= (void *)ALIGN((unsigned long)(txring->alloc),
						RING_DESC_ALIGN);
	txring->dma		= ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use	= 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->tx_ring_size);

	return 0;
}
536
/*
 * Release the TX ring: free any skbs still attached to buffer-info
 * slots, reset the per-slot bookkeeping, free the coherent descriptor
 * area, and zero the ring state. Safe to call when the ring was never
 * allocated (alloc == NULL).
 * NOTE(review): pending DMA mappings are not unmapped here —
 * presumably torn down elsewhere before this runs; verify callers.
 */
static void
jme_free_tx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *txring = &(jme->txring[0]);
	struct jme_buffer_info *txbi = txring->bufinf;

	if (txring->alloc) {
		for (i = 0 ; i < jme->tx_ring_size ; ++i) {
			txbi = txring->bufinf + i;
			if (txbi->skb) {
				dev_kfree_skb(txbi->skb);
				txbi->skb = NULL;
			}
			txbi->mapping		= 0;
			txbi->len		= 0;
			txbi->nr_desc		= 0;
			txbi->start_xmit	= 0;
		}

		dma_free_coherent(&(jme->pdev->dev),
				  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				  txring->alloc,
				  txring->dmaalloc);

		txring->alloc		= NULL;
		txring->desc		= NULL;
		txring->dmaalloc	= 0;
		txring->dma		= 0;
	}
	txring->next_to_use	= 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, 0);

}
572
/*
 * Program queue 0's descriptor base address (split low/high 32 bits)
 * and descriptor count, then enable the TX engine.
 */
static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
	wmb();

	/*
	 * Setup TX Queue 0 DMA Base Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descriptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);

}
603
/*
 * Re-assert TXCS_ENABLE on queue 0 (e.g. after an error stop) without
 * reprogramming the ring.
 */
static inline void
jme_restart_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Restart TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}
614
/*
 * Clear TXCS_ENABLE and poll (up to JME_TX_DISABLE_TIMEOUT ms) for
 * the engine to report stopped; logs an error on timeout.
 */
static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		jeprintk(jme->pdev, "Disable TX engine timeout.\n");
}
637
638static void
639jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
640{
641 struct jme_ring *rxring = jme->rxring;
642 register struct rxdesc *rxdesc = rxring->desc;
643 struct jme_buffer_info *rxbi = rxring->bufinf;
644 rxdesc += i;
645 rxbi += i;
646
647 rxdesc->dw[0] = 0;
648 rxdesc->dw[1] = 0;
649 rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
650 rxdesc->desc1.bufaddrl = cpu_to_le32(
651 (__u64)rxbi->mapping & 0xFFFFFFFFUL);
652 rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
653 if (jme->dev->features & NETIF_F_HIGHDMA)
654 rxdesc->desc1.flags = RXFLAG_64BIT;
655 wmb();
656 rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
657}
658
/*
 * Allocate a fresh receive skb for ring slot @i and DMA-map its data
 * area for device writes. Returns 0 or -ENOMEM.
 * NOTE(review): the pci_map_page() result is not checked for mapping
 * failure — common for drivers of this era, but worth confirming.
 */
static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(jme->dev,
		jme->dev->mtu + RX_EXTRA_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_page(jme->pdev,
				     virt_to_page(skb->data),
				     offset_in_page(skb->data),
				     rxbi->len,
				     PCI_DMA_FROMDEVICE);

	return 0;
}
681
/*
 * Unmap and free the skb attached to RX ring slot @i, if any, and
 * clear the slot's bookkeeping.
 */
static void
jme_free_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxbi += i;

	if (rxbi->skb) {
		pci_unmap_page(jme->pdev,
				 rxbi->mapping,
				 rxbi->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(rxbi->skb);
		rxbi->skb = NULL;
		rxbi->mapping = 0;
		rxbi->len = 0;
	}
}
700
/*
 * Release the RX ring: free every receive buffer, free the coherent
 * descriptor area, and reset ring state. Safe when the ring was never
 * allocated.
 */
static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		for (i = 0 ; i < jme->rx_ring_size ; ++i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc    = NULL;
		rxring->desc     = NULL;
		rxring->dmaalloc = 0;
		rxring->dma      = 0;
	}
	rxring->next_to_use   = 0;
	atomic_set(&rxring->next_to_clean, 0);
}
723
/*
 * Allocate and initialize the RX descriptor ring: coherent area is
 * over-allocated and aligned to RING_DESC_ALIGN, then every slot gets
 * a freshly-allocated, DMA-mapped buffer and a clean descriptor.
 * On any buffer allocation failure the whole ring is torn down.
 * Returns 0 or -ENOMEM.
 */
static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				   &(rxring->dmaalloc),
				   GFP_ATOMIC);
	if (!rxring->alloc) {
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	rxring->desc		= (void *)ALIGN((unsigned long)(rxring->alloc),
						RING_DESC_ALIGN);
	rxring->dma		= ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use	= 0;
	atomic_set(&rxring->next_to_clean, 0);

	/*
	 * Initialize Receive Descriptors
	 */
	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;
}
764
/*
 * Program queue 0's RX descriptor base address and count, reload the
 * unicast/multicast filters via jme_set_multi(), then enable and
 * start the RX engine.
 */
static inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0);
	wmb();

	/*
	 * Setup RX DMA Base Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}
801
/*
 * Re-enable and restart the RX engine on queue 0 (used after an
 * RX-empty condition) without reprogramming the ring.
 */
static inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}
813
/*
 * Clear RXCS_ENABLE and poll (up to JME_RX_DISABLE_TIMEOUT ms) for
 * the engine to report stopped; logs an error on timeout.
 */
static inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
	wmb();

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
		rmb();
	}

	if (!i)
		jeprintk(jme->pdev, "Disable RX engine timeout.\n");

}
837
838static int
839jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
840{
841 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
842 return false;
843
844 if (unlikely(!(flags & RXWBFLAG_MF) &&
845 (flags & RXWBFLAG_TCPON) && !(flags & RXWBFLAG_TCPCS))) {
846 msg_rx_err(jme, "TCP Checksum error.\n");
847 goto out_sumerr;
848 }
849
850 if (unlikely(!(flags & RXWBFLAG_MF) &&
851 (flags & RXWBFLAG_UDPON) && !(flags & RXWBFLAG_UDPCS))) {
852 msg_rx_err(jme, "UDP Checksum error.\n");
853 goto out_sumerr;
854 }
855
856 if (unlikely((flags & RXWBFLAG_IPV4) && !(flags & RXWBFLAG_IPCS))) {
857 msg_rx_err(jme, "IPv4 Checksum error.\n");
858 goto out_sumerr;
859 }
860
861 return true;
862
863out_sumerr:
864 return false;
865}
866
/*
 * Hand the received skb at ring slot @idx up the stack and replace it
 * with a fresh buffer. If the replacement allocation fails the frame
 * is dropped (the old buffer is kept and re-synced for the device).
 * Handles checksum offload results, VLAN-tagged delivery, and
 * multicast/byte/packet statistics, then re-arms the descriptor.
 */
static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	rxdesc += idx;
	rxbi += idx;

	skb = rxbi->skb;
	pci_dma_sync_single_for_cpu(jme->pdev,
					rxbi->mapping,
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
		/* Could not refill: give the buffer back to the device
		 * and count the frame as dropped. */
		pci_dma_sync_single_for_device(jme->pdev,
						rxbi->mapping,
						rxbi->len,
						PCI_DMA_FROMDEVICE);

		++(NET_STAT(jme).rx_dropped);
	} else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		if (jme_rxsum_ok(jme, rxdesc->descwb.flags))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		if (rxdesc->descwb.flags & RXWBFLAG_TAGON) {
			if (jme->vlgrp) {
				jme->jme_vlan_rx(skb, jme->vlgrp,
					le32_to_cpu(rxdesc->descwb.vlan));
				NET_STAT(jme).rx_bytes += 4;
			}
		} else {
			jme->jme_rx(skb);
		}

		if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
				RXWBFLAG_DEST_MUL)
			++(NET_STAT(jme).multicast);

		jme->dev->last_rx = jiffies;
		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);

}
927
/*
 * Drain up to @limit completed RX descriptors. Uses rx_cleaning as a
 * non-blocking reentrancy guard (atomic_dec_and_test), and bails out
 * early while the link is changing or the carrier is down. Errored or
 * multi-descriptor frames are counted and their descriptors re-armed;
 * good frames go through jme_alloc_and_feed_skb().
 * Returns the number of unused budget slots (>= 0).
 */
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

	if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
		goto out_inc;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_inc;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out_inc;

	i = atomic_read(&rxring->next_to_clean);
	while (limit-- > 0) {
		rxdesc = rxring->desc;
		rxdesc += i;

		/* Stop at the first descriptor still owned by the NIC
		 * or whose writeback has not completed. */
		if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
		!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			goto out;

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		if (unlikely(desccnt > 1 ||
		rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

			if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			if (desccnt > 1)
				limit -= desccnt - 1;

			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);
				j = (j + 1) & (mask);
			}

		} else {
			jme_alloc_and_feed_skb(jme, i);
		}

		i = (i + desccnt) & (mask);
	}

out:
	atomic_set(&rxring->next_to_clean, i);

out_inc:
	atomic_inc(&jme->rx_cleaning);

	return limit > 0 ? limit : 0;

}
989
990static void
991jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
992{
993 if (likely(atmp == dpi->cur)) {
994 dpi->cnt = 0;
995 return;
996 }
997
998 if (dpi->attempt == atmp) {
999 ++(dpi->cnt);
1000 } else {
1001 dpi->attempt = atmp;
1002 dpi->cnt = 0;
1003 }
1004
1005}
1006
1007static void
1008jme_dynamic_pcc(struct jme_adapter *jme)
1009{
1010 register struct dynpcc_info *dpi = &(jme->dpi);
1011
1012 if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
1013 jme_attempt_pcc(dpi, PCC_P3);
1014 else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
1015 || dpi->intr_cnt > PCC_INTR_THRESHOLD)
1016 jme_attempt_pcc(dpi, PCC_P2);
1017 else
1018 jme_attempt_pcc(dpi, PCC_P1);
1019
1020 if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
1021 if (dpi->attempt < dpi->cur)
1022 tasklet_schedule(&jme->rxclean_task);
1023 jme_set_rx_pcc(jme, dpi->attempt);
1024 dpi->cur = dpi->attempt;
1025 dpi->cnt = 0;
1026 }
1027}
1028
/*
 * Snapshot the RX byte/packet counters for the next jme_dynamic_pcc()
 * evaluation and arm the hardware interval timer to fire after
 * PCC_INTERVAL_US (TMCSR counts up toward 0xFFFFFF).
 */
static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
	struct dynpcc_info *dpi = &(jme->dpi);
	dpi->last_bytes		= NET_STAT(jme).rx_bytes;
	dpi->last_pkts		= NET_STAT(jme).rx_packets;
	dpi->intr_cnt		= 0;
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}
1039
/*
 * Disarm the hardware interval timer used for coalescing tuning.
 */
static inline void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, 0);
}
1045
/*
 * Pseudo hot-plug shutdown step: once the link is confirmed down,
 * mask interrupts and arm TIMER2 with a near-maximal count —
 * presumably triggering the hardware power-down path shortly after;
 * confirm against the JMC2x0 datasheet.
 */
static void
jme_shutdown_nic(struct jme_adapter *jme)
{
	u32 phylink;

	phylink = jme_linkstat_from_phy(jme);

	if (!(phylink & PHY_LINK_UP)) {
		/*
		 * Disable all interrupt before issue timer
		 */
		jme_stop_irq(jme);
		jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
	}
}
1061
/*
 * Interval-timer tasklet: drives the pseudo hot-plug shutdown when
 * flagged, stops the timer while the link is down or changing, and
 * otherwise runs the dynamic coalescing tuner (interrupt mode only)
 * before re-arming the timer.
 */
static void
jme_pcc_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;

	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
		jme_shutdown_nic(jme);
		return;
	}

	if (unlikely(!netif_carrier_ok(netdev) ||
		(atomic_read(&jme->link_changing) != 1)
	)) {
		jme_stop_pcc_timer(jme);
		return;
	}

	/* NAPI polling mode tunes itself; skip dynamic PCC there. */
	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
		jme_dynamic_pcc(jme);

	jme_start_pcc_timer(jme);
}
1085
/*
 * Enter NAPI polling mode: turn RX interrupt coalescing off.
 */
static inline void
jme_polling_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_OFF);
}
1091
/*
 * Leave NAPI polling mode: restore the default P1 coalescing level.
 */
static inline void
jme_interrupt_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_P1);
}
1097
1098static inline int
1099jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
1100{
1101 u32 apmc;
1102 apmc = jread32(jme, JME_APMC);
1103 return apmc & JME_APMC_PSEUDO_HP_EN;
1104}
1105
/*
 * Arm the pseudo hot-plug shutdown path: enable PCIe shutdown in
 * APMC (optionally pulsing the external-plug interrupt enable unless
 * no_extplug), clear TIMER2, set the SHUTDOWN flag, and start the
 * interval timer to expire after APMC_PHP_SHUTDOWN_DELAY.
 */
static void
jme_start_shutdown_timer(struct jme_adapter *jme)
{
	u32 apmc;

	apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
	apmc &= ~JME_APMC_EPIEN_CTRL;
	if (!no_extplug) {
		jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
		wmb();
	}
	jwrite32f(jme, JME_APMC, apmc);

	jwrite32f(jme, JME_TIMER2, 0);
	set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
}
1124
/*
 * Cancel a pending pseudo hot-plug shutdown: stop both timers, clear
 * the SHUTDOWN flag, and disable the PCIe shutdown / external-plug
 * interrupt bits in APMC (pulsing EPIEN_CTRL_DIS first).
 */
static void
jme_stop_shutdown_timer(struct jme_adapter *jme)
{
	u32 apmc;

	jwrite32f(jme, JME_TMCSR, 0);
	jwrite32f(jme, JME_TIMER2, 0);
	clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);

	apmc = jread32(jme, JME_APMC);
	apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
	jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
	wmb();
	jwrite32f(jme, JME_APMC, apmc);
}
1140
/*
 * Link-change tasklet: acquire the link_changing "lock" (an atomic
 * counter, spun on until free), then — unless the link state and MTU
 * are unchanged — quiesce the device (stop queue/timers/tasklets,
 * disable engines, free rings) and, if the new link is up, rebuild
 * rings, re-enable engines, and restart the queue and PCC timer.
 * With the link down and pseudo hot-plug enabled, arms the shutdown
 * timer instead. Always releases the lock on exit.
 */
static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;
	int rc;

	/* Busy-wait acquisition of the link-changing counter-lock. */
	while (!atomic_dec_and_test(&jme->link_changing)) {
		atomic_inc(&jme->link_changing);
		msg_intr(jme, "Get link change lock failed.\n");
		while (atomic_read(&jme->link_changing) != 1)
			msg_intr(jme, "Waiting link change lock.\n");
	}

	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
		goto out;

	jme->old_mtu = netdev->mtu;
	netif_stop_queue(netdev);
	if (jme_pseudo_hotplug_enabled(jme))
		jme_stop_shutdown_timer(jme);

	jme_stop_pcc_timer(jme);
	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		jme_reset_ghc_speed(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);

		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		netif_carrier_off(netdev);
	}

	jme_check_link(netdev, 0);
	if (netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if (rc) {
			jeprintk(jme->pdev, "Allocating resources for RX error"
				", Device STOPPED!\n");
			goto out_enable_tasklet;
		}

		rc = jme_setup_tx_resources(jme);
		if (rc) {
			jeprintk(jme->pdev, "Allocating resources for TX error"
				", Device STOPPED!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);

		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_interrupt_mode(jme);

		jme_start_pcc_timer(jme);
	} else if (jme_pseudo_hotplug_enabled(jme)) {
		jme_start_shutdown_timer(jme);
	}

	goto out_enable_tasklet;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out_enable_tasklet:
	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);
out:
	atomic_inc(&jme->link_changing);
}
1222
/*
 * RX-clean tasklet (non-NAPI path): drain up to a full ring of
 * completed descriptors and count the interrupt for dynamic-PCC
 * tuning.
 */
static void
jme_rx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct dynpcc_info *dpi = &(jme->dpi);

	jme_process_receive(jme, jme->rx_ring_size);
	++(dpi->intr_cnt);

}
1233
/*
 * NAPI poll handler (the JME_NAPI_* macros abstract the old/new NAPI
 * APIs). Processes up to the budget, restarts the RX engine for every
 * recorded rx_empty event (counting each as a drop), and completes
 * NAPI + re-enables interrupt coalescing when work ran out.
 * Returns the amount of budget consumed.
 */
static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
	struct jme_adapter *jme = jme_napi_priv(holder);
	struct net_device *netdev = jme->dev;
	int rest;

	rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);

	if (rest) {
		JME_RX_COMPLETE(netdev, holder);
		jme_interrupt_mode(jme);
	}

	JME_NAPI_WEIGHT_SET(budget, rest);
	return JME_NAPI_WEIGHT_VAL(budget) - rest;
}
1258
/*
 * RX-queue-full recovery tasklet (non-NAPI path): skipped while the
 * link is changing or down; otherwise drains the ring via the clean
 * tasklet and restarts the RX engine for every recorded rx_empty
 * event, counting each as a drop.
 */
static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		return;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		return;

	msg_rx_status(jme, "RX Queue Full!\n");

	jme_rx_clean_tasklet(arg);

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);
}
1281
1282static void
1283jme_wake_queue_if_stopped(struct jme_adapter *jme)
1284{
1285 struct jme_ring *txring = jme->txring;
1286
1287 smp_wmb();
1288 if (unlikely(netif_queue_stopped(jme->dev) &&
1289 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
1290 msg_tx_done(jme, "TX Queue Waked.\n");
1291 netif_wake_queue(jme->dev);
1292 }
1293
1294}
1295
/*
 * TX-clean tasklet: reclaim completed transmit descriptors. Guarded
 * by the tx_cleaning atomic (non-blocking reentrancy check) and
 * skipped while the link is changing or down. For each completed
 * multi-descriptor packet: unmap the fragment pages, free the skb,
 * update error/packet/byte statistics, and clear the bookkeeping.
 * Finally advances next_to_clean, returns the freed slots to nr_free,
 * and wakes the queue if it was stopped.
 */
static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err, mask;

	tx_dbg(jme, "Into txclean.\n");

	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out;

	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

	for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

		ctxbi = txbi + i;

		/* Slot has an skb and the NIC released ownership. */
		if (likely(ctxbi->skb &&
		!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

			tx_dbg(jme, "txclean: %d+%d@%lu\n",
					i, ctxbi->nr_desc, jiffies);

			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			/* Unmap the fragments (descriptors 1..nr_desc-1);
			 * descriptor 0 carries the header/bookkeeping. */
			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (mask));
				txdesc[(i + j) & (mask)].dw[0] = 0;

				pci_unmap_page(jme->pdev,
						 ttxbi->mapping,
						 ttxbi->len,
						 PCI_DMA_TODEVICE);

				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);

			cnt += ctxbi->nr_desc;

			if (unlikely(err)) {
				++(NET_STAT(jme).tx_carrier_errors);
			} else {
				++(NET_STAT(jme).tx_packets);
				NET_STAT(jme).tx_bytes += ctxbi->len;
			}

			ctxbi->skb = NULL;
			ctxbi->len = 0;
			ctxbi->start_xmit = 0;

		} else {
			break;
		}

		i = (i + ctxbi->nr_desc) & mask;

		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies);
	atomic_set(&txring->next_to_clean, i);
	atomic_add(cnt, &txring->nr_free);

	jme_wake_queue_if_stopped(jme);

out:
	atomic_inc(&jme->tx_cleaning);
}
1377
/*
 * Common interrupt dispatch for both the INTx and MSI paths.  Masks
 * all device interrupts, acknowledges each event it handles, and
 * schedules the matching tasklet / NAPI work.
 */
static void
jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
{
	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		/*
		 * Link change event is critical
		 * all other events are ignored
		 */
		jwrite32(jme, JME_IEVE, intrstat);
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if (intrstat & INTR_TMINTR) {
		jwrite32(jme, JME_IEVE, INTR_TMINTR);
		tasklet_schedule(&jme->pcc_task);
	}

	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
		tasklet_schedule(&jme->txclean_task);
	}

	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
		/* Ack RX coalescing/empty events up front; the actual
		 * work is dispatched below depending on poll mode. */
		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
						     INTR_PCCRX0 |
						     INTR_RX0EMP)) |
					INTR_RX0);
	}

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		/* NAPI mode: count empty events and hand RX to poll */
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
				jme_polling_mode(jme);
				JME_RX_SCHEDULE(jme);
			}
		}
	} else {
		/* Interrupt mode: the empty condition takes priority
		 * over plain coalescing events. */
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_hi_schedule(&jme->rxempty_task);
		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
			tasklet_hi_schedule(&jme->rxclean_task);
		}
	}

out_reenable:
	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
}
1438
1439static irqreturn_t
1440jme_intr(int irq, void *dev_id)
1441{
1442 struct net_device *netdev = dev_id;
1443 struct jme_adapter *jme = netdev_priv(netdev);
1444 u32 intrstat;
1445
1446 intrstat = jread32(jme, JME_IEVE);
1447
1448 /*
1449 * Check if it's really an interrupt for us
1450 */
1451 if (unlikely(intrstat == 0))
1452 return IRQ_NONE;
1453
1454 /*
1455 * Check if the device still exist
1456 */
1457 if (unlikely(intrstat == ~((typeof(intrstat))0)))
1458 return IRQ_NONE;
1459
1460 jme_intr_msi(jme, intrstat);
1461
1462 return IRQ_HANDLED;
1463}
1464
/*
 * MSI interrupt handler: interrupt status is taken from the DMA
 * shadow register block instead of an MMIO read.
 */
static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	/* Make the device-written shadow copy visible to the CPU
	 * before reading it. */
	pci_dma_sync_single_for_cpu(jme->pdev,
				    jme->shadow_dma,
				    sizeof(u32) * SHADOW_REG_NR,
				    PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}
1483
/*
 * Kick the link state machine by firing the timer's software
 * interrupt bit (handled as INTR_SWINTR -> linkch_task).
 */
static void
jme_reset_link(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}
1489
/*
 * Restart PHY autonegotiation: read-modify-write of BMCR under the
 * phy lock to serialize against other MDIO users.
 */
static void
jme_restart_an(struct jme_adapter *jme)
{
	u32 bmcr;

	spin_lock_bh(&jme->phy_lock);
	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
	spin_unlock_bh(&jme->phy_lock);
}
1501
1502static int
1503jme_request_irq(struct jme_adapter *jme)
1504{
1505 int rc;
1506 struct net_device *netdev = jme->dev;
1507 irq_handler_t handler = jme_intr;
1508 int irq_flags = IRQF_SHARED;
1509
1510 if (!pci_enable_msi(jme->pdev)) {
1511 set_bit(JME_FLAG_MSI, &jme->flags);
1512 handler = jme_msi;
1513 irq_flags = 0;
1514 }
1515
1516 rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1517 netdev);
1518 if (rc) {
1519 jeprintk(jme->pdev,
1520 "Unable to request %s interrupt (return: %d)\n",
1521 test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
1522 rc);
1523
1524 if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1525 pci_disable_msi(jme->pdev);
1526 clear_bit(JME_FLAG_MSI, &jme->flags);
1527 }
1528 } else {
1529 netdev->irq = jme->pdev->irq;
1530 }
1531
1532 return rc;
1533}
1534
/*
 * Release the IRQ; if MSI was in use, disable it and restore the
 * netdev's irq number to the (now INTx) pdev vector.
 */
static void
jme_free_irq(struct jme_adapter *jme)
{
	free_irq(jme->pdev->irq, jme->dev);
	if (test_bit(JME_FLAG_MSI, &jme->flags)) {
		pci_disable_msi(jme->pdev);
		clear_bit(JME_FLAG_MSI, &jme->flags);
		jme->dev->irq = jme->pdev->irq;
	}
}
1545
/*
 * ndo_open: enable NAPI and the deferred-work tasklets before hooking
 * the IRQ (so the first interrupt can safely schedule them), then
 * bring up the PHY and trigger link negotiation.
 */
static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	jme_clear_pm(jme);
	JME_NAPI_ENABLE(jme);

	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);

	rc = jme_request_irq(jme);
	if (rc)
		goto err_out;

	jme_enable_shadow(jme);
	jme_start_irq(jme);

	/* Restore settings previously applied via ethtool, otherwise
	 * start from a freshly reset PHY. */
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_reset_link(jme);

	return 0;

err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}
1580
/*
 * Force the PHY out of autonegotiation into 100 Mbit half duplex and
 * program the MAC clock (GHC) to match.
 */
static void
jme_set_100m_half(struct jme_adapter *jme)
{
	u32 bmcr, tmp;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
		       BMCR_SPEED1000 | BMCR_FULLDPLX);
	tmp |= BMCR_SPEED100;

	/* Only touch the PHY when the setting actually changes */
	if (bmcr != tmp)
		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

	/* FPGA prototypes additionally need link polling enabled */
	if (jme->fpgaver)
		jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
	else
		jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}
1599
#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
/*
 * Wait for the PHY to report link up: 1 s settle delay, then poll
 * every 10 ms for up to ~2 s.  NOTE(review): mdelay() busy-spins;
 * presumably this runs in a context that cannot sleep — confirm
 * against the callers before converting to msleep().
 */
static void
jme_wait_link(struct jme_adapter *jme)
{
	u32 phylink, to = JME_WAIT_LINK_TIME;

	mdelay(1000);
	phylink = jme_linkstat_from_phy(jme);
	while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
		mdelay(10);
		phylink = jme_linkstat_from_phy(jme);
	}
}
1613
/* Power the PHY down via the BMCR power-down bit */
static inline void
jme_phy_off(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}
1619
/*
 * ndo_close: quiesce in strict order — stop the queue and carrier,
 * mask and free the IRQ, disable NAPI, kill the tasklets (no new
 * interrupts can schedule them now), then shut down the engines and
 * release ring memory before powering off the PHY.
 */
static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	jme_free_irq(jme);

	JME_NAPI_DISABLE(jme);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	tasklet_kill(&jme->rxempty_task);

	jme_reset_ghc_speed(jme);
	jme_disable_rx_engine(jme);
	jme_disable_tx_engine(jme);
	jme_reset_mac_processor(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);
	jme->phylink = 0;
	jme_phy_off(jme);

	return 0;
}
1650
1651static int
1652jme_alloc_txdesc(struct jme_adapter *jme,
1653 struct sk_buff *skb)
1654{
1655 struct jme_ring *txring = jme->txring;
1656 int idx, nr_alloc, mask = jme->tx_ring_mask;
1657
1658 idx = txring->next_to_use;
1659 nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1660
1661 if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1662 return -1;
1663
1664 atomic_sub(nr_alloc, &txring->nr_free);
1665
1666 txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1667
1668 return idx;
1669}
1670
/*
 * DMA-map one page range and fill a TX data descriptor with its
 * address and length.  This descriptor's OWN bit is set immediately;
 * the chain is only released to hardware when the head descriptor's
 * OWN bit is set later in jme_fill_first_tx_desc().
 */
static void
jme_fill_tx_map(struct pci_dev *pdev,
		struct txdesc *txdesc,
		struct jme_buffer_info *txbi,
		struct page *page,
		u32 page_offset,
		u32 len,
		u8 hidma)
{
	dma_addr_t dmaaddr;

	dmaaddr = pci_map_page(pdev,
			       page,
			       page_offset,
			       len,
			       PCI_DMA_TODEVICE);

	pci_dma_sync_single_for_device(pdev,
				       dmaaddr,
				       len,
				       PCI_DMA_TODEVICE);

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->desc2.flags = TXFLAG_OWN;
	/* 64-bit flag tells hardware the high address word is valid */
	txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0;
	txdesc->desc2.datalen = cpu_to_le16(len);
	txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
	txdesc->desc2.bufaddrl = cpu_to_le32(
			(__u64)dmaaddr & 0xFFFFFFFFUL);

	/* Remember the mapping so txclean can unmap it later */
	txbi->mapping = dmaaddr;
	txbi->len = len;
}
1705
/*
 * Map an skb's data onto the TX ring: page fragments go into slots
 * idx+2 onward, the linear head into slot idx+1.  Slot idx itself is
 * the command descriptor, filled later by jme_fill_first_tx_desc().
 */
static void
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = jme->txring;
	struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;
	int mask = jme->tx_ring_mask;
	struct skb_frag_struct *frag;
	u32 len;

	for (i = 0 ; i < nr_frags ; ++i) {
		frag = &skb_shinfo(skb)->frags[i];
		ctxdesc = txdesc + ((idx + i + 2) & (mask));
		ctxbi = txbi + ((idx + i + 2) & (mask));

		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
				frag->page_offset, frag->size, hidma);
	}

	/* Linear part: headlen only if fragments carry the rest */
	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	ctxdesc = txdesc + ((idx + 1) & (mask));
	ctxbi = txbi + ((idx + 1) & (mask));
	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
			offset_in_page(skb->data), len, hidma);

}
1734
1735static int
1736jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
1737{
1738 if (unlikely(skb_shinfo(skb)->gso_size &&
1739 skb_header_cloned(skb) &&
1740 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
1741 dev_kfree_skb(skb);
1742 return -1;
1743 }
1744
1745 return 0;
1746}
1747
/*
 * Prepare TSO for an skb: if gso_size is set, program the descriptor
 * MSS, set the large-send flag, and seed the TCP checksum with the
 * pseudo-header sum (hardware computes the rest per segment).
 * Returns 0 when TSO is used, 1 when the caller should fall back to
 * plain checksum offload.
 */
static int
jme_tx_tso(struct sk_buff *skb,
		u16 *mss, u8 *flags)
{
	*mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT;
	if (*mss) {
		*flags |= TXFLAG_LSEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			/* IP checksum is recomputed by hardware */
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
							       &ip6h->daddr, 0,
							       IPPROTO_TCP,
							       0);
		}

		return 0;
	}

	return 1;
}
1778
1779static void
1780jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
1781{
1782 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1783 u8 ip_proto;
1784
1785 switch (skb->protocol) {
1786 case htons(ETH_P_IP):
1787 ip_proto = ip_hdr(skb)->protocol;
1788 break;
1789 case htons(ETH_P_IPV6):
1790 ip_proto = ipv6_hdr(skb)->nexthdr;
1791 break;
1792 default:
1793 ip_proto = 0;
1794 break;
1795 }
1796
1797 switch (ip_proto) {
1798 case IPPROTO_TCP:
1799 *flags |= TXFLAG_TCPCS;
1800 break;
1801 case IPPROTO_UDP:
1802 *flags |= TXFLAG_UDPCS;
1803 break;
1804 default:
1805 msg_tx_err(jme, "Error upper layer protocol.\n");
1806 break;
1807 }
1808 }
1809}
1810
1811static inline void
1812jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags)
1813{
1814 if (vlan_tx_tag_present(skb)) {
1815 *flags |= TXFLAG_TAGON;
1816 *vlan = vlan_tx_tag_get(skb);
1817 }
1818}
1819
/*
 * Fill the head (command) descriptor for an skb: packet size, MSS or
 * checksum flags, VLAN tag, and finally the OWN bit that releases the
 * whole chain to hardware.  Also records the bookkeeping txclean
 * needs (descriptor count, skb, length, timestamp).
 */
static int
jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = jme->txring;
	struct txdesc *txdesc;
	struct jme_buffer_info *txbi;
	u8 flags;

	txdesc = (struct txdesc *)txring->desc + idx;
	txbi = txring->bufinf + idx;

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set OWN bit at final.
	 * When kernel transmit faster than NIC.
	 * And NIC trying to send this descriptor before we tell
	 * it to start sending this TX queue.
	 * Other fields are already filled correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set checksum flags while not tso
	 */
	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
		jme_tx_csum(jme, skb, &flags);
	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
	txdesc->desc1.flags = flags;
	/*
	 * Set tx buffer info after telling NIC to send
	 * For better tx_clean timing
	 */
	wmb();
	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
	txbi->skb = skb;
	txbi->len = skb->len;
	txbi->start_xmit = jiffies;
	/* jiffies of 0 would read as "no timestamp" in the timeout
	 * check, so substitute all-ones. */
	if (!txbi->start_xmit)
		txbi->start_xmit = (0UL-1);

	return 0;
}
1866
/*
 * Stop the TX queue when descriptor space runs low (re-waking at once
 * if a racing txclean already freed enough), and also stop it when
 * the oldest in-flight packet has been pending longer than
 * TX_TIMEOUT.
 */
static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
	struct jme_ring *txring = jme->txring;
	struct jme_buffer_info *txbi = txring->bufinf;
	int idx = atomic_read(&txring->next_to_clean);

	/* Oldest in-flight entry */
	txbi += idx;

	smp_wmb();
	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
		netif_stop_queue(jme->dev);
		msg_tx_queued(jme, "TX Queue Paused.\n");
		smp_wmb();
		/* Re-check: txclean may have freed space between the
		 * test above and the stop. */
		if (atomic_read(&txring->nr_free)
			>= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			msg_tx_queued(jme, "TX Queue Fast Waked.\n");
		}
	}

	if (unlikely(txbi->start_xmit &&
			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
			txbi->skb)) {
		netif_stop_queue(jme->dev);
		msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
	}
}
1895
1896/*
1897 * This function is already protected by netif_tx_lock()
1898 */
1899
1900static int
1901jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1902{
1903 struct jme_adapter *jme = netdev_priv(netdev);
1904 int idx;
1905
1906 if (unlikely(jme_expand_header(jme, skb))) {
1907 ++(NET_STAT(jme).tx_dropped);
1908 return NETDEV_TX_OK;
1909 }
1910
1911 idx = jme_alloc_txdesc(jme, skb);
1912
1913 if (unlikely(idx < 0)) {
1914 netif_stop_queue(netdev);
1915 msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");
1916
1917 return NETDEV_TX_BUSY;
1918 }
1919
1920 jme_map_tx_skb(jme, skb, idx);
1921 jme_fill_first_tx_desc(jme, skb, idx);
1922
1923 jwrite32(jme, JME_TXCS, jme->reg_txcs |
1924 TXCS_SELECT_QUEUE0 |
1925 TXCS_QUEUE0S |
1926 TXCS_ENABLE);
1927 netdev->trans_start = jiffies;
1928
1929 tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
1930 skb_shinfo(skb)->nr_frags + 2,
1931 jiffies);
1932 jme_stop_queue_if_full(jme);
1933
1934 return NETDEV_TX_OK;
1935}
1936
/*
 * ndo_set_mac_address: rejected while the interface is running;
 * otherwise program the new unicast address into the RXUMA registers
 * (bytes 0-3 low, 4-5 high) under the macaddr lock.
 */
static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock_bh(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	/* & 0xff strips sign extension from the char sa_data bytes */
	val = (addr->sa_data[3] & 0xff) << 24 |
	      (addr->sa_data[2] & 0xff) << 16 |
	      (addr->sa_data[1] & 0xff) <<  8 |
	      (addr->sa_data[0] & 0xff);
	jwrite32(jme, JME_RXUMA_LO, val);
	val = (addr->sa_data[5] & 0xff) << 8 |
	      (addr->sa_data[4] & 0xff);
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock_bh(&jme->macaddr_lock);

	return 0;
}
1962
/*
 * ndo_set_multicast_list: rebuild the RX mode register from netdev
 * flags — promiscuous, all-multicast, or a 64-bit CRC hash filter
 * built from the multicast address list.
 */
static void
jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	int i;

	spin_lock_bh(&jme->rxmcs_lock);

	jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC) {
		jme->reg_rxmcs |= RXMCS_ALLFRAME;
	} else if (netdev->flags & IFF_ALLMULTI) {
		jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
	} else if (netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
		     mclist && i < netdev->mc_count;
		     ++i, mclist = mclist->next) {

			/* Low 6 CRC bits select one of 64 hash slots */
			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	/* Publish the hash table before enabling the new RX mode */
	wmb();
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

	spin_unlock_bh(&jme->rxmcs_lock);
}
2000
2001static int
2002jme_change_mtu(struct net_device *netdev, int new_mtu)
2003{
2004 struct jme_adapter *jme = netdev_priv(netdev);
2005
2006 if (new_mtu == jme->old_mtu)
2007 return 0;
2008
2009 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
2010 ((new_mtu) < IPV6_MIN_MTU))
2011 return -EINVAL;
2012
2013 if (new_mtu > 4000) {
2014 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
2015 jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
2016 jme_restart_rx_engine(jme);
2017 } else {
2018 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
2019 jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
2020 jme_restart_rx_engine(jme);
2021 }
2022
2023 if (new_mtu > 1900) {
2024 netdev->features &= ~(NETIF_F_HW_CSUM |
2025 NETIF_F_TSO |
2026 NETIF_F_TSO6);
2027 } else {
2028 if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
2029 netdev->features |= NETIF_F_HW_CSUM;
2030 if (test_bit(JME_FLAG_TSO, &jme->flags))
2031 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2032 }
2033
2034 netdev->mtu = new_mtu;
2035 jme_reset_link(jme);
2036
2037 return 0;
2038}
2039
/*
 * ndo_tx_timeout: recover from a stuck transmitter by resetting the
 * PHY (restoring any user settings) and re-running link negotiation.
 */
static void
jme_tx_timeout(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	jme->phylink = 0;
	jme_reset_phy_processor(jme);
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);

	/*
	 * Force to Reset the link again
	 */
	jme_reset_link(jme);
}
2055
/* ndo_vlan_rx_register: remember the VLAN group for RX acceleration */
static void
jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	jme->vlgrp = grp;
}
2063
2064static void
2065jme_get_drvinfo(struct net_device *netdev,
2066 struct ethtool_drvinfo *info)
2067{
2068 struct jme_adapter *jme = netdev_priv(netdev);
2069
2070 strcpy(info->driver, DRV_NAME);
2071 strcpy(info->version, DRV_VERSION);
2072 strcpy(info->bus_info, pci_name(jme->pdev));
2073}
2074
/* ethtool get_regs_len: size of the register dump buffer */
static int
jme_get_regs_len(struct net_device *netdev)
{
	return JME_REG_LEN;
}
2080
2081static void
2082mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
2083{
2084 int i;
2085
2086 for (i = 0 ; i < len ; i += 4)
2087 p[i >> 2] = jread32(jme, reg + i);
2088}
2089
2090static void
2091mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
2092{
2093 int i;
2094 u16 *p16 = (u16 *)p;
2095
2096 for (i = 0 ; i < reg_nr ; ++i)
2097 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2098}
2099
/*
 * ethtool get_regs: dump the MAC/PHY/MISC/RSS MMIO windows at 0x100
 * byte intervals into the buffer, followed by the MDIO PHY registers.
 * Unwritten space is left as 0xFF.
 */
static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 *p32 = (u32 *)p;

	memset(p, 0xFF, JME_REG_LEN);

	regs->version = 1;
	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);

	p32 += 0x100 >> 2;
	mdio_memcpy(jme, p32, JME_PHY_REG_NR);
}
2123
/*
 * ethtool get_coalesce: TX coalescing is fixed; RX values reflect
 * whether adaptive (dynamic PCC) mode or NAPI polling is active, and
 * which PCC profile is currently selected.
 */
static int
jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = PCC_TX_TO;
	ecmd->tx_max_coalesced_frames = PCC_TX_CNT;

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		/* NAPI polling: no RX interrupt coalescing in effect */
		ecmd->use_adaptive_rx_coalesce = false;
		ecmd->rx_coalesce_usecs = 0;
		ecmd->rx_max_coalesced_frames = 0;
		return 0;
	}

	ecmd->use_adaptive_rx_coalesce = true;

	switch (jme->dpi.cur) {
	case PCC_P1:
		ecmd->rx_coalesce_usecs = PCC_P1_TO;
		ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
		break;
	case PCC_P2:
		ecmd->rx_coalesce_usecs = PCC_P2_TO;
		ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
		break;
	case PCC_P3:
		ecmd->rx_coalesce_usecs = PCC_P3_TO;
		ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
		break;
	default:
		break;
	}

	return 0;
}
2160
/*
 * ethtool set_coalesce: only the use_adaptive_rx_coalesce knob is
 * honoured — it toggles between dynamic-PCC interrupt mode (adaptive
 * on) and NAPI polling mode (adaptive off).  Rejected while the
 * interface is running.
 */
static int
jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct dynpcc_info *dpi = &(jme->dpi);

	if (netif_running(netdev))
		return -EBUSY;

	if (ecmd->use_adaptive_rx_coalesce
	&& test_bit(JME_FLAG_POLL, &jme->flags)) {
		/* Poll -> adaptive interrupt mode, restart at PCC_P1 */
		clear_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_rx;
		jme->jme_vlan_rx = vlan_hwaccel_rx;
		dpi->cur		= PCC_P1;
		dpi->attempt		= PCC_P1;
		dpi->cnt		= 0;
		jme_set_rx_pcc(jme, PCC_P1);
		jme_interrupt_mode(jme);
	} else if (!(ecmd->use_adaptive_rx_coalesce)
	&& !(test_bit(JME_FLAG_POLL, &jme->flags))) {
		/* Interrupt -> NAPI polling mode */
		set_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_receive_skb;
		jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
		jme_interrupt_mode(jme);
	}

	return 0;
}
2190
/*
 * ethtool get_pauseparam: TX/RX pause come from the cached register
 * shadows; "autoneg" reports whether pause capabilities are being
 * advertised by the PHY.
 */
static void
jme_get_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_bh(&jme->phy_lock);

	ecmd->autoneg =
		(val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}
2208
/*
 * ethtool set_pauseparam: update TX pause (TXPFC), RX pause (RXMCS,
 * under the rxmcs lock) and the advertised pause bits (MII, under the
 * phy lock) — each only when the requested state actually differs.
 */
static int
jme_set_pauseparam(struct net_device *netdev,
		   struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	/* XOR detects a change between current and requested state */
	if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
		(ecmd->tx_pause != 0)) {

		if (ecmd->tx_pause)
			jme->reg_txpfc |= TXPFC_PF_EN;
		else
			jme->reg_txpfc &= ~TXPFC_PF_EN;

		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
	}

	spin_lock_bh(&jme->rxmcs_lock);
	if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
		(ecmd->rx_pause != 0)) {

		if (ecmd->rx_pause)
			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
		else
			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	}
	spin_unlock_bh(&jme->rxmcs_lock);

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
		(ecmd->autoneg != 0)) {

		if (ecmd->autoneg)
			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		else
			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		jme_mdio_write(jme->dev, jme->mii_if.phy_id,
				MII_ADVERTISE, val);
	}
	spin_unlock_bh(&jme->phy_lock);

	return 0;
}
2257
/*
 * ethtool get_wol: report supported wake sources (magic packet and
 * link change) and which ones are currently armed in reg_pmcs.
 */
static void
jme_get_wol(struct net_device *netdev,
		struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	wol->supported = WAKE_MAGIC | WAKE_PHY;

	wol->wolopts = 0;

	/* Link fall or link rise events both count as WAKE_PHY */
	if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
		wol->wolopts |= WAKE_PHY;

	if (jme->reg_pmcs & PMCS_MFEN)
		wol->wolopts |= WAKE_MAGIC;

}
2275
/*
 * ethtool set_wol: accept only WAKE_PHY and WAKE_MAGIC; rebuild
 * reg_pmcs from scratch and program it into the PMCS register.
 */
static int
jme_set_wol(struct net_device *netdev,
		struct ethtool_wolinfo *wol)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_MAGICSECURE |
				WAKE_UCAST |
				WAKE_MCAST |
				WAKE_BCAST |
				WAKE_ARP))
		return -EOPNOTSUPP;

	jme->reg_pmcs = 0;

	if (wol->wolopts & WAKE_PHY)
		jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;

	if (wol->wolopts & WAKE_MAGIC)
		jme->reg_pmcs |= PMCS_MFEN;

	jwrite32(jme, JME_PMCS, jme->reg_pmcs);

	return 0;
}
2301
/*
 * ethtool get_settings: delegate to the generic MII helper under the
 * phy lock.
 */
static int
jme_get_settings(struct net_device *netdev,
		     struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	spin_lock_bh(&jme->phy_lock);
	rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
	spin_unlock_bh(&jme->phy_lock);
	return rc;
}
2314
/*
 * ethtool set_settings: apply via the generic MII helper (1000 Mbit
 * requires autoneg), reset the link when a forced duplex change needs
 * it, and remember the accepted settings for restore on open/timeout.
 */
static int
jme_set_settings(struct net_device *netdev,
		     struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, fdc = 0;

	if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	/* Forced-duplex change requires a link reset afterwards */
	if (jme->mii_if.force_media &&
	ecmd->autoneg != AUTONEG_ENABLE &&
	(jme->mii_if.full_duplex != ecmd->duplex))
		fdc = 1;

	spin_lock_bh(&jme->phy_lock);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock_bh(&jme->phy_lock);

	if (!rc && fdc)
		jme_reset_link(jme);

	if (!rc) {
		/* Saved so open()/tx_timeout() can reapply them */
		set_bit(JME_FLAG_SSET, &jme->flags);
		jme->old_ecmd = *ecmd;
	}

	return rc;
}
2344
/* ethtool get_link: nonzero when the PHY_LINK register reports up */
static u32
jme_get_link(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
}
2351
/* ethtool get_msglevel: current driver message-enable bitmask */
static u32
jme_get_msglevel(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jme->msg_enable;
}
2358
/* ethtool set_msglevel: set the driver message-enable bitmask */
static void
jme_set_msglevel(struct net_device *netdev, u32 value)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	jme->msg_enable = value;
}
2365
/* ethtool get_rx_csum: nonzero when RX checksum offload is enabled */
static u32
jme_get_rx_csum(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	return jme->reg_rxmcs & RXMCS_CHECKSUM;
}
2372
/*
 * ethtool set_rx_csum: toggle the RX checksum bit in RXMCS under the
 * rxmcs lock and write it to hardware.
 */
static int
jme_set_rx_csum(struct net_device *netdev, u32 on)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	spin_lock_bh(&jme->rxmcs_lock);
	if (on)
		jme->reg_rxmcs |= RXMCS_CHECKSUM;
	else
		jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	spin_unlock_bh(&jme->rxmcs_lock);

	return 0;
}
2388
2389static int
2390jme_set_tx_csum(struct net_device *netdev, u32 on)
2391{
2392 struct jme_adapter *jme = netdev_priv(netdev);
2393
2394 if (on) {
2395 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2396 if (netdev->mtu <= 1900)
2397 netdev->features |= NETIF_F_HW_CSUM;
2398 } else {
2399 clear_bit(JME_FLAG_TXCSUM, &jme->flags);
2400 netdev->features &= ~NETIF_F_HW_CSUM;
2401 }
2402
2403 return 0;
2404}
2405
2406static int
2407jme_set_tso(struct net_device *netdev, u32 on)
2408{
2409 struct jme_adapter *jme = netdev_priv(netdev);
2410
2411 if (on) {
2412 set_bit(JME_FLAG_TSO, &jme->flags);
2413 if (netdev->mtu <= 1900)
2414 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2415 } else {
2416 clear_bit(JME_FLAG_TSO, &jme->flags);
2417 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2418 }
2419
2420 return 0;
2421}
2422
/* ethtool nway_reset: restart PHY autonegotiation */
static int
jme_nway_reset(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	jme_restart_an(jme);
	return 0;
}
2430
/*
 * Read one byte from the SMBus EEPROM at @addr: wait for the bus to
 * go idle, issue the read command, then wait for completion.  Returns
 * 0xFF on timeout — note this is indistinguishable from a genuine
 * 0xFF data byte.
 */
static u8
jme_smb_read(struct jme_adapter *jme, unsigned int addr)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return 0xFF;
	}

	jwrite32(jme, JME_SMBINTF,
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_READ |
		SMBINTF_HWCMD);

	/* Command bit self-clears when the transaction completes */
	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return 0xFF;
	}

	return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
}
2466
/*
 * Write one byte to the SMBus EEPROM at @addr: wait for the bus to go
 * idle, issue the write command, wait for completion, then pause for
 * the EEPROM's internal write cycle.  Timeouts are logged and the
 * write silently dropped.
 */
static void
jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
{
	u32 val;
	int to;

	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return;
	}

	jwrite32(jme, JME_SMBINTF,
		((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_WRITE |
		SMBINTF_HWCMD);

	/* Command bit self-clears when the transaction completes */
	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return;
	}

	/* Allow the EEPROM's internal write cycle to finish */
	mdelay(2);
}
2503
/* ethtool get_eeprom_len: nonzero only when an EEPROM is detected */
static int
jme_get_eeprom_len(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;
	val = jread32(jme, JME_SMBCSR);
	return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
}
2512
/*
 * ethtool get_eeprom: read the requested range byte-by-byte over
 * SMBus.
 */
static int
jme_get_eeprom(struct net_device *netdev,
		struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	/*
	 * ethtool will check the boundary for us
	 */
	eeprom->magic = JME_EEPROM_MAGIC;
	for (i = 0 ; i < len ; ++i)
		data[i] = jme_smb_read(jme, i + offset);

	return 0;
}
2529
/*
 * ethtool set_eeprom: verify the caller's magic, then write the
 * requested range byte-by-byte over SMBus.
 */
static int
jme_set_eeprom(struct net_device *netdev,
		struct ethtool_eeprom *eeprom, u8 *data)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, offset = eeprom->offset, len = eeprom->len;

	if (eeprom->magic != JME_EEPROM_MAGIC)
		return -EINVAL;

	/*
	 * ethtool will check the boundary for us
	 */
	for (i = 0 ; i < len ; ++i)
		jme_smb_write(jme, i + offset, data[i]);

	return 0;
}
2548
/* ethtool operations table; generic ops (set_sg) come from the core */
static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo            = jme_get_drvinfo,
	.get_regs_len		= jme_get_regs_len,
	.get_regs		= jme_get_regs,
	.get_coalesce		= jme_get_coalesce,
	.set_coalesce		= jme_set_coalesce,
	.get_pauseparam		= jme_get_pauseparam,
	.set_pauseparam		= jme_set_pauseparam,
	.get_wol		= jme_get_wol,
	.set_wol		= jme_set_wol,
	.get_settings		= jme_get_settings,
	.set_settings		= jme_set_settings,
	.get_link		= jme_get_link,
	.get_msglevel           = jme_get_msglevel,
	.set_msglevel           = jme_set_msglevel,
	.get_rx_csum		= jme_get_rx_csum,
	.set_rx_csum		= jme_set_rx_csum,
	.set_tx_csum		= jme_set_tx_csum,
	.set_tso		= jme_set_tso,
	.set_sg			= ethtool_op_set_sg,
	.nway_reset             = jme_nway_reset,
	.get_eeprom_len		= jme_get_eeprom_len,
	.get_eeprom		= jme_get_eeprom,
	.set_eeprom		= jme_set_eeprom,
};
2574
2575static int
2576jme_pci_dma64(struct pci_dev *pdev)
2577{
2578 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
2579 if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
2580 return 1;
2581
2582 if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
2583 if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
2584 return 1;
2585
2586 if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
2587 if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
2588 return 0;
2589
2590 return -1;
2591}
2592
2593static inline void
2594jme_phy_init(struct jme_adapter *jme)
2595{
2596 u16 reg26;
2597
2598 reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
2599 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
2600}
2601
2602static inline void
2603jme_check_hw_ver(struct jme_adapter *jme)
2604{
2605 u32 chipmode;
2606
2607 chipmode = jread32(jme, JME_CHIPMODE);
2608
2609 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2610 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2611}
2612
2613static int __devinit
2614jme_init_one(struct pci_dev *pdev,
2615 const struct pci_device_id *ent)
2616{
2617 int rc = 0, using_dac, i;
2618 struct net_device *netdev;
2619 struct jme_adapter *jme;
2620 u16 bmcr, bmsr;
2621 u32 apmc;
2622
2623 /*
2624 * set up PCI device basics
2625 */
2626 rc = pci_enable_device(pdev);
2627 if (rc) {
2628 jeprintk(pdev, "Cannot enable PCI device.\n");
2629 goto err_out;
2630 }
2631
2632 using_dac = jme_pci_dma64(pdev);
2633 if (using_dac < 0) {
2634 jeprintk(pdev, "Cannot set PCI DMA Mask.\n");
2635 rc = -EIO;
2636 goto err_out_disable_pdev;
2637 }
2638
2639 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2640 jeprintk(pdev, "No PCI resource region found.\n");
2641 rc = -ENOMEM;
2642 goto err_out_disable_pdev;
2643 }
2644
2645 rc = pci_request_regions(pdev, DRV_NAME);
2646 if (rc) {
2647 jeprintk(pdev, "Cannot obtain PCI resource region.\n");
2648 goto err_out_disable_pdev;
2649 }
2650
2651 pci_set_master(pdev);
2652
2653 /*
2654 * alloc and init net device
2655 */
2656 netdev = alloc_etherdev(sizeof(*jme));
2657 if (!netdev) {
2658 jeprintk(pdev, "Cannot allocate netdev structure.\n");
2659 rc = -ENOMEM;
2660 goto err_out_release_regions;
2661 }
2662 netdev->open = jme_open;
2663 netdev->stop = jme_close;
2664 netdev->hard_start_xmit = jme_start_xmit;
2665 netdev->set_mac_address = jme_set_macaddr;
2666 netdev->set_multicast_list = jme_set_multi;
2667 netdev->change_mtu = jme_change_mtu;
2668 netdev->ethtool_ops = &jme_ethtool_ops;
2669 netdev->tx_timeout = jme_tx_timeout;
2670 netdev->watchdog_timeo = TX_TIMEOUT;
2671 netdev->vlan_rx_register = jme_vlan_rx_register;
2672 NETDEV_GET_STATS(netdev, &jme_get_stats);
2673 netdev->features = NETIF_F_HW_CSUM |
2674 NETIF_F_SG |
2675 NETIF_F_TSO |
2676 NETIF_F_TSO6 |
2677 NETIF_F_HW_VLAN_TX |
2678 NETIF_F_HW_VLAN_RX;
2679 if (using_dac)
2680 netdev->features |= NETIF_F_HIGHDMA;
2681
2682 SET_NETDEV_DEV(netdev, &pdev->dev);
2683 pci_set_drvdata(pdev, netdev);
2684
2685 /*
2686 * init adapter info
2687 */
2688 jme = netdev_priv(netdev);
2689 jme->pdev = pdev;
2690 jme->dev = netdev;
2691 jme->jme_rx = netif_rx;
2692 jme->jme_vlan_rx = vlan_hwaccel_rx;
2693 jme->old_mtu = netdev->mtu = 1500;
2694 jme->phylink = 0;
2695 jme->tx_ring_size = 1 << 10;
2696 jme->tx_ring_mask = jme->tx_ring_size - 1;
2697 jme->tx_wake_threshold = 1 << 9;
2698 jme->rx_ring_size = 1 << 9;
2699 jme->rx_ring_mask = jme->rx_ring_size - 1;
2700 jme->msg_enable = JME_DEF_MSG_ENABLE;
2701 jme->regs = ioremap(pci_resource_start(pdev, 0),
2702 pci_resource_len(pdev, 0));
2703 if (!(jme->regs)) {
2704 jeprintk(pdev, "Mapping PCI resource region error.\n");
2705 rc = -ENOMEM;
2706 goto err_out_free_netdev;
2707 }
2708 jme->shadow_regs = pci_alloc_consistent(pdev,
2709 sizeof(u32) * SHADOW_REG_NR,
2710 &(jme->shadow_dma));
2711 if (!(jme->shadow_regs)) {
2712 jeprintk(pdev, "Allocating shadow register mapping error.\n");
2713 rc = -ENOMEM;
2714 goto err_out_unmap;
2715 }
2716
2717 if (no_pseudohp) {
2718 apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
2719 jwrite32(jme, JME_APMC, apmc);
2720 } else if (force_pseudohp) {
2721 apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
2722 jwrite32(jme, JME_APMC, apmc);
2723 }
2724
2725 NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
2726
2727 spin_lock_init(&jme->phy_lock);
2728 spin_lock_init(&jme->macaddr_lock);
2729 spin_lock_init(&jme->rxmcs_lock);
2730
2731 atomic_set(&jme->link_changing, 1);
2732 atomic_set(&jme->rx_cleaning, 1);
2733 atomic_set(&jme->tx_cleaning, 1);
2734 atomic_set(&jme->rx_empty, 1);
2735
2736 tasklet_init(&jme->pcc_task,
2737 &jme_pcc_tasklet,
2738 (unsigned long) jme);
2739 tasklet_init(&jme->linkch_task,
2740 &jme_link_change_tasklet,
2741 (unsigned long) jme);
2742 tasklet_init(&jme->txclean_task,
2743 &jme_tx_clean_tasklet,
2744 (unsigned long) jme);
2745 tasklet_init(&jme->rxclean_task,
2746 &jme_rx_clean_tasklet,
2747 (unsigned long) jme);
2748 tasklet_init(&jme->rxempty_task,
2749 &jme_rx_empty_tasklet,
2750 (unsigned long) jme);
2751 tasklet_disable_nosync(&jme->txclean_task);
2752 tasklet_disable_nosync(&jme->rxclean_task);
2753 tasklet_disable_nosync(&jme->rxempty_task);
2754 jme->dpi.cur = PCC_P1;
2755
2756 jme->reg_ghc = 0;
2757 jme->reg_rxcs = RXCS_DEFAULT;
2758 jme->reg_rxmcs = RXMCS_DEFAULT;
2759 jme->reg_txpfc = 0;
2760 jme->reg_pmcs = PMCS_MFEN;
2761 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2762 set_bit(JME_FLAG_TSO, &jme->flags);
2763
2764 /*
2765 * Get Max Read Req Size from PCI Config Space
2766 */
2767 pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
2768 jme->mrrs &= PCI_DCSR_MRRS_MASK;
2769 switch (jme->mrrs) {
2770 case MRRS_128B:
2771 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
2772 break;
2773 case MRRS_256B:
2774 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
2775 break;
2776 default:
2777 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
2778 break;
2779 };
2780
2781 /*
2782 * Must check before reset_mac_processor
2783 */
2784 jme_check_hw_ver(jme);
2785 jme->mii_if.dev = netdev;
2786 if (jme->fpgaver) {
2787 jme->mii_if.phy_id = 0;
2788 for (i = 1 ; i < 32 ; ++i) {
2789 bmcr = jme_mdio_read(netdev, i, MII_BMCR);
2790 bmsr = jme_mdio_read(netdev, i, MII_BMSR);
2791 if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
2792 jme->mii_if.phy_id = i;
2793 break;
2794 }
2795 }
2796
2797 if (!jme->mii_if.phy_id) {
2798 rc = -EIO;
2799 jeprintk(pdev, "Can not find phy_id.\n");
2800 goto err_out_free_shadow;
2801 }
2802
2803 jme->reg_ghc |= GHC_LINK_POLL;
2804 } else {
2805 jme->mii_if.phy_id = 1;
2806 }
2807 if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
2808 jme->mii_if.supports_gmii = true;
2809 else
2810 jme->mii_if.supports_gmii = false;
2811 jme->mii_if.mdio_read = jme_mdio_read;
2812 jme->mii_if.mdio_write = jme_mdio_write;
2813
2814 jme_clear_pm(jme);
2815 jme_set_phyfifoa(jme);
2816 pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
2817 if (!jme->fpgaver)
2818 jme_phy_init(jme);
2819 jme_phy_off(jme);
2820
2821 /*
2822 * Reset MAC processor and reload EEPROM for MAC Address
2823 */
2824 jme_reset_mac_processor(jme);
2825 rc = jme_reload_eeprom(jme);
2826 if (rc) {
2827 jeprintk(pdev,
2828 "Reload eeprom for reading MAC Address error.\n");
2829 goto err_out_free_shadow;
2830 }
2831 jme_load_macaddr(netdev);
2832
2833 /*
2834 * Tell stack that we are not ready to work until open()
2835 */
2836 netif_carrier_off(netdev);
2837 netif_stop_queue(netdev);
2838
2839 /*
2840 * Register netdev
2841 */
2842 rc = register_netdev(netdev);
2843 if (rc) {
2844 jeprintk(pdev, "Cannot register net device.\n");
2845 goto err_out_free_shadow;
2846 }
2847
2848 msg_probe(jme,
2849 "JMC250 gigabit%s ver:%x rev:%x "
2850 "macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
2851 (jme->fpgaver != 0) ? " (FPGA)" : "",
2852 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
2853 jme->rev,
2854 netdev->dev_addr[0],
2855 netdev->dev_addr[1],
2856 netdev->dev_addr[2],
2857 netdev->dev_addr[3],
2858 netdev->dev_addr[4],
2859 netdev->dev_addr[5]);
2860
2861 return 0;
2862
2863err_out_free_shadow:
2864 pci_free_consistent(pdev,
2865 sizeof(u32) * SHADOW_REG_NR,
2866 jme->shadow_regs,
2867 jme->shadow_dma);
2868err_out_unmap:
2869 iounmap(jme->regs);
2870err_out_free_netdev:
2871 pci_set_drvdata(pdev, NULL);
2872 free_netdev(netdev);
2873err_out_release_regions:
2874 pci_release_regions(pdev);
2875err_out_disable_pdev:
2876 pci_disable_device(pdev);
2877err_out:
2878 return rc;
2879}
2880
/*
 * Device removal: tear down in strict reverse order of jme_init_one().
 */
static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	/* Detach from the network stack before freeing any resources. */
	unregister_netdev(netdev);
	pci_free_consistent(pdev,
			    sizeof(u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

}
2899
/*
 * PCI suspend handler: quiesce the device, optionally arm Wake-on-LAN
 * at 100M half-duplex, then drop to D3cold.
 */
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	/* Block link-change processing; re-incremented in jme_resume(). */
	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);

	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	jme_disable_shadow(jme);

	/* If the link was up, stop the engines and free ring resources. */
	if (netif_carrier_ok(netdev)) {
		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		jme_stop_pcc_timer(jme);
		jme_reset_ghc_speed(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;
	}

	/* Re-enable the tasklets so open() after resume finds them armed. */
	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);

	pci_save_state(pdev);
	/* Non-zero reg_pmcs means some wake-up event (WoL) is configured. */
	if (jme->reg_pmcs) {
		jme_set_100m_half(jme);

		/* Link-change wake events need an established link first. */
		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
			jme_wait_link(jme);

		jwrite32(jme, JME_PMCS, jme->reg_pmcs);

		pci_enable_wake(pdev, PCI_D3cold, true);
	} else {
		/* No wake source: power the PHY down completely. */
		jme_phy_off(jme);
	}
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}
2954
/*
 * PCI resume handler: restore PCI state, reprogram the PHY and bring
 * the interface back under stack control, then kick a link re-check.
 */
static int
jme_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_clear_pm(jme);
	pci_restore_state(pdev);

	/* Reapply user ethtool settings if any were stored, else reset. */
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_enable_shadow(jme);
	jme_start_irq(jme);
	netif_device_attach(netdev);

	/* Undo the atomic_dec() done in jme_suspend(). */
	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	return 0;
}
2979
/* PCI IDs this driver binds to; exported via MODULE_DEVICE_TABLE below. */
static struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
	{ }
};
2985
/* PCI driver glue; suspend/resume only when power management is built in. */
static struct pci_driver jme_driver = {
	.name           = DRV_NAME,
	.id_table       = jme_pci_tbl,
	.probe          = jme_init_one,
	.remove         = __devexit_p(jme_remove_one),
#ifdef CONFIG_PM
	.suspend        = jme_suspend,
	.resume         = jme_resume,
#endif /* CONFIG_PM */
};
2996
2997static int __init
2998jme_init_module(void)
2999{
3000 printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
3001 "driver version %s\n", DRV_VERSION);
3002 return pci_register_driver(&jme_driver);
3003}
3004
/* Module exit: unregister the PCI driver (triggers jme_remove_one()). */
static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}
3010
/* Module entry/exit points and metadata. */
module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
3019
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
new file mode 100644
index 000000000000..b29688431a6d
--- /dev/null
+++ b/drivers/net/jme.h
@@ -0,0 +1,1199 @@
1/*
2 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
3 *
4 * Copyright 2008 JMicron Technology Corporation
5 * http://www.jmicron.com/
6 *
7 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24#ifndef __JME_H_INCLUDED__
25#define __JME_H_INCLUDEE__
26
27#define DRV_NAME "jme"
28#define DRV_VERSION "1.0.2"
29#define PFX DRV_NAME ": "
30
31#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
32#define PCI_DEVICE_ID_JMICRON_JMC260 0x0260
33
34/*
35 * Message related definitions
36 */
37#define JME_DEF_MSG_ENABLE \
38 (NETIF_MSG_PROBE | \
39 NETIF_MSG_LINK | \
40 NETIF_MSG_RX_ERR | \
41 NETIF_MSG_TX_ERR | \
42 NETIF_MSG_HW)
43
44#define jeprintk(pdev, fmt, args...) \
45 printk(KERN_ERR PFX fmt, ## args)
46
47#ifdef TX_DEBUG
48#define tx_dbg(priv, fmt, args...) \
49 printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ## args)
50#else
51#define tx_dbg(priv, fmt, args...)
52#endif
53
/*
 * Print a message of the given level when the corresponding
 * netif_msg_##type bit is enabled in priv->msg_enable.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe inside un-braced if/else bodies (the bare-if
 * form silently captured a following "else" at the call site).
 */
#define jme_msg(msglvl, type, priv, fmt, args...) \
do { \
	if (netif_msg_##type(priv)) \
		printk(msglvl "%s: " fmt, (priv)->dev->name, ## args); \
} while (0)
57
58#define msg_probe(priv, fmt, args...) \
59 jme_msg(KERN_INFO, probe, priv, fmt, ## args)
60
61#define msg_link(priv, fmt, args...) \
62 jme_msg(KERN_INFO, link, priv, fmt, ## args)
63
64#define msg_intr(priv, fmt, args...) \
65 jme_msg(KERN_INFO, intr, priv, fmt, ## args)
66
67#define msg_rx_err(priv, fmt, args...) \
68 jme_msg(KERN_ERR, rx_err, priv, fmt, ## args)
69
70#define msg_rx_status(priv, fmt, args...) \
71 jme_msg(KERN_INFO, rx_status, priv, fmt, ## args)
72
73#define msg_tx_err(priv, fmt, args...) \
74 jme_msg(KERN_ERR, tx_err, priv, fmt, ## args)
75
76#define msg_tx_done(priv, fmt, args...) \
77 jme_msg(KERN_INFO, tx_done, priv, fmt, ## args)
78
79#define msg_tx_queued(priv, fmt, args...) \
80 jme_msg(KERN_INFO, tx_queued, priv, fmt, ## args)
81
82#define msg_hw(priv, fmt, args...) \
83 jme_msg(KERN_ERR, hw, priv, fmt, ## args)
84
85/*
86 * Extra PCI Configuration space interface
87 */
88#define PCI_DCSR_MRRS 0x59
89#define PCI_DCSR_MRRS_MASK 0x70
90
91enum pci_dcsr_mrrs_vals {
92 MRRS_128B = 0x00,
93 MRRS_256B = 0x10,
94 MRRS_512B = 0x20,
95 MRRS_1024B = 0x30,
96 MRRS_2048B = 0x40,
97 MRRS_4096B = 0x50,
98};
99
100#define PCI_SPI 0xB0
101
102enum pci_spi_bits {
103 SPI_EN = 0x10,
104 SPI_MISO = 0x08,
105 SPI_MOSI = 0x04,
106 SPI_SCLK = 0x02,
107 SPI_CS = 0x01,
108};
109
110struct jme_spi_op {
111 void __user *uwbuf;
112 void __user *urbuf;
113 __u8 wn; /* Number of write actions */
114 __u8 rn; /* Number of read actions */
115 __u8 bitn; /* Number of bits per action */
116 __u8 spd; /* The maxim acceptable speed of controller, in MHz.*/
117 __u8 mode; /* CPOL, CPHA, and Duplex mode of SPI */
118
119 /* Internal use only */
120 u8 *kwbuf;
121 u8 *krbuf;
122 u8 sr;
123 u16 halfclk; /* Half of clock cycle calculated from spd, in ns */
124};
125
126enum jme_spi_op_bits {
127 SPI_MODE_CPHA = 0x01,
128 SPI_MODE_CPOL = 0x02,
129 SPI_MODE_DUP = 0x80,
130};
131
132#define HALF_US 500 /* 500 ns */
133#define JMESPIIOCTL SIOCDEVPRIVATE
134
135/*
136 * Dynamic(adaptive)/Static PCC values
137 */
138enum dynamic_pcc_values {
139 PCC_OFF = 0,
140 PCC_P1 = 1,
141 PCC_P2 = 2,
142 PCC_P3 = 3,
143
144 PCC_OFF_TO = 0,
145 PCC_P1_TO = 1,
146 PCC_P2_TO = 64,
147 PCC_P3_TO = 128,
148
149 PCC_OFF_CNT = 0,
150 PCC_P1_CNT = 1,
151 PCC_P2_CNT = 16,
152 PCC_P3_CNT = 32,
153};
/* State for dynamic (adaptive) PCC interrupt-coalescing tuning. */
struct dynpcc_info {
	unsigned long	last_bytes;	/* presumably byte count at last sample — confirm against PCC tasklet */
	unsigned long	last_pkts;	/* presumably packet count at last sample — confirm against PCC tasklet */
	unsigned long	intr_cnt;	/* interrupt counter */
	unsigned char	cur;		/* current PCC level (PCC_OFF..PCC_P3); set to PCC_P1 at probe */
	unsigned char	attempt;
	unsigned char	cnt;
};
162#define PCC_INTERVAL_US 100000
163#define PCC_INTERVAL (HZ / (1000000 / PCC_INTERVAL_US))
164#define PCC_P3_THRESHOLD (2 * 1024 * 1024)
165#define PCC_P2_THRESHOLD 800
166#define PCC_INTR_THRESHOLD 800
167#define PCC_TX_TO 1000
168#define PCC_TX_CNT 8
169
170/*
171 * TX/RX Descriptors
172 *
173 * TX/RX Ring DESC Count Must be multiple of 16 and <= 1024
174 */
175#define RING_DESC_ALIGN 16 /* Descriptor alignment */
176#define TX_DESC_SIZE 16
177#define TX_RING_NR 8
178#define TX_RING_ALLOC_SIZE(s) ((s * TX_DESC_SIZE) + RING_DESC_ALIGN)
179
180struct txdesc {
181 union {
182 __u8 all[16];
183 __le32 dw[4];
184 struct {
185 /* DW0 */
186 __le16 vlan;
187 __u8 rsv1;
188 __u8 flags;
189
190 /* DW1 */
191 __le16 datalen;
192 __le16 mss;
193
194 /* DW2 */
195 __le16 pktsize;
196 __le16 rsv2;
197
198 /* DW3 */
199 __le32 bufaddr;
200 } desc1;
201 struct {
202 /* DW0 */
203 __le16 rsv1;
204 __u8 rsv2;
205 __u8 flags;
206
207 /* DW1 */
208 __le16 datalen;
209 __le16 rsv3;
210
211 /* DW2 */
212 __le32 bufaddrh;
213
214 /* DW3 */
215 __le32 bufaddrl;
216 } desc2;
217 struct {
218 /* DW0 */
219 __u8 ehdrsz;
220 __u8 rsv1;
221 __u8 rsv2;
222 __u8 flags;
223
224 /* DW1 */
225 __le16 trycnt;
226 __le16 segcnt;
227
228 /* DW2 */
229 __le16 pktsz;
230 __le16 rsv3;
231
232 /* DW3 */
233 __le32 bufaddrl;
234 } descwb;
235 };
236};
237
238enum jme_txdesc_flags_bits {
239 TXFLAG_OWN = 0x80,
240 TXFLAG_INT = 0x40,
241 TXFLAG_64BIT = 0x20,
242 TXFLAG_TCPCS = 0x10,
243 TXFLAG_UDPCS = 0x08,
244 TXFLAG_IPCS = 0x04,
245 TXFLAG_LSEN = 0x02,
246 TXFLAG_TAGON = 0x01,
247};
248
249#define TXDESC_MSS_SHIFT 2
/*
 * TX write-back descriptor status flags (descwb.flags).
 * NOTE(review): the enum tag says "rxdescwb" but every member is a
 * TXWBFLAG_* — the tag looks misnamed; renaming it could break code
 * outside this view, so it is only flagged here.
 */
enum jme_rxdescwb_flags_bits {
	TXWBFLAG_OWN		= 0x80,
	TXWBFLAG_INT		= 0x40,
	TXWBFLAG_TMOUT		= 0x20,
	TXWBFLAG_TRYOUT		= 0x10,
	TXWBFLAG_COL		= 0x08,

	TXWBFLAG_ALLERR		= TXWBFLAG_TMOUT |
				  TXWBFLAG_TRYOUT |
				  TXWBFLAG_COL,
};
261
262#define RX_DESC_SIZE 16
263#define RX_RING_NR 4
264#define RX_RING_ALLOC_SIZE(s) ((s * RX_DESC_SIZE) + RING_DESC_ALIGN)
265#define RX_BUF_DMA_ALIGN 8
266#define RX_PREPAD_SIZE 10
267#define ETH_CRC_LEN 2
268#define RX_VLANHDR_LEN 2
269#define RX_EXTRA_LEN (RX_PREPAD_SIZE + \
270 ETH_HLEN + \
271 ETH_CRC_LEN + \
272 RX_VLANHDR_LEN + \
273 RX_BUF_DMA_ALIGN)
274
275struct rxdesc {
276 union {
277 __u8 all[16];
278 __le32 dw[4];
279 struct {
280 /* DW0 */
281 __le16 rsv2;
282 __u8 rsv1;
283 __u8 flags;
284
285 /* DW1 */
286 __le16 datalen;
287 __le16 wbcpl;
288
289 /* DW2 */
290 __le32 bufaddrh;
291
292 /* DW3 */
293 __le32 bufaddrl;
294 } desc1;
295 struct {
296 /* DW0 */
297 __le16 vlan;
298 __le16 flags;
299
300 /* DW1 */
301 __le16 framesize;
302 __u8 errstat;
303 __u8 desccnt;
304
305 /* DW2 */
306 __le32 rsshash;
307
308 /* DW3 */
309 __u8 hashfun;
310 __u8 hashtype;
311 __le16 resrv;
312 } descwb;
313 };
314};
315
316enum jme_rxdesc_flags_bits {
317 RXFLAG_OWN = 0x80,
318 RXFLAG_INT = 0x40,
319 RXFLAG_64BIT = 0x20,
320};
321
322enum jme_rxwbdesc_flags_bits {
323 RXWBFLAG_OWN = 0x8000,
324 RXWBFLAG_INT = 0x4000,
325 RXWBFLAG_MF = 0x2000,
326 RXWBFLAG_64BIT = 0x2000,
327 RXWBFLAG_TCPON = 0x1000,
328 RXWBFLAG_UDPON = 0x0800,
329 RXWBFLAG_IPCS = 0x0400,
330 RXWBFLAG_TCPCS = 0x0200,
331 RXWBFLAG_UDPCS = 0x0100,
332 RXWBFLAG_TAGON = 0x0080,
333 RXWBFLAG_IPV4 = 0x0040,
334 RXWBFLAG_IPV6 = 0x0020,
335 RXWBFLAG_PAUSE = 0x0010,
336 RXWBFLAG_MAGIC = 0x0008,
337 RXWBFLAG_WAKEUP = 0x0004,
338 RXWBFLAG_DEST = 0x0003,
339 RXWBFLAG_DEST_UNI = 0x0001,
340 RXWBFLAG_DEST_MUL = 0x0002,
341 RXWBFLAG_DEST_BRO = 0x0003,
342};
343
344enum jme_rxwbdesc_desccnt_mask {
345 RXWBDCNT_WBCPL = 0x80,
346 RXWBDCNT_DCNT = 0x7F,
347};
348
349enum jme_rxwbdesc_errstat_bits {
350 RXWBERR_LIMIT = 0x80,
351 RXWBERR_MIIER = 0x40,
352 RXWBERR_NIBON = 0x20,
353 RXWBERR_COLON = 0x10,
354 RXWBERR_ABORT = 0x08,
355 RXWBERR_SHORT = 0x04,
356 RXWBERR_OVERUN = 0x02,
357 RXWBERR_CRCERR = 0x01,
358 RXWBERR_ALLERR = 0xFF,
359};
360
361/*
362 * Buffer information corresponding to ring descriptors.
363 */
struct jme_buffer_info {
	struct sk_buff *skb;		/* buffer attached to the descriptor */
	dma_addr_t mapping;		/* DMA address the descriptor points at */
	int len;			/* mapped length */
	int nr_desc;			/* number of descriptors used by this buffer */
	unsigned long start_xmit;	/* presumably a jiffies timestamp — confirm at the set site */
};
371
372/*
373 * The structure holding buffer information and ring descriptors all together.
374 */
375#define MAX_RING_DESC_NR 1024
376struct jme_ring {
377 void *alloc; /* pointer to allocated memory */
378 void *desc; /* pointer to ring memory */
379 dma_addr_t dmaalloc; /* phys address of ring alloc */
380 dma_addr_t dma; /* phys address for ring dma */
381
382 /* Buffer information corresponding to each descriptor */
383 struct jme_buffer_info bufinf[MAX_RING_DESC_NR];
384
385 int next_to_use;
386 atomic_t next_to_clean;
387 atomic_t nr_free;
388};
389
390#define NET_STAT(priv) (priv->dev->stats)
391#define NETDEV_GET_STATS(netdev, fun_ptr)
392#define DECLARE_NET_DEVICE_STATS
393
394#define DECLARE_NAPI_STRUCT struct napi_struct napi;
395#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
396 netif_napi_add(dev, napis, pollfn, q);
397#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
398#define JME_NAPI_WEIGHT(w) int w
399#define JME_NAPI_WEIGHT_VAL(w) w
400#define JME_NAPI_WEIGHT_SET(w, r)
401#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis)
402#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
403#define JME_NAPI_DISABLE(priv) \
404 if (!napi_disable_pending(&priv->napi)) \
405 napi_disable(&priv->napi);
406#define JME_RX_SCHEDULE_PREP(priv) \
407 netif_rx_schedule_prep(priv->dev, &priv->napi)
408#define JME_RX_SCHEDULE(priv) \
409 __netif_rx_schedule(priv->dev, &priv->napi);
410
411/*
412 * Jmac Adapter Private data
413 */
414#define SHADOW_REG_NR 8
415struct jme_adapter {
416 struct pci_dev *pdev;
417 struct net_device *dev;
418 void __iomem *regs;
419 dma_addr_t shadow_dma;
420 u32 *shadow_regs;
421 struct mii_if_info mii_if;
422 struct jme_ring rxring[RX_RING_NR];
423 struct jme_ring txring[TX_RING_NR];
424 spinlock_t phy_lock;
425 spinlock_t macaddr_lock;
426 spinlock_t rxmcs_lock;
427 struct tasklet_struct rxempty_task;
428 struct tasklet_struct rxclean_task;
429 struct tasklet_struct txclean_task;
430 struct tasklet_struct linkch_task;
431 struct tasklet_struct pcc_task;
432 unsigned long flags;
433 u32 reg_txcs;
434 u32 reg_txpfc;
435 u32 reg_rxcs;
436 u32 reg_rxmcs;
437 u32 reg_ghc;
438 u32 reg_pmcs;
439 u32 phylink;
440 u32 tx_ring_size;
441 u32 tx_ring_mask;
442 u32 tx_wake_threshold;
443 u32 rx_ring_size;
444 u32 rx_ring_mask;
445 u8 mrrs;
446 unsigned int fpgaver;
447 unsigned int chiprev;
448 u8 rev;
449 u32 msg_enable;
450 struct ethtool_cmd old_ecmd;
451 unsigned int old_mtu;
452 struct vlan_group *vlgrp;
453 struct dynpcc_info dpi;
454 atomic_t intr_sem;
455 atomic_t link_changing;
456 atomic_t tx_cleaning;
457 atomic_t rx_cleaning;
458 atomic_t rx_empty;
459 int (*jme_rx)(struct sk_buff *skb);
460 int (*jme_vlan_rx)(struct sk_buff *skb,
461 struct vlan_group *grp,
462 unsigned short vlan_tag);
463 DECLARE_NAPI_STRUCT
464 DECLARE_NET_DEVICE_STATS
465};
466
467enum shadow_reg_val {
468 SHADOW_IEVE = 0,
469};
470
471enum jme_flags_bits {
472 JME_FLAG_MSI = 1,
473 JME_FLAG_SSET = 2,
474 JME_FLAG_TXCSUM = 3,
475 JME_FLAG_TSO = 4,
476 JME_FLAG_POLL = 5,
477 JME_FLAG_SHUTDOWN = 6,
478};
479
480#define TX_TIMEOUT (5 * HZ)
481#define JME_REG_LEN 0x500
482#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9216
483
484static inline struct jme_adapter*
485jme_napi_priv(struct napi_struct *napi)
486{
487 struct jme_adapter *jme;
488 jme = container_of(napi, struct jme_adapter, napi);
489 return jme;
490}
491
492/*
493 * MMaped I/O Resters
494 */
495enum jme_iomap_offsets {
496 JME_MAC = 0x0000,
497 JME_PHY = 0x0400,
498 JME_MISC = 0x0800,
499 JME_RSS = 0x0C00,
500};
501
502enum jme_iomap_lens {
503 JME_MAC_LEN = 0x80,
504 JME_PHY_LEN = 0x58,
505 JME_MISC_LEN = 0x98,
506 JME_RSS_LEN = 0xFF,
507};
508
509enum jme_iomap_regs {
510 JME_TXCS = JME_MAC | 0x00, /* Transmit Control and Status */
511 JME_TXDBA_LO = JME_MAC | 0x04, /* Transmit Queue Desc Base Addr */
512 JME_TXDBA_HI = JME_MAC | 0x08, /* Transmit Queue Desc Base Addr */
513 JME_TXQDC = JME_MAC | 0x0C, /* Transmit Queue Desc Count */
514 JME_TXNDA = JME_MAC | 0x10, /* Transmit Queue Next Desc Addr */
515 JME_TXMCS = JME_MAC | 0x14, /* Transmit MAC Control Status */
516 JME_TXPFC = JME_MAC | 0x18, /* Transmit Pause Frame Control */
517 JME_TXTRHD = JME_MAC | 0x1C, /* Transmit Timer/Retry@Half-Dup */
518
519 JME_RXCS = JME_MAC | 0x20, /* Receive Control and Status */
520 JME_RXDBA_LO = JME_MAC | 0x24, /* Receive Queue Desc Base Addr */
521 JME_RXDBA_HI = JME_MAC | 0x28, /* Receive Queue Desc Base Addr */
522 JME_RXQDC = JME_MAC | 0x2C, /* Receive Queue Desc Count */
523 JME_RXNDA = JME_MAC | 0x30, /* Receive Queue Next Desc Addr */
524 JME_RXMCS = JME_MAC | 0x34, /* Receive MAC Control Status */
525 JME_RXUMA_LO = JME_MAC | 0x38, /* Receive Unicast MAC Address */
526 JME_RXUMA_HI = JME_MAC | 0x3C, /* Receive Unicast MAC Address */
527 JME_RXMCHT_LO = JME_MAC | 0x40, /* Recv Multicast Addr HashTable */
528 JME_RXMCHT_HI = JME_MAC | 0x44, /* Recv Multicast Addr HashTable */
529 JME_WFODP = JME_MAC | 0x48, /* Wakeup Frame Output Data Port */
530 JME_WFOI = JME_MAC | 0x4C, /* Wakeup Frame Output Interface */
531
532 JME_SMI = JME_MAC | 0x50, /* Station Management Interface */
533 JME_GHC = JME_MAC | 0x54, /* Global Host Control */
534 JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */
535
536
537 JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
538 JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */
539 JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */
540 JME_SMBINTF = JME_PHY | 0x44, /* SMB Interface */
541
542
543 JME_TMCSR = JME_MISC | 0x00, /* Timer Control/Status Register */
544 JME_GPREG0 = JME_MISC | 0x08, /* General purpose REG-0 */
545 JME_GPREG1 = JME_MISC | 0x0C, /* General purpose REG-1 */
546 JME_IEVE = JME_MISC | 0x20, /* Interrupt Event Status */
547 JME_IREQ = JME_MISC | 0x24, /* Intr Req Status(For Debug) */
548 JME_IENS = JME_MISC | 0x28, /* Intr Enable - Setting Port */
549 JME_IENC = JME_MISC | 0x2C, /* Interrupt Enable - Clear Port */
550 JME_PCCRX0 = JME_MISC | 0x30, /* PCC Control for RX Queue 0 */
551 JME_PCCTX = JME_MISC | 0x40, /* PCC Control for TX Queues */
552 JME_CHIPMODE = JME_MISC | 0x44, /* Identify FPGA Version */
553 JME_SHBA_HI = JME_MISC | 0x48, /* Shadow Register Base HI */
554 JME_SHBA_LO = JME_MISC | 0x4C, /* Shadow Register Base LO */
555 JME_TIMER1 = JME_MISC | 0x70, /* Timer1 */
556 JME_TIMER2 = JME_MISC | 0x74, /* Timer2 */
557 JME_APMC = JME_MISC | 0x7C, /* Aggressive Power Mode Control */
558 JME_PCCSRX0 = JME_MISC | 0x80, /* PCC Status of RX0 */
559};
560
561/*
562 * TX Control/Status Bits
563 */
564enum jme_txcs_bits {
565 TXCS_QUEUE7S = 0x00008000,
566 TXCS_QUEUE6S = 0x00004000,
567 TXCS_QUEUE5S = 0x00002000,
568 TXCS_QUEUE4S = 0x00001000,
569 TXCS_QUEUE3S = 0x00000800,
570 TXCS_QUEUE2S = 0x00000400,
571 TXCS_QUEUE1S = 0x00000200,
572 TXCS_QUEUE0S = 0x00000100,
573 TXCS_FIFOTH = 0x000000C0,
574 TXCS_DMASIZE = 0x00000030,
575 TXCS_BURST = 0x00000004,
576 TXCS_ENABLE = 0x00000001,
577};
578
579enum jme_txcs_value {
580 TXCS_FIFOTH_16QW = 0x000000C0,
581 TXCS_FIFOTH_12QW = 0x00000080,
582 TXCS_FIFOTH_8QW = 0x00000040,
583 TXCS_FIFOTH_4QW = 0x00000000,
584
585 TXCS_DMASIZE_64B = 0x00000000,
586 TXCS_DMASIZE_128B = 0x00000010,
587 TXCS_DMASIZE_256B = 0x00000020,
588 TXCS_DMASIZE_512B = 0x00000030,
589
590 TXCS_SELECT_QUEUE0 = 0x00000000,
591 TXCS_SELECT_QUEUE1 = 0x00010000,
592 TXCS_SELECT_QUEUE2 = 0x00020000,
593 TXCS_SELECT_QUEUE3 = 0x00030000,
594 TXCS_SELECT_QUEUE4 = 0x00040000,
595 TXCS_SELECT_QUEUE5 = 0x00050000,
596 TXCS_SELECT_QUEUE6 = 0x00060000,
597 TXCS_SELECT_QUEUE7 = 0x00070000,
598
599 TXCS_DEFAULT = TXCS_FIFOTH_4QW |
600 TXCS_BURST,
601};
602
603#define JME_TX_DISABLE_TIMEOUT 10 /* 10 msec */
604
605/*
606 * TX MAC Control/Status Bits
607 */
608enum jme_txmcs_bit_masks {
609 TXMCS_IFG2 = 0xC0000000,
610 TXMCS_IFG1 = 0x30000000,
611 TXMCS_TTHOLD = 0x00000300,
612 TXMCS_FBURST = 0x00000080,
613 TXMCS_CARRIEREXT = 0x00000040,
614 TXMCS_DEFER = 0x00000020,
615 TXMCS_BACKOFF = 0x00000010,
616 TXMCS_CARRIERSENSE = 0x00000008,
617 TXMCS_COLLISION = 0x00000004,
618 TXMCS_CRC = 0x00000002,
619 TXMCS_PADDING = 0x00000001,
620};
621
622enum jme_txmcs_values {
623 TXMCS_IFG2_6_4 = 0x00000000,
624 TXMCS_IFG2_8_5 = 0x40000000,
625 TXMCS_IFG2_10_6 = 0x80000000,
626 TXMCS_IFG2_12_7 = 0xC0000000,
627
628 TXMCS_IFG1_8_4 = 0x00000000,
629 TXMCS_IFG1_12_6 = 0x10000000,
630 TXMCS_IFG1_16_8 = 0x20000000,
631 TXMCS_IFG1_20_10 = 0x30000000,
632
633 TXMCS_TTHOLD_1_8 = 0x00000000,
634 TXMCS_TTHOLD_1_4 = 0x00000100,
635 TXMCS_TTHOLD_1_2 = 0x00000200,
636 TXMCS_TTHOLD_FULL = 0x00000300,
637
638 TXMCS_DEFAULT = TXMCS_IFG2_8_5 |
639 TXMCS_IFG1_16_8 |
640 TXMCS_TTHOLD_FULL |
641 TXMCS_DEFER |
642 TXMCS_CRC |
643 TXMCS_PADDING,
644};
645
646enum jme_txpfc_bits_masks {
647 TXPFC_VLAN_TAG = 0xFFFF0000,
648 TXPFC_VLAN_EN = 0x00008000,
649 TXPFC_PF_EN = 0x00000001,
650};
651
652enum jme_txtrhd_bits_masks {
653 TXTRHD_TXPEN = 0x80000000,
654 TXTRHD_TXP = 0x7FFFFF00,
655 TXTRHD_TXREN = 0x00000080,
656 TXTRHD_TXRL = 0x0000007F,
657};
658
659enum jme_txtrhd_shifts {
660 TXTRHD_TXP_SHIFT = 8,
661 TXTRHD_TXRL_SHIFT = 0,
662};
663
664/*
665 * RX Control/Status Bits
666 */
667enum jme_rxcs_bit_masks {
668 /* FIFO full threshold for transmitting Tx Pause Packet */
669 RXCS_FIFOTHTP = 0x30000000,
670 /* FIFO threshold for processing next packet */
671 RXCS_FIFOTHNP = 0x0C000000,
672 RXCS_DMAREQSZ = 0x03000000, /* DMA Request Size */
673 RXCS_QUEUESEL = 0x00030000, /* Queue selection */
674 RXCS_RETRYGAP = 0x0000F000, /* RX Desc full retry gap */
675 RXCS_RETRYCNT = 0x00000F00, /* RX Desc full retry counter */
676 RXCS_WAKEUP = 0x00000040, /* Enable receive wakeup packet */
677 RXCS_MAGIC = 0x00000020, /* Enable receive magic packet */
678 RXCS_SHORT = 0x00000010, /* Enable receive short packet */
679 RXCS_ABORT = 0x00000008, /* Enable receive errorr packet */
680 RXCS_QST = 0x00000004, /* Receive queue start */
681 RXCS_SUSPEND = 0x00000002,
682 RXCS_ENABLE = 0x00000001,
683};
684
/*
 * Field values for the RX Control/Status register.  Each group below
 * populates one of the multi-bit fields declared in enum jme_rxcs_bits
 * (FIFOTHTP, FIFOTHNP, DMAREQSZ, QUEUESEL, RETRYGAP, RETRYCNT).
 */
enum jme_rxcs_values {
	/* FIFO full threshold for transmitting Tx pause packet (RXCS_FIFOTHTP) */
	RXCS_FIFOTHTP_16T	= 0x00000000,
	RXCS_FIFOTHTP_32T	= 0x10000000,
	RXCS_FIFOTHTP_64T	= 0x20000000,
	RXCS_FIFOTHTP_128T	= 0x30000000,

	/* FIFO threshold for processing next packet (RXCS_FIFOTHNP) */
	RXCS_FIFOTHNP_16QW	= 0x00000000,
	RXCS_FIFOTHNP_32QW	= 0x04000000,
	RXCS_FIFOTHNP_64QW	= 0x08000000,
	RXCS_FIFOTHNP_128QW	= 0x0C000000,

	/* DMA request size (RXCS_DMAREQSZ) */
	RXCS_DMAREQSZ_16B	= 0x00000000,
	RXCS_DMAREQSZ_32B	= 0x01000000,
	RXCS_DMAREQSZ_64B	= 0x02000000,
	RXCS_DMAREQSZ_128B	= 0x03000000,

	/* RX queue selection (RXCS_QUEUESEL) */
	RXCS_QUEUESEL_Q0	= 0x00000000,
	RXCS_QUEUESEL_Q1	= 0x00010000,
	RXCS_QUEUESEL_Q2	= 0x00020000,
	RXCS_QUEUESEL_Q3	= 0x00030000,

	/* RX descriptor full retry gap (RXCS_RETRYGAP) */
	RXCS_RETRYGAP_256ns	= 0x00000000,
	RXCS_RETRYGAP_512ns	= 0x00001000,
	RXCS_RETRYGAP_1024ns	= 0x00002000,
	RXCS_RETRYGAP_2048ns	= 0x00003000,
	RXCS_RETRYGAP_4096ns	= 0x00004000,
	RXCS_RETRYGAP_8192ns	= 0x00005000,
	RXCS_RETRYGAP_16384ns	= 0x00006000,
	RXCS_RETRYGAP_32768ns	= 0x00007000,

	/* RX descriptor full retry counter (RXCS_RETRYCNT) */
	RXCS_RETRYCNT_0		= 0x00000000,
	RXCS_RETRYCNT_4		= 0x00000100,
	RXCS_RETRYCNT_8		= 0x00000200,
	RXCS_RETRYCNT_12	= 0x00000300,
	RXCS_RETRYCNT_16	= 0x00000400,
	RXCS_RETRYCNT_20	= 0x00000500,
	RXCS_RETRYCNT_24	= 0x00000600,
	RXCS_RETRYCNT_28	= 0x00000700,
	RXCS_RETRYCNT_32	= 0x00000800,
	RXCS_RETRYCNT_36	= 0x00000900,
	RXCS_RETRYCNT_40	= 0x00000A00,
	RXCS_RETRYCNT_44	= 0x00000B00,
	RXCS_RETRYCNT_48	= 0x00000C00,
	RXCS_RETRYCNT_52	= 0x00000D00,
	RXCS_RETRYCNT_56	= 0x00000E00,
	RXCS_RETRYCNT_60	= 0x00000F00,

	/* Default RXCS field configuration programmed by the driver */
	RXCS_DEFAULT		= RXCS_FIFOTHTP_128T |
				  RXCS_FIFOTHNP_128QW |
				  RXCS_DMAREQSZ_128B |
				  RXCS_RETRYGAP_256ns |
				  RXCS_RETRYCNT_32,
};

/*
 * Timeout when waiting for RX to stop; presumably polled while
 * clearing RXCS_ENABLE -- TODO confirm against the .c file.
 */
#define JME_RX_DISABLE_TIMEOUT	10	/* 10 msec */
740
/*
 * RX MAC Control/Status Bits
 * (bit meanings inferred from the names; confirm against the datasheet)
 */
enum jme_rxmcs_bits {
	RXMCS_ALLFRAME		= 0x00000800,
	RXMCS_BRDFRAME		= 0x00000400,
	RXMCS_MULFRAME		= 0x00000200,
	RXMCS_UNIFRAME		= 0x00000100,
	RXMCS_ALLMULFRAME	= 0x00000080,
	RXMCS_MULFILTERED	= 0x00000040,
	RXMCS_RXCOLLDEC		= 0x00000020,
	RXMCS_FLOWCTRL		= 0x00000008,
	RXMCS_VTAGRM		= 0x00000004,
	RXMCS_PREPAD		= 0x00000002,
	RXMCS_CHECKSUM		= 0x00000001,

	/* Default RX MAC configuration programmed by the driver */
	RXMCS_DEFAULT		= RXMCS_VTAGRM |
				  RXMCS_PREPAD |
				  RXMCS_FLOWCTRL |
				  RXMCS_CHECKSUM,
};
762
/*
 * Wakeup Frame setup interface registers
 */
#define WAKEUP_FRAME_NR		8	/* number of wakeup frame slots */
#define WAKEUP_FRAME_MASK_DWNR	4	/* mask dwords per wakeup frame */

/* Bit fields of the Wakeup Frame Output Interface register */
enum jme_wfoi_bit_masks {
	WFOI_MASK_SEL		= 0x00000070,	/* mask dword select */
	WFOI_CRC_SEL		= 0x00000008,	/* CRC word select */
	WFOI_FRAME_SEL		= 0x00000007,	/* frame slot select */
};

/* Bit position of WFOI_MASK_SEL */
enum jme_wfoi_shifts {
	WFOI_MASK_SHIFT		= 4,
};
778
/*
 * SMI Related definitions
 * (MDIO management interface used to talk to the PHY)
 */
enum jme_smi_bit_mask {
	SMI_DATA_MASK		= 0xFFFF0000,	/* 16-bit PHY register data */
	SMI_REG_ADDR_MASK	= 0x0000F800,	/* PHY register number */
	SMI_PHY_ADDR_MASK	= 0x000007C0,	/* PHY address */
	SMI_OP_WRITE		= 0x00000020,
	/* Set to 1, after req done it'll be cleared to 0 */
	SMI_OP_REQ		= 0x00000010,
	SMI_OP_MDIO		= 0x00000008, /* Software access In/Out */
	SMI_OP_MDOE		= 0x00000004, /* Software Output Enable */
	SMI_OP_MDC		= 0x00000002, /* Software CLK Control */
	SMI_OP_MDEN		= 0x00000001, /* Software access Enable */
};

/* Bit positions of the multi-bit SMI fields above */
enum jme_smi_bit_shift {
	SMI_DATA_SHIFT		= 16,
	SMI_REG_ADDR_SHIFT	= 11,
	SMI_PHY_ADDR_SHIFT	= 6,
};
800
801static inline u32 smi_reg_addr(int x)
802{
803 return (x << SMI_REG_ADDR_SHIFT) & SMI_REG_ADDR_MASK;
804}
805
806static inline u32 smi_phy_addr(int x)
807{
808 return (x << SMI_PHY_ADDR_SHIFT) & SMI_PHY_ADDR_MASK;
809}
810
#define JME_PHY_TIMEOUT		100	/* 100 msec */
#define JME_PHY_REG_NR		32	/* number of PHY registers */

/*
 * Global Host Control
 */
enum jme_ghc_bit_mask {
	GHC_SWRST		= 0x40000000,	/* software reset */
	GHC_DPX			= 0x00000040,	/* duplex */
	GHC_SPEED		= 0x00000030,	/* speed field, see below */
	GHC_LINK_POLL		= 0x00000001,
};

/* Values for the GHC_SPEED field */
enum jme_ghc_speed_val {
	GHC_SPEED_10M		= 0x00000010,
	GHC_SPEED_100M		= 0x00000020,
	GHC_SPEED_1000M		= 0x00000030,
};
829
/*
 * Power management control and status register
 *
 * WFnDET bits report detection of wakeup frame n, WFnEN bits enable it;
 * LF/LR/MF pairs follow the same detect/enable pattern (names suggest
 * link-fall, link-rise and magic-frame -- confirm against the datasheet).
 */
enum jme_pmcs_bit_masks {
	PMCS_WF7DET		= 0x80000000,
	PMCS_WF6DET		= 0x40000000,
	PMCS_WF5DET		= 0x20000000,
	PMCS_WF4DET		= 0x10000000,
	PMCS_WF3DET		= 0x08000000,
	PMCS_WF2DET		= 0x04000000,
	PMCS_WF1DET		= 0x02000000,
	PMCS_WF0DET		= 0x01000000,
	PMCS_LFDET		= 0x00040000,
	PMCS_LRDET		= 0x00020000,
	PMCS_MFDET		= 0x00010000,
	PMCS_WF7EN		= 0x00008000,
	PMCS_WF6EN		= 0x00004000,
	PMCS_WF5EN		= 0x00002000,
	PMCS_WF4EN		= 0x00001000,
	PMCS_WF3EN		= 0x00000800,
	PMCS_WF2EN		= 0x00000400,
	PMCS_WF1EN		= 0x00000200,
	PMCS_WF0EN		= 0x00000100,
	PMCS_LFEN		= 0x00000004,
	PMCS_LREN		= 0x00000002,
	PMCS_MFEN		= 0x00000001,
};
857
/*
 * Giga PHY Status Registers
 */
enum jme_phy_link_bit_mask {
	PHY_LINK_SPEED_MASK		= 0x0000C000,	/* see speed values below */
	PHY_LINK_DUPLEX			= 0x00002000,
	PHY_LINK_SPEEDDPU_RESOLVED	= 0x00000800,	/* speed/duplex resolved */
	PHY_LINK_UP			= 0x00000400,
	PHY_LINK_AUTONEG_COMPLETE	= 0x00000200,
	PHY_LINK_MDI_STAT		= 0x00000040,
};

/* Values for the PHY_LINK_SPEED_MASK field */
enum jme_phy_link_speed_val {
	PHY_LINK_SPEED_10M		= 0x00000000,
	PHY_LINK_SPEED_100M		= 0x00004000,
	PHY_LINK_SPEED_1000M		= 0x00008000,
};

/* Timeout when waiting for PHY_LINK_SPEEDDPU_RESOLVED */
#define JME_SPDRSV_TIMEOUT	500	/* 500 us */
877
/*
 * SMB Control and Status
 * (SMBus interface used for EEPROM access)
 */
enum jme_smbcsr_bit_mask {
	SMBCSR_CNACK		= 0x00020000,
	SMBCSR_RELOAD		= 0x00010000,	/* trigger EEPROM reload */
	SMBCSR_EEPROMD		= 0x00000020,	/* EEPROM present/detected */
	SMBCSR_INITDONE		= 0x00000010,
	SMBCSR_BUSY		= 0x0000000F,
};

/* SMB interface register fields */
enum jme_smbintf_bit_mask {
	SMBINTF_HWDATR		= 0xFF000000,	/* hardware data read */
	SMBINTF_HWDATW		= 0x00FF0000,	/* hardware data write */
	SMBINTF_HWADDR		= 0x0000FF00,	/* hardware address */
	SMBINTF_HWRWN		= 0x00000020,	/* read/write select */
	SMBINTF_HWCMD		= 0x00000010,	/* start hardware command */
	SMBINTF_FASTM		= 0x00000008,	/* fast mode */
	SMBINTF_GPIOSCL		= 0x00000004,	/* bit-bang SCL */
	SMBINTF_GPIOSDA		= 0x00000002,	/* bit-bang SDA */
	SMBINTF_GPIOEN		= 0x00000001,	/* enable bit-bang GPIO mode */
};

/* Values for the SMBINTF_HWRWN field */
enum jme_smbintf_vals {
	SMBINTF_HWRWN_READ	= 0x00000020,
	SMBINTF_HWRWN_WRITE	= 0x00000000,
};

/* Bit positions of the multi-bit SMBINTF fields */
enum jme_smbintf_shifts {
	SMBINTF_HWDATR_SHIFT	= 24,
	SMBINTF_HWDATW_SHIFT	= 16,
	SMBINTF_HWADDR_SHIFT	= 8,
};

#define JME_EEPROM_RELOAD_TIMEOUT	2000	/* 2000 msec */
#define JME_SMB_BUSY_TIMEOUT		20	/* 20 msec */
#define JME_SMB_LEN			256	/* SMB/EEPROM data length */
#define JME_EEPROM_MAGIC		0x250
916
/*
 * Timer Control/Status Register
 */
enum jme_tmcsr_bit_masks {
	TMCSR_SWIT	= 0x80000000,	/* software interrupt trigger */
	TMCSR_EN	= 0x01000000,	/* timer enable */
	TMCSR_CNT	= 0x00FFFFFF,	/* timer count field */
};

/*
 * General Purpose REG-0
 */
enum jme_gpreg0_masks {
	GPREG0_DISSH		= 0xFF000000,	/* disable shadow, per dword */
	GPREG0_PCIRLMT		= 0x00300000,	/* PCI retry limit */
	GPREG0_PCCNOMUTCLR	= 0x00040000,
	GPREG0_LNKINTPOLL	= 0x00001000,	/* link interrupt polling */
	GPREG0_PCCTMR		= 0x00000300,	/* PCC timer granularity */
	GPREG0_PHYADDR		= 0x0000001F,	/* PHY address */
};

/* Values for the GPREG0 fields above */
enum jme_gpreg0_vals {
	GPREG0_DISSH_DW7	= 0x80000000,
	GPREG0_DISSH_DW6	= 0x40000000,
	GPREG0_DISSH_DW5	= 0x20000000,
	GPREG0_DISSH_DW4	= 0x10000000,
	GPREG0_DISSH_DW3	= 0x08000000,
	GPREG0_DISSH_DW2	= 0x04000000,
	GPREG0_DISSH_DW1	= 0x02000000,
	GPREG0_DISSH_DW0	= 0x01000000,
	GPREG0_DISSH_ALL	= 0xFF000000,

	GPREG0_PCIRLMT_8	= 0x00000000,
	GPREG0_PCIRLMT_6	= 0x00100000,
	GPREG0_PCIRLMT_5	= 0x00200000,
	GPREG0_PCIRLMT_4	= 0x00300000,

	GPREG0_PCCTMR_16ns	= 0x00000000,
	GPREG0_PCCTMR_256ns	= 0x00000100,
	GPREG0_PCCTMR_1us	= 0x00000200,
	GPREG0_PCCTMR_1ms	= 0x00000300,

	GPREG0_PHYADDR_1	= 0x00000001,

	/* Default GPREG0 configuration programmed by the driver */
	GPREG0_DEFAULT		= GPREG0_PCIRLMT_4 |
				  GPREG0_PCCTMR_1us |
				  GPREG0_PHYADDR_1,
};
965
/*
 * Interrupt Status Bits
 *
 * PCCRXnTO/PCCTXTO are packet-completion-coalescing timeouts,
 * PCCRXn/PCCTX are coalescing-threshold events, RXnEMP signals an
 * empty RX descriptor ring, RXn/TXn are per-queue completion bits.
 */
enum jme_interrupt_bits {
	INTR_SWINTR	= 0x80000000,	/* software interrupt */
	INTR_TMINTR	= 0x40000000,	/* timer interrupt */
	INTR_LINKCH	= 0x20000000,	/* link change */
	INTR_PAUSERCV	= 0x10000000,	/* pause frame received */
	INTR_MAGICRCV	= 0x08000000,	/* magic packet received */
	INTR_WAKERCV	= 0x04000000,	/* wakeup frame received */
	INTR_PCCRX0TO	= 0x02000000,
	INTR_PCCRX1TO	= 0x01000000,
	INTR_PCCRX2TO	= 0x00800000,
	INTR_PCCRX3TO	= 0x00400000,
	INTR_PCCTXTO	= 0x00200000,
	INTR_PCCRX0	= 0x00100000,
	INTR_PCCRX1	= 0x00080000,
	INTR_PCCRX2	= 0x00040000,
	INTR_PCCRX3	= 0x00020000,
	INTR_PCCTX	= 0x00010000,
	INTR_RX3EMP	= 0x00008000,
	INTR_RX2EMP	= 0x00004000,
	INTR_RX1EMP	= 0x00002000,
	INTR_RX0EMP	= 0x00001000,
	INTR_RX3	= 0x00000800,
	INTR_RX2	= 0x00000400,
	INTR_RX1	= 0x00000200,
	INTR_RX0	= 0x00000100,
	INTR_TX7	= 0x00000080,
	INTR_TX6	= 0x00000040,
	INTR_TX5	= 0x00000020,
	INTR_TX4	= 0x00000010,
	INTR_TX3	= 0x00000008,
	INTR_TX2	= 0x00000004,
	INTR_TX1	= 0x00000002,
	INTR_TX0	= 0x00000001,
};

/*
 * Interrupts the driver enables: only queue 0 events plus software,
 * timer and link-change interrupts.
 */
static const u32 INTR_ENABLE = INTR_SWINTR |
			       INTR_TMINTR |
			       INTR_LINKCH |
			       INTR_PCCRX0TO |
			       INTR_PCCRX0 |
			       INTR_PCCTXTO |
			       INTR_PCCTX |
			       INTR_RX0EMP;
1012
/*
 * PCC (Packet Completion Coalescing) Control Registers
 */
enum jme_pccrx_masks {
	PCCRXTO_MASK	= 0xFFFF0000,	/* RX coalescing timeout */
	PCCRX_MASK	= 0x0000FF00,	/* RX coalescing packet count */
};

enum jme_pcctx_masks {
	PCCTXTO_MASK	= 0xFFFF0000,	/* TX coalescing timeout */
	PCCTX_MASK	= 0x0000FF00,	/* TX coalescing packet count */
	PCCTX_QS_MASK	= 0x000000FF,	/* per-queue enable bits, see below */
};

/* Bit positions of the PCC RX fields */
enum jme_pccrx_shifts {
	PCCRXTO_SHIFT	= 16,
	PCCRX_SHIFT	= 8,
};

/* Bit positions of the PCC TX fields */
enum jme_pcctx_shifts {
	PCCTXTO_SHIFT	= 16,
	PCCTX_SHIFT	= 8,
};

/* Per-TX-queue coalescing enable bits (PCCTX_QS_MASK) */
enum jme_pcctx_bits {
	PCCTXQ0_EN	= 0x00000001,
	PCCTXQ1_EN	= 0x00000002,
	PCCTXQ2_EN	= 0x00000004,
	PCCTXQ3_EN	= 0x00000008,
	PCCTXQ4_EN	= 0x00000010,
	PCCTXQ5_EN	= 0x00000020,
	PCCTXQ6_EN	= 0x00000040,
	PCCTXQ7_EN	= 0x00000080,
};
1047
/*
 * Chip Mode Register
 */
enum jme_chipmode_bit_masks {
	CM_FPGAVER_MASK		= 0xFFFF0000,	/* FPGA version (pre-silicon) */
	CM_CHIPREV_MASK		= 0x0000FF00,	/* chip revision */
	CM_CHIPMODE_MASK	= 0x0000000F,
};

/* Bit positions of the chip mode fields */
enum jme_chipmode_shifts {
	CM_FPGAVER_SHIFT	= 16,
	CM_CHIPREV_SHIFT	= 8,
};

/*
 * Shadow base address register bits
 */
enum jme_shadow_base_address_bits {
	SHBA_POSTEN	= 0x1,	/* enable shadow posting */
};

/*
 * Aggressive Power Mode Control
 */
enum jme_apmc_bits {
	JME_APMC_PCIE_SD_EN	= 0x40000000,	/* PCIe shutdown enable */
	JME_APMC_PSEUDO_HP_EN	= 0x20000000,	/* pseudo hot-plug enable */
	JME_APMC_EPIEN		= 0x04000000,
	JME_APMC_EPIEN_CTRL	= 0x03000000,	/* see control values below */
};

/* Values for the JME_APMC_EPIEN_CTRL field */
enum jme_apmc_values {
	JME_APMC_EPIEN_CTRL_EN	= 0x02000000,
	JME_APMC_EPIEN_CTRL_DIS	= 0x01000000,
};

/* Delay used with pseudo hot-plug shutdown (units per caller usage) */
#define APMC_PHP_SHUTDOWN_DELAY	(10 * 1000 * 1000)
1085
#ifdef REG_DEBUG
/*
 * Register-name lookup tables for debug tracing.  Indexed by
 * (register offset & 0xFF) >> 2, i.e. by 32-bit word within each
 * 0x100-byte register bank.  "UNKNOWN" marks unassigned offsets.
 */
static char *MAC_REG_NAME[] = {
	"JME_TXCS", "JME_TXDBA_LO", "JME_TXDBA_HI", "JME_TXQDC",
	"JME_TXNDA", "JME_TXMCS", "JME_TXPFC", "JME_TXTRHD",
	"JME_RXCS", "JME_RXDBA_LO", "JME_RXDBA_HI", "JME_RXQDC",
	"JME_RXNDA", "JME_RXMCS", "JME_RXUMA_LO", "JME_RXUMA_HI",
	"JME_RXMCHT_LO", "JME_RXMCHT_HI", "JME_WFODP", "JME_WFOI",
	"JME_SMI", "JME_GHC", "UNKNOWN", "UNKNOWN",
	"JME_PMCS"};

static char *PE_REG_NAME[] = {
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
	"UNKNOWN", "UNKNOWN", "JME_PHY_CS", "UNKNOWN",
	"JME_PHY_LINK", "UNKNOWN", "UNKNOWN", "UNKNOWN",
	"JME_SMBCSR", "JME_SMBINTF"};

static char *MISC_REG_NAME[] = {
	"JME_TMCSR", "JME_GPIO", "JME_GPREG0", "JME_GPREG1",
	"JME_IEVE", "JME_IREQ", "JME_IENS", "JME_IENC",
	"JME_PCCRX0", "JME_PCCRX1", "JME_PCCRX2", "JME_PCCRX3",
	"JME_PCCTX0", "JME_CHIPMODE", "JME_SHBA_HI", "JME_SHBA_LO",
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
	"JME_TIMER1", "JME_TIMER2", "UNKNOWN", "JME_APMC",
	"JME_PCCSRX0"};

/*
 * reg_dbg - print one register access with its symbolic name
 * @jme: adapter whose netdev name prefixes the message
 * @msg: description of the access ("REG WRITE", ...)
 * @val: value read or written
 * @reg: byte offset of the register; bits 8-11 select the bank
 *
 * NOTE(review): an in-bank offset past the end of the selected name
 * table would index out of bounds; this debug helper assumes callers
 * only pass offsets covered by the tables above.
 */
static inline void reg_dbg(const struct jme_adapter *jme,
		const char *msg, u32 val, u32 reg)
{
	const char *regname;
	switch (reg & 0xF00) {
	case 0x000:
		regname = MAC_REG_NAME[(reg & 0xFF) >> 2];
		break;
	case 0x400:
		regname = PE_REG_NAME[(reg & 0xFF) >> 2];
		break;
	case 0x800:
		regname = MISC_REG_NAME[(reg & 0xFF) >> 2];
		break;
	default:
		regname = PE_REG_NAME[0];	/* "UNKNOWN" */
	}
	printk(KERN_DEBUG "%s: %-20s %08x@%s\n", jme->dev->name,
			msg, val, regname);
}
#else
/* No-op stub when register debugging is compiled out */
static inline void reg_dbg(const struct jme_adapter *jme,
		const char *msg, u32 val, u32 reg) {}
#endif
1138
/*
 * Read/Write MMaped I/O Registers
 */
/* Read the 32-bit device register at byte offset @reg. */
static inline u32 jread32(struct jme_adapter *jme, u32 reg)
{
	return readl(jme->regs + reg);
}
1146
/*
 * Write @val to the 32-bit device register at byte offset @reg,
 * tracing the access when REG_DEBUG is enabled (the second reg_dbg
 * reads the register back purely for the trace).
 */
static inline void jwrite32(struct jme_adapter *jme, u32 reg, u32 val)
{
	reg_dbg(jme, "REG WRITE", val, reg);
	writel(val, jme->regs + reg);
	reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg);
}
1153
/*
 * Write @val to register @reg and flush the posted write by reading
 * the register back immediately.
 */
static inline void jwrite32f(struct jme_adapter *jme, u32 reg, u32 val)
{
	/*
	 * Read after write should cause flush
	 */
	reg_dbg(jme, "REG WRITE FLUSH", val, reg);
	writel(val, jme->regs + reg);
	readl(jme->regs + reg);
	reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg);
}
1164
/*
 * PHY Regs
 * (fields of PHY register 17; mirrors the jme_phy_link bits above)
 */
enum jme_phy_reg17_bit_masks {
	PREG17_SPEED		= 0xC000,	/* see speed values below */
	PREG17_DUPLEX		= 0x2000,
	PREG17_SPDRSV		= 0x0800,	/* speed/duplex resolved */
	PREG17_LNKUP		= 0x0400,
	PREG17_MDI		= 0x0040,
};

/* Values for the PREG17_SPEED field */
enum jme_phy_reg17_vals {
	PREG17_SPEED_10M	= 0x0000,
	PREG17_SPEED_100M	= 0x4000,
	PREG17_SPEED_1000M	= 0x8000,
};

/* Auto-negotiation complete bit in the PHY BMSR register */
#define BMSR_ANCOMP		0x0020
1183
1184/*
1185 * Workaround
1186 */
1187static inline int is_buggy250(unsigned short device, unsigned int chiprev)
1188{
1189 return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
1190}
1191
/*
 * Function prototypes
 * (forward declarations for routines defined in the driver's .c file
 * that are referenced before their definitions)
 */
static int jme_set_settings(struct net_device *netdev,
				struct ethtool_cmd *ecmd);
static void jme_set_multi(struct net_device *netdev);
1199#endif
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 0a97c26df6ab..a1e22ed1f6ee 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -41,7 +41,7 @@
41#endif 41#endif
42 42
43#if MFE_DEBUG>=1 43#if MFE_DEBUG>=1
44#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __FUNCTION__ , ## args) 44#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args)
45#define MFE_RX_DEBUG 2 45#define MFE_RX_DEBUG 2
46#else 46#else
47#define DPRINTK(str,args...) 47#define DPRINTK(str,args...)
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 6d343efb2717..4e7a5faf0351 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -203,7 +203,7 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
203 203
204out_badirq: 204out_badirq:
205 printk(KERN_INFO "%s: %s(): irq %d for unknown device\n", 205 printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
206 dev->name, __FUNCTION__, irq); 206 dev->name, __func__, irq);
207 return ret; 207 return ret;
208} 208}
209 209
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 096bca54bcf7..b411b79d72ad 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/mm.h>
36#include <linux/bitmap.h> 37#include <linux/bitmap.h>
37#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
38#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index d6524db321af..005f2aa75019 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -183,7 +183,7 @@ struct myri10ge_slice_state {
183 dma_addr_t fw_stats_bus; 183 dma_addr_t fw_stats_bus;
184 int watchdog_tx_done; 184 int watchdog_tx_done;
185 int watchdog_tx_req; 185 int watchdog_tx_req;
186#ifdef CONFIG_DCA 186#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
187 int cached_dca_tag; 187 int cached_dca_tag;
188 int cpu; 188 int cpu;
189 __be32 __iomem *dca_tag; 189 __be32 __iomem *dca_tag;
@@ -215,7 +215,7 @@ struct myri10ge_priv {
215 int msi_enabled; 215 int msi_enabled;
216 int msix_enabled; 216 int msix_enabled;
217 struct msix_entry *msix_vectors; 217 struct msix_entry *msix_vectors;
218#ifdef CONFIG_DCA 218#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
219 int dca_enabled; 219 int dca_enabled;
220#endif 220#endif
221 u32 link_state; 221 u32 link_state;
@@ -891,7 +891,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
891 struct myri10ge_slice_state *ss; 891 struct myri10ge_slice_state *ss;
892 int i, status; 892 int i, status;
893 size_t bytes; 893 size_t bytes;
894#ifdef CONFIG_DCA 894#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
895 unsigned long dca_tag_off; 895 unsigned long dca_tag_off;
896#endif 896#endif
897 897
@@ -986,7 +986,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
986 } 986 }
987 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); 987 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
988 988
989#ifdef CONFIG_DCA 989#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
990 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0); 990 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
991 dca_tag_off = cmd.data0; 991 dca_tag_off = cmd.data0;
992 for (i = 0; i < mgp->num_slices; i++) { 992 for (i = 0; i < mgp->num_slices; i++) {
@@ -1025,7 +1025,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
1025 return status; 1025 return status;
1026} 1026}
1027 1027
1028#ifdef CONFIG_DCA 1028#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
1029static void 1029static void
1030myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag) 1030myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
1031{ 1031{
@@ -1060,8 +1060,9 @@ static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
1060 } 1060 }
1061 err = dca_add_requester(&pdev->dev); 1061 err = dca_add_requester(&pdev->dev);
1062 if (err) { 1062 if (err) {
1063 dev_err(&pdev->dev, 1063 if (err != -ENODEV)
1064 "dca_add_requester() failed, err=%d\n", err); 1064 dev_err(&pdev->dev,
1065 "dca_add_requester() failed, err=%d\n", err);
1065 return; 1066 return;
1066 } 1067 }
1067 mgp->dca_enabled = 1; 1068 mgp->dca_enabled = 1;
@@ -1457,7 +1458,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1457 struct net_device *netdev = ss->mgp->dev; 1458 struct net_device *netdev = ss->mgp->dev;
1458 int work_done; 1459 int work_done;
1459 1460
1460#ifdef CONFIG_DCA 1461#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
1461 if (ss->mgp->dca_enabled) 1462 if (ss->mgp->dca_enabled)
1462 myri10ge_update_dca(ss); 1463 myri10ge_update_dca(ss);
1463#endif 1464#endif
@@ -1686,8 +1687,8 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
1686 "tx_boundary", "WC", "irq", "MSI", "MSIX", 1687 "tx_boundary", "WC", "irq", "MSI", "MSIX",
1687 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1688 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
1688 "serial_number", "watchdog_resets", 1689 "serial_number", "watchdog_resets",
1689#ifdef CONFIG_DCA 1690#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
1690 "dca_capable", "dca_enabled", 1691 "dca_capable_firmware", "dca_device_present",
1691#endif 1692#endif
1692 "link_changes", "link_up", "dropped_link_overflow", 1693 "link_changes", "link_up", "dropped_link_overflow",
1693 "dropped_link_error_or_filtered", 1694 "dropped_link_error_or_filtered",
@@ -1765,7 +1766,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1765 data[i++] = (unsigned int)mgp->read_write_dma; 1766 data[i++] = (unsigned int)mgp->read_write_dma;
1766 data[i++] = (unsigned int)mgp->serial_number; 1767 data[i++] = (unsigned int)mgp->serial_number;
1767 data[i++] = (unsigned int)mgp->watchdog_resets; 1768 data[i++] = (unsigned int)mgp->watchdog_resets;
1768#ifdef CONFIG_DCA 1769#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
1769 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL); 1770 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
1770 data[i++] = (unsigned int)(mgp->dca_enabled); 1771 data[i++] = (unsigned int)(mgp->dca_enabled);
1771#endif 1772#endif
@@ -3763,7 +3764,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3763 dev_err(&pdev->dev, "failed reset\n"); 3764 dev_err(&pdev->dev, "failed reset\n");
3764 goto abort_with_slices; 3765 goto abort_with_slices;
3765 } 3766 }
3766#ifdef CONFIG_DCA 3767#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3767 myri10ge_setup_dca(mgp); 3768 myri10ge_setup_dca(mgp);
3768#endif 3769#endif
3769 pci_set_drvdata(pdev, mgp); 3770 pci_set_drvdata(pdev, mgp);
@@ -3866,7 +3867,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
3866 netdev = mgp->dev; 3867 netdev = mgp->dev;
3867 unregister_netdev(netdev); 3868 unregister_netdev(netdev);
3868 3869
3869#ifdef CONFIG_DCA 3870#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3870 myri10ge_teardown_dca(mgp); 3871 myri10ge_teardown_dca(mgp);
3871#endif 3872#endif
3872 myri10ge_dummy_rdma(mgp, 0); 3873 myri10ge_dummy_rdma(mgp, 0);
@@ -3911,7 +3912,7 @@ static struct pci_driver myri10ge_driver = {
3911#endif 3912#endif
3912}; 3913};
3913 3914
3914#ifdef CONFIG_DCA 3915#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3915static int 3916static int
3916myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p) 3917myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
3917{ 3918{
@@ -3943,7 +3944,7 @@ static __init int myri10ge_init_module(void)
3943 myri10ge_driver.name, myri10ge_rss_hash); 3944 myri10ge_driver.name, myri10ge_rss_hash);
3944 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT; 3945 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
3945 } 3946 }
3946#ifdef CONFIG_DCA 3947#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3947 dca_register_notify(&myri10ge_dca_notifier); 3948 dca_register_notify(&myri10ge_dca_notifier);
3948#endif 3949#endif
3949 3950
@@ -3954,7 +3955,7 @@ module_init(myri10ge_init_module);
3954 3955
3955static __exit void myri10ge_cleanup_module(void) 3956static __exit void myri10ge_cleanup_module(void)
3956{ 3957{
3957#ifdef CONFIG_DCA 3958#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3958 dca_unregister_notify(&myri10ge_dca_notifier); 3959 dca_unregister_notify(&myri10ge_dca_notifier);
3959#endif 3960#endif
3960 pci_unregister_driver(&myri10ge_driver); 3961 pci_unregister_driver(&myri10ge_driver);
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index fa3ceca4e15c..79599900c4b5 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -844,8 +844,12 @@ static int ne_drv_suspend(struct platform_device *pdev, pm_message_t state)
844{ 844{
845 struct net_device *dev = platform_get_drvdata(pdev); 845 struct net_device *dev = platform_get_drvdata(pdev);
846 846
847 if (netif_running(dev)) 847 if (netif_running(dev)) {
848 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
848 netif_device_detach(dev); 849 netif_device_detach(dev);
850 if (idev)
851 pnp_stop_dev(idev);
852 }
849 return 0; 853 return 0;
850} 854}
851 855
@@ -854,6 +858,9 @@ static int ne_drv_resume(struct platform_device *pdev)
854 struct net_device *dev = platform_get_drvdata(pdev); 858 struct net_device *dev = platform_get_drvdata(pdev);
855 859
856 if (netif_running(dev)) { 860 if (netif_running(dev)) {
861 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
862 if (idev)
863 pnp_start_dev(idev);
857 ne_reset_8390(dev); 864 ne_reset_8390(dev);
858 NS8390p_init(dev, 1); 865 NS8390p_init(dev, 1);
859 netif_device_attach(dev); 866 netif_device_attach(dev);
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 3f9af759cb90..b9bed82e1d21 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -189,7 +189,7 @@ netx_eth_interrupt(int irq, void *dev_id)
189 189
190 if ((status & ISR_CON_HI) || (status & ISR_IND_HI)) 190 if ((status & ISR_CON_HI) || (status & ISR_IND_HI))
191 printk("%s: unexpected status: 0x%08x\n", 191 printk("%s: unexpected status: 0x%08x\n",
192 __FUNCTION__, status); 192 __func__, status);
193 193
194 fill_level = 194 fill_level =
195 readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id))); 195 readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id)));
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 244ab49c4337..f8e601c51da7 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -742,7 +742,7 @@ extern char netxen_nic_driver_name[];
742 } while (0) 742 } while (0)
743#else 743#else
744#define DPRINTK(klevel, fmt, args...) do { \ 744#define DPRINTK(klevel, fmt, args...) do { \
745 printk(KERN_##klevel PFX "%s: %s: " fmt, __FUNCTION__,\ 745 printk(KERN_##klevel PFX "%s: %s: " fmt, __func__,\
746 (adapter != NULL && adapter->netdev != NULL) ? \ 746 (adapter != NULL && adapter->netdev != NULL) ? \
747 adapter->netdev->name : NULL, \ 747 adapter->netdev->name : NULL, \
748 ## args); } while(0) 748 ## args); } while(0)
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 32bb47adbe39..6ef3f0d84bcf 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -77,18 +77,18 @@ static irqreturn_t netxen_msi_intr(int irq, void *data);
77 77
78/* PCI Device ID Table */ 78/* PCI Device ID Table */
79#define ENTRY(device) \ 79#define ENTRY(device) \
80 {PCI_DEVICE(0x4040, (device)), \ 80 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} 81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
82 82
83static struct pci_device_id netxen_pci_tbl[] __devinitdata = { 83static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
84 ENTRY(0x0001), 84 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
85 ENTRY(0x0002), 85 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
86 ENTRY(0x0003), 86 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
87 ENTRY(0x0004), 87 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
88 ENTRY(0x0005), 88 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
89 ENTRY(0x0024), 89 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
90 ENTRY(0x0025), 90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
91 ENTRY(0x0100), 91 ENTRY(PCI_DEVICE_ID_NX3031),
92 {0,} 92 {0,}
93}; 93};
94 94
@@ -241,7 +241,7 @@ static void netxen_check_options(struct netxen_adapter *adapter)
241 case NETXEN_BRDTYPE_P3_REF_QG: 241 case NETXEN_BRDTYPE_P3_REF_QG:
242 case NETXEN_BRDTYPE_P3_4_GB: 242 case NETXEN_BRDTYPE_P3_4_GB:
243 case NETXEN_BRDTYPE_P3_4_GB_MM: 243 case NETXEN_BRDTYPE_P3_4_GB_MM:
244 adapter->msix_supported = 0; 244 adapter->msix_supported = !!use_msi_x;
245 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G; 245 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
246 break; 246 break;
247 247
@@ -359,16 +359,6 @@ static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
359 int i, pos; 359 int i, pos;
360 struct pci_dev *pdev; 360 struct pci_dev *pdev;
361 361
362 pdev = pci_get_device(0x1166, 0x0140, NULL);
363 if (pdev) {
364 pci_dev_put(pdev);
365 adapter->hw_read_wx(adapter,
366 NETXEN_PCIE_REG(PCIE_TGT_SPLIT_CHICKEN), &chicken, 4);
367 chicken |= 0x4000;
368 adapter->hw_write_wx(adapter,
369 NETXEN_PCIE_REG(PCIE_TGT_SPLIT_CHICKEN), &chicken, 4);
370 }
371
372 pdev = adapter->pdev; 362 pdev = adapter->pdev;
373 363
374 adapter->hw_read_wx(adapter, 364 adapter->hw_read_wx(adapter,
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 53451c3b2c0d..0a575fef29e6 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -119,7 +119,7 @@ KERN_INFO " Support available from http://foo.com/bar/baz.html\n";
119 119
120#ifdef NETDRV_DEBUG 120#ifdef NETDRV_DEBUG
121/* note: prints function name for you */ 121/* note: prints function name for you */
122# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args) 122# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
123#else 123#else
124# define DPRINTK(fmt, args...) 124# define DPRINTK(fmt, args...)
125#endif 125#endif
@@ -130,7 +130,7 @@ KERN_INFO " Support available from http://foo.com/bar/baz.html\n";
130# define assert(expr) \ 130# define assert(expr) \
131 if(!(expr)) { \ 131 if(!(expr)) { \
132 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 132 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
133 #expr,__FILE__,__FUNCTION__,__LINE__); \ 133 #expr,__FILE__,__func__,__LINE__); \
134 } 134 }
135#endif 135#endif
136 136
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 3f682d49a4e6..52bf11b73c6e 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -784,6 +784,7 @@ static struct pcmcia_device_id axnet_ids[] = {
784 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5), 784 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5),
785 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e), 785 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e),
786 PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90), 786 PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90),
787 PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
787 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8), 788 PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
788 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609), 789 PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609),
789 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04), 790 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04),
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2d4c4ad89b8d..ebc1ae6bcbe5 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1626,6 +1626,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1626 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), 1626 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
1627 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), 1627 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
1628 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa), 1628 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-T", 0x5261440f, 0x6705fcaa),
1629 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether PCC-TD", 0x5261440f, 0x47d5ca83),
1629 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), 1630 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9),
1630 PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), 1631 PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2),
1631 PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), 1632 PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2),
@@ -1737,7 +1738,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
1737 PCMCIA_DEVICE_PROD_ID1("CyQ've 10 Base-T LAN CARD", 0x94faf360), 1738 PCMCIA_DEVICE_PROD_ID1("CyQ've 10 Base-T LAN CARD", 0x94faf360),
1738 PCMCIA_DEVICE_PROD_ID1("EP-210 PCMCIA LAN CARD.", 0x8850b4de), 1739 PCMCIA_DEVICE_PROD_ID1("EP-210 PCMCIA LAN CARD.", 0x8850b4de),
1739 PCMCIA_DEVICE_PROD_ID1("ETHER-C16", 0x06a8514f), 1740 PCMCIA_DEVICE_PROD_ID1("ETHER-C16", 0x06a8514f),
1740 PCMCIA_DEVICE_PROD_ID1("IC-CARD", 0x60cb09a6),
1741 PCMCIA_DEVICE_PROD_ID1("NE2000 Compatible", 0x75b8ad5a), 1741 PCMCIA_DEVICE_PROD_ID1("NE2000 Compatible", 0x75b8ad5a),
1742 PCMCIA_DEVICE_PROD_ID2("EN-6200P2", 0xa996d078), 1742 PCMCIA_DEVICE_PROD_ID2("EN-6200P2", 0xa996d078),
1743 /* too generic! */ 1743 /* too generic! */
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index ddccc074a76a..5d4d21516a6c 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1833,9 +1833,11 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1833 1833
1834 /* If the queue is getting long, don't wait any longer for packets 1834 /* If the queue is getting long, don't wait any longer for packets
1835 before the start of the queue. */ 1835 before the start of the queue. */
1836 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN 1836 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
1837 && seq_before(ppp->minseq, ppp->mrq.next->sequence)) 1837 struct sk_buff *skb = skb_peek(&ppp->mrq);
1838 ppp->minseq = ppp->mrq.next->sequence; 1838 if (seq_before(ppp->minseq, skb->sequence))
1839 ppp->minseq = skb->sequence;
1840 }
1839 1841
1840 /* Pull completed packets off the queue and receive them. */ 1842 /* Pull completed packets off the queue and receive them. */
1841 while ((skb = ppp_mp_reconstruct(ppp))) 1843 while ((skb = ppp_mp_reconstruct(ppp)))
@@ -1864,7 +1866,7 @@ ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
1864 for (p = list->next; p != (struct sk_buff *)list; p = p->next) 1866 for (p = list->next; p != (struct sk_buff *)list; p = p->next)
1865 if (seq_before(seq, p->sequence)) 1867 if (seq_before(seq, p->sequence))
1866 break; 1868 break;
1867 __skb_insert(skb, p->prev, p, list); 1869 __skb_queue_before(list, p, skb);
1868} 1870}
1869 1871
1870/* 1872/*
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index ff175e8f36b2..185b1dff10a8 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -353,7 +353,7 @@ static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_
353 spin_lock_bh(&session->reorder_q.lock); 353 spin_lock_bh(&session->reorder_q.lock);
354 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 354 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
355 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) { 355 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
356 __skb_insert(skb, skbp->prev, skbp, &session->reorder_q); 356 __skb_queue_before(&session->reorder_q, skbp, skb);
357 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, 357 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
358 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", 358 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
359 session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns, 359 session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
diff --git a/drivers/net/qlge/Makefile b/drivers/net/qlge/Makefile
new file mode 100644
index 000000000000..8a197658d76f
--- /dev/null
+++ b/drivers/net/qlge/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Qlogic 10GbE PCI Express ethernet driver
3#
4
5obj-$(CONFIG_QLGE) += qlge.o
6
7qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
new file mode 100644
index 000000000000..c37ea436c918
--- /dev/null
+++ b/drivers/net/qlge/qlge.h
@@ -0,0 +1,1593 @@
1/*
2 * QLogic QLA41xx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qlge for copyright and licensing details.
6 */
7#ifndef _QLGE_H_
8#define _QLGE_H_
9
10#include <linux/pci.h>
11#include <linux/netdevice.h>
12
13/*
14 * General definitions...
15 */
16#define DRV_NAME "qlge"
17#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
18#define DRV_VERSION "v1.00.00-b3"
19
20#define PFX "qlge: "
21#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
22 do { \
23 if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
24 ; \
25 else \
26 dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
27 "%s: " fmt, __func__, ##args); \
28 } while (0)
29
30#define QLGE_VENDOR_ID 0x1077
31#define QLGE_DEVICE_ID1 0x8012
32#define QLGE_DEVICE_ID 0x8000
33
34#define MAX_RX_RINGS 128
35#define MAX_TX_RINGS 128
36
37#define NUM_TX_RING_ENTRIES 256
38#define NUM_RX_RING_ENTRIES 256
39
40#define NUM_SMALL_BUFFERS 512
41#define NUM_LARGE_BUFFERS 512
42
43#define SMALL_BUFFER_SIZE 256
44#define LARGE_BUFFER_SIZE PAGE_SIZE
45#define MAX_SPLIT_SIZE 1023
46#define QLGE_SB_PAD 32
47
48#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
49#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
50#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
51#define UDELAY_COUNT 3
52#define UDELAY_DELAY 10
53
54
55#define TX_DESC_PER_IOCB 8
56/* The maximum number of frags we handle is based
57 * on PAGE_SIZE...
58 */
59#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
60#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
61#elif (PAGE_SHIFT == 16) /* 64k pages */
62#define TX_DESC_PER_OAL 0
63#endif
64
65#define DB_PAGE_SIZE 4096
66
67/*
68 * Processor Address Register (PROC_ADDR) bit definitions.
69 */
70enum {
71
72 /* Misc. stuff */
73 MAILBOX_COUNT = 16,
74
75 PROC_ADDR_RDY = (1 << 31),
76 PROC_ADDR_R = (1 << 30),
77 PROC_ADDR_ERR = (1 << 29),
78 PROC_ADDR_DA = (1 << 28),
79 PROC_ADDR_FUNC0_MBI = 0x00001180,
80 PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
81 PROC_ADDR_FUNC0_CTL = 0x000011a1,
82 PROC_ADDR_FUNC2_MBI = 0x00001280,
83 PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
84 PROC_ADDR_FUNC2_CTL = 0x000012a1,
85 PROC_ADDR_MPI_RISC = 0x00000000,
86 PROC_ADDR_MDE = 0x00010000,
87 PROC_ADDR_REGBLOCK = 0x00020000,
88 PROC_ADDR_RISC_REG = 0x00030000,
89};
90
91/*
92 * System Register (SYS) bit definitions.
93 */
94enum {
95 SYS_EFE = (1 << 0),
96 SYS_FAE = (1 << 1),
97 SYS_MDC = (1 << 2),
98 SYS_DST = (1 << 3),
99 SYS_DWC = (1 << 4),
100 SYS_EVW = (1 << 5),
101 SYS_OMP_DLY_MASK = 0x3f000000,
102 /*
103 * There are no values defined as of edit #15.
104 */
105 SYS_ODI = (1 << 14),
106};
107
108/*
109 * Reset/Failover Register (RST_FO) bit definitions.
110 */
111enum {
112 RST_FO_TFO = (1 << 0),
113 RST_FO_RR_MASK = 0x00060000,
114 RST_FO_RR_CQ_CAM = 0x00000000,
115 RST_FO_RR_DROP = 0x00000001,
116 RST_FO_RR_DQ = 0x00000002,
117 RST_FO_RR_RCV_FUNC_CQ = 0x00000003,
118 RST_FO_FRB = (1 << 12),
119 RST_FO_MOP = (1 << 13),
120 RST_FO_REG = (1 << 14),
121 RST_FO_FR = (1 << 15),
122};
123
124/*
125 * Function Specific Control Register (FSC) bit definitions.
126 */
127enum {
128 FSC_DBRST_MASK = 0x00070000,
129 FSC_DBRST_256 = 0x00000000,
130 FSC_DBRST_512 = 0x00000001,
131 FSC_DBRST_768 = 0x00000002,
132 FSC_DBRST_1024 = 0x00000003,
133 FSC_DBL_MASK = 0x00180000,
134 FSC_DBL_DBRST = 0x00000000,
135 FSC_DBL_MAX_PLD = 0x00000008,
136 FSC_DBL_MAX_BRST = 0x00000010,
137 FSC_DBL_128_BYTES = 0x00000018,
138 FSC_EC = (1 << 5),
139 FSC_EPC_MASK = 0x00c00000,
140 FSC_EPC_INBOUND = (1 << 6),
141 FSC_EPC_OUTBOUND = (1 << 7),
142 FSC_VM_PAGESIZE_MASK = 0x07000000,
143 FSC_VM_PAGE_2K = 0x00000100,
144 FSC_VM_PAGE_4K = 0x00000200,
145 FSC_VM_PAGE_8K = 0x00000300,
146 FSC_VM_PAGE_64K = 0x00000600,
147 FSC_SH = (1 << 11),
148 FSC_DSB = (1 << 12),
149 FSC_STE = (1 << 13),
150 FSC_FE = (1 << 15),
151};
152
153/*
154 * Host Command Status Register (CSR) bit definitions.
155 */
156enum {
157 CSR_ERR_STS_MASK = 0x0000003f,
158 /*
159 * There are no valued defined as of edit #15.
160 */
161 CSR_RR = (1 << 8),
162 CSR_HRI = (1 << 9),
163 CSR_RP = (1 << 10),
164 CSR_CMD_PARM_SHIFT = 22,
165 CSR_CMD_NOP = 0x00000000,
166 CSR_CMD_SET_RST = 0x1000000,
167 CSR_CMD_CLR_RST = 0x20000000,
168 CSR_CMD_SET_PAUSE = 0x30000000,
169 CSR_CMD_CLR_PAUSE = 0x40000000,
170 CSR_CMD_SET_H2R_INT = 0x50000000,
171 CSR_CMD_CLR_H2R_INT = 0x60000000,
172 CSR_CMD_PAR_EN = 0x70000000,
173 CSR_CMD_SET_BAD_PAR = 0x80000000,
174 CSR_CMD_CLR_BAD_PAR = 0x90000000,
175 CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
176};
177
178/*
179 * Configuration Register (CFG) bit definitions.
180 */
181enum {
182 CFG_LRQ = (1 << 0),
183 CFG_DRQ = (1 << 1),
184 CFG_LR = (1 << 2),
185 CFG_DR = (1 << 3),
186 CFG_LE = (1 << 5),
187 CFG_LCQ = (1 << 6),
188 CFG_DCQ = (1 << 7),
189 CFG_Q_SHIFT = 8,
190 CFG_Q_MASK = 0x7f000000,
191};
192
193/*
194 * Status Register (STS) bit definitions.
195 */
196enum {
197 STS_FE = (1 << 0),
198 STS_PI = (1 << 1),
199 STS_PL0 = (1 << 2),
200 STS_PL1 = (1 << 3),
201 STS_PI0 = (1 << 4),
202 STS_PI1 = (1 << 5),
203 STS_FUNC_ID_MASK = 0x000000c0,
204 STS_FUNC_ID_SHIFT = 6,
205 STS_F0E = (1 << 8),
206 STS_F1E = (1 << 9),
207 STS_F2E = (1 << 10),
208 STS_F3E = (1 << 11),
209 STS_NFE = (1 << 12),
210};
211
212/*
213 * Interrupt Enable Register (INTR_EN) bit definitions.
214 */
215enum {
216 INTR_EN_INTR_MASK = 0x007f0000,
217 INTR_EN_TYPE_MASK = 0x03000000,
218 INTR_EN_TYPE_ENABLE = 0x00000100,
219 INTR_EN_TYPE_DISABLE = 0x00000200,
220 INTR_EN_TYPE_READ = 0x00000300,
221 INTR_EN_IHD = (1 << 13),
222 INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
223 INTR_EN_EI = (1 << 14),
224 INTR_EN_EN = (1 << 15),
225};
226
227/*
228 * Interrupt Mask Register (INTR_MASK) bit definitions.
229 */
230enum {
231 INTR_MASK_PI = (1 << 0),
232 INTR_MASK_HL0 = (1 << 1),
233 INTR_MASK_LH0 = (1 << 2),
234 INTR_MASK_HL1 = (1 << 3),
235 INTR_MASK_LH1 = (1 << 4),
236 INTR_MASK_SE = (1 << 5),
237 INTR_MASK_LSC = (1 << 6),
238 INTR_MASK_MC = (1 << 7),
239 INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
240};
241
242/*
243 * Register (REV_ID) bit definitions.
244 */
245enum {
246 REV_ID_MASK = 0x0000000f,
247 REV_ID_NICROLL_SHIFT = 0,
248 REV_ID_NICREV_SHIFT = 4,
249 REV_ID_XGROLL_SHIFT = 8,
250 REV_ID_XGREV_SHIFT = 12,
251 REV_ID_CHIPREV_SHIFT = 28,
252};
253
254/*
255 * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
256 */
257enum {
258 FRC_ECC_ERR_VW = (1 << 12),
259 FRC_ECC_ERR_VB = (1 << 13),
260 FRC_ECC_ERR_NI = (1 << 14),
261 FRC_ECC_ERR_NO = (1 << 15),
262 FRC_ECC_PFE_SHIFT = 16,
263 FRC_ECC_ERR_DO = (1 << 18),
264 FRC_ECC_P14 = (1 << 19),
265};
266
267/*
268 * Error Status Register (ERR_STS) bit definitions.
269 */
270enum {
271 ERR_STS_NOF = (1 << 0),
272 ERR_STS_NIF = (1 << 1),
273 ERR_STS_DRP = (1 << 2),
274 ERR_STS_XGP = (1 << 3),
275 ERR_STS_FOU = (1 << 4),
276 ERR_STS_FOC = (1 << 5),
277 ERR_STS_FOF = (1 << 6),
278 ERR_STS_FIU = (1 << 7),
279 ERR_STS_FIC = (1 << 8),
280 ERR_STS_FIF = (1 << 9),
281 ERR_STS_MOF = (1 << 10),
282 ERR_STS_TA = (1 << 11),
283 ERR_STS_MA = (1 << 12),
284 ERR_STS_MPE = (1 << 13),
285 ERR_STS_SCE = (1 << 14),
286 ERR_STS_STE = (1 << 15),
287 ERR_STS_FOW = (1 << 16),
288 ERR_STS_UE = (1 << 17),
289 ERR_STS_MCH = (1 << 26),
290 ERR_STS_LOC_SHIFT = 27,
291};
292
293/*
294 * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
295 */
296enum {
297 RAM_DBG_ADDR_FW = (1 << 30),
298 RAM_DBG_ADDR_FR = (1 << 31),
299};
300
301/*
302 * Semaphore Register (SEM) bit definitions.
303 */
304enum {
305 /*
306 * Example:
307 * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
308 */
309 SEM_CLEAR = 0,
310 SEM_SET = 1,
311 SEM_FORCE = 3,
312 SEM_XGMAC0_SHIFT = 0,
313 SEM_XGMAC1_SHIFT = 2,
314 SEM_ICB_SHIFT = 4,
315 SEM_MAC_ADDR_SHIFT = 6,
316 SEM_FLASH_SHIFT = 8,
317 SEM_PROBE_SHIFT = 10,
318 SEM_RT_IDX_SHIFT = 12,
319 SEM_PROC_REG_SHIFT = 14,
320 SEM_XGMAC0_MASK = 0x00030000,
321 SEM_XGMAC1_MASK = 0x000c0000,
322 SEM_ICB_MASK = 0x00300000,
323 SEM_MAC_ADDR_MASK = 0x00c00000,
324 SEM_FLASH_MASK = 0x03000000,
325 SEM_PROBE_MASK = 0x0c000000,
326 SEM_RT_IDX_MASK = 0x30000000,
327 SEM_PROC_REG_MASK = 0xc0000000,
328};
329
330/*
331 * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
332 */
333enum {
334 XGMAC_ADDR_RDY = (1 << 31),
335 XGMAC_ADDR_R = (1 << 30),
336 XGMAC_ADDR_XME = (1 << 29),
337
338 /* XGMAC control registers */
339 PAUSE_SRC_LO = 0x00000100,
340 PAUSE_SRC_HI = 0x00000104,
341 GLOBAL_CFG = 0x00000108,
342 GLOBAL_CFG_RESET = (1 << 0),
343 GLOBAL_CFG_JUMBO = (1 << 6),
344 GLOBAL_CFG_TX_STAT_EN = (1 << 10),
345 GLOBAL_CFG_RX_STAT_EN = (1 << 11),
346 TX_CFG = 0x0000010c,
347 TX_CFG_RESET = (1 << 0),
348 TX_CFG_EN = (1 << 1),
349 TX_CFG_PREAM = (1 << 2),
350 RX_CFG = 0x00000110,
351 RX_CFG_RESET = (1 << 0),
352 RX_CFG_EN = (1 << 1),
353 RX_CFG_PREAM = (1 << 2),
354 FLOW_CTL = 0x0000011c,
355 PAUSE_OPCODE = 0x00000120,
356 PAUSE_TIMER = 0x00000124,
357 PAUSE_FRM_DEST_LO = 0x00000128,
358 PAUSE_FRM_DEST_HI = 0x0000012c,
359 MAC_TX_PARAMS = 0x00000134,
360 MAC_TX_PARAMS_JUMBO = (1 << 31),
361 MAC_TX_PARAMS_SIZE_SHIFT = 16,
362 MAC_RX_PARAMS = 0x00000138,
363 MAC_SYS_INT = 0x00000144,
364 MAC_SYS_INT_MASK = 0x00000148,
365 MAC_MGMT_INT = 0x0000014c,
366 MAC_MGMT_IN_MASK = 0x00000150,
367 EXT_ARB_MODE = 0x000001fc,
368
369 /* XGMAC TX statistics registers */
370 TX_PKTS = 0x00000200,
371 TX_BYTES = 0x00000208,
372 TX_MCAST_PKTS = 0x00000210,
373 TX_BCAST_PKTS = 0x00000218,
374 TX_UCAST_PKTS = 0x00000220,
375 TX_CTL_PKTS = 0x00000228,
376 TX_PAUSE_PKTS = 0x00000230,
377 TX_64_PKT = 0x00000238,
378 TX_65_TO_127_PKT = 0x00000240,
379 TX_128_TO_255_PKT = 0x00000248,
380 TX_256_511_PKT = 0x00000250,
381 TX_512_TO_1023_PKT = 0x00000258,
382 TX_1024_TO_1518_PKT = 0x00000260,
383 TX_1519_TO_MAX_PKT = 0x00000268,
384 TX_UNDERSIZE_PKT = 0x00000270,
385 TX_OVERSIZE_PKT = 0x00000278,
386
387 /* XGMAC statistics control registers */
388 RX_HALF_FULL_DET = 0x000002a0,
389 TX_HALF_FULL_DET = 0x000002a4,
390 RX_OVERFLOW_DET = 0x000002a8,
391 TX_OVERFLOW_DET = 0x000002ac,
392 RX_HALF_FULL_MASK = 0x000002b0,
393 TX_HALF_FULL_MASK = 0x000002b4,
394 RX_OVERFLOW_MASK = 0x000002b8,
395 TX_OVERFLOW_MASK = 0x000002bc,
396 STAT_CNT_CTL = 0x000002c0,
397 STAT_CNT_CTL_CLEAR_TX = (1 << 0),
398 STAT_CNT_CTL_CLEAR_RX = (1 << 1),
399 AUX_RX_HALF_FULL_DET = 0x000002d0,
400 AUX_TX_HALF_FULL_DET = 0x000002d4,
401 AUX_RX_OVERFLOW_DET = 0x000002d8,
402 AUX_TX_OVERFLOW_DET = 0x000002dc,
403 AUX_RX_HALF_FULL_MASK = 0x000002f0,
404 AUX_TX_HALF_FULL_MASK = 0x000002f4,
405 AUX_RX_OVERFLOW_MASK = 0x000002f8,
406 AUX_TX_OVERFLOW_MASK = 0x000002fc,
407
408 /* XGMAC RX statistics registers */
409 RX_BYTES = 0x00000300,
410 RX_BYTES_OK = 0x00000308,
411 RX_PKTS = 0x00000310,
412 RX_PKTS_OK = 0x00000318,
413 RX_BCAST_PKTS = 0x00000320,
414 RX_MCAST_PKTS = 0x00000328,
415 RX_UCAST_PKTS = 0x00000330,
416 RX_UNDERSIZE_PKTS = 0x00000338,
417 RX_OVERSIZE_PKTS = 0x00000340,
418 RX_JABBER_PKTS = 0x00000348,
419 RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
420 RX_DROP_EVENTS = 0x00000358,
421 RX_FCERR_PKTS = 0x00000360,
422 RX_ALIGN_ERR = 0x00000368,
423 RX_SYMBOL_ERR = 0x00000370,
424 RX_MAC_ERR = 0x00000378,
425 RX_CTL_PKTS = 0x00000380,
426 RX_PAUSE_PKTS = 0x00000384,
427 RX_64_PKTS = 0x00000390,
428 RX_65_TO_127_PKTS = 0x00000398,
429 RX_128_255_PKTS = 0x000003a0,
430 RX_256_511_PKTS = 0x000003a8,
431 RX_512_TO_1023_PKTS = 0x000003b0,
432 RX_1024_TO_1518_PKTS = 0x000003b8,
433 RX_1519_TO_MAX_PKTS = 0x000003c0,
434 RX_LEN_ERR_PKTS = 0x000003c8,
435
436 /* XGMAC MDIO control registers */
437 MDIO_TX_DATA = 0x00000400,
438 MDIO_RX_DATA = 0x00000410,
439 MDIO_CMD = 0x00000420,
440 MDIO_PHY_ADDR = 0x00000430,
441 MDIO_PORT = 0x00000440,
442 MDIO_STATUS = 0x00000450,
443
444 /* XGMAC AUX statistics registers */
445};
446
447/*
448 * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
449 */
450enum {
451 ETS_QUEUE_SHIFT = 29,
452 ETS_REF = (1 << 26),
453 ETS_RS = (1 << 27),
454 ETS_P = (1 << 28),
455 ETS_FC_COS_SHIFT = 23,
456};
457
458/*
459 * Flash Address Register (FLASH_ADDR) bit definitions.
460 */
461enum {
462 FLASH_ADDR_RDY = (1 << 31),
463 FLASH_ADDR_R = (1 << 30),
464 FLASH_ADDR_ERR = (1 << 29),
465};
466
467/*
468 * Stop CQ Processing Register (CQ_STOP) bit definitions.
469 */
470enum {
471 CQ_STOP_QUEUE_MASK = (0x007f0000),
472 CQ_STOP_TYPE_MASK = (0x03000000),
473 CQ_STOP_TYPE_START = 0x00000100,
474 CQ_STOP_TYPE_STOP = 0x00000200,
475 CQ_STOP_TYPE_READ = 0x00000300,
476 CQ_STOP_EN = (1 << 15),
477};
478
479/*
480 * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
481 */
482enum {
483 MAC_ADDR_IDX_SHIFT = 4,
484 MAC_ADDR_TYPE_SHIFT = 16,
485 MAC_ADDR_TYPE_MASK = 0x000f0000,
486 MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
487 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
488 MAC_ADDR_TYPE_VLAN = 0x00020000,
489 MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
490 MAC_ADDR_TYPE_FC_MAC = 0x00040000,
491 MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
492 MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
493 MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
494 MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
495 MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
496 MAC_ADDR_ADR = (1 << 25),
497 MAC_ADDR_RS = (1 << 26),
498 MAC_ADDR_E = (1 << 27),
499 MAC_ADDR_MR = (1 << 30),
500 MAC_ADDR_MW = (1 << 31),
501 MAX_MULTICAST_ENTRIES = 32,
502};
503
504/*
505 * MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
506 */
507enum {
508 SPLT_HDR_EP = (1 << 31),
509};
510
511/*
512 * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
513 */
514enum {
515 FC_RCV_CFG_ECT = (1 << 15),
516 FC_RCV_CFG_DFH = (1 << 20),
517 FC_RCV_CFG_DVF = (1 << 21),
518 FC_RCV_CFG_RCE = (1 << 27),
519 FC_RCV_CFG_RFE = (1 << 28),
520 FC_RCV_CFG_TEE = (1 << 29),
521 FC_RCV_CFG_TCE = (1 << 30),
522 FC_RCV_CFG_TFE = (1 << 31),
523};
524
525/*
526 * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
527 */
528enum {
529 NIC_RCV_CFG_PPE = (1 << 0),
530 NIC_RCV_CFG_VLAN_MASK = 0x00060000,
531 NIC_RCV_CFG_VLAN_ALL = 0x00000000,
532 NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
533 NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
534 NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
535 NIC_RCV_CFG_RV = (1 << 3),
536 NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
537 NIC_RCV_CFG_DFQ_SHIFT = 8,
538 NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
539};
540
541/*
542 * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
543 */
544enum {
545 MGMT_RCV_CFG_ARP = (1 << 0),
546 MGMT_RCV_CFG_DHC = (1 << 1),
547 MGMT_RCV_CFG_DHS = (1 << 2),
548 MGMT_RCV_CFG_NP = (1 << 3),
549 MGMT_RCV_CFG_I6N = (1 << 4),
550 MGMT_RCV_CFG_I6R = (1 << 5),
551 MGMT_RCV_CFG_DH6 = (1 << 6),
552 MGMT_RCV_CFG_UD1 = (1 << 7),
553 MGMT_RCV_CFG_UD0 = (1 << 8),
554 MGMT_RCV_CFG_BCT = (1 << 9),
555 MGMT_RCV_CFG_MCT = (1 << 10),
556 MGMT_RCV_CFG_DM = (1 << 11),
557 MGMT_RCV_CFG_RM = (1 << 12),
558 MGMT_RCV_CFG_STL = (1 << 13),
559 MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
560 MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
561 MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
562 MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
563 MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
564};
565
566/*
567 * Routing Index Register (RT_IDX) bit definitions.
568 */
569enum {
570 RT_IDX_IDX_SHIFT = 8,
571 RT_IDX_TYPE_MASK = 0x000f0000,
572 RT_IDX_TYPE_RT = 0x00000000,
573 RT_IDX_TYPE_RT_INV = 0x00010000,
574 RT_IDX_TYPE_NICQ = 0x00020000,
575 RT_IDX_TYPE_NICQ_INV = 0x00030000,
576 RT_IDX_DST_MASK = 0x00700000,
577 RT_IDX_DST_RSS = 0x00000000,
578 RT_IDX_DST_CAM_Q = 0x00100000,
579 RT_IDX_DST_COS_Q = 0x00200000,
580 RT_IDX_DST_DFLT_Q = 0x00300000,
581 RT_IDX_DST_DEST_Q = 0x00400000,
582 RT_IDX_RS = (1 << 26),
583 RT_IDX_E = (1 << 27),
584 RT_IDX_MR = (1 << 30),
585 RT_IDX_MW = (1 << 31),
586
587 /* Nic Queue format - type 2 bits */
588 RT_IDX_BCAST = (1 << 0),
589 RT_IDX_MCAST = (1 << 1),
590 RT_IDX_MCAST_MATCH = (1 << 2),
591 RT_IDX_MCAST_REG_MATCH = (1 << 3),
592 RT_IDX_MCAST_HASH_MATCH = (1 << 4),
593 RT_IDX_FC_MACH = (1 << 5),
594 RT_IDX_ETH_FCOE = (1 << 6),
595 RT_IDX_CAM_HIT = (1 << 7),
596 RT_IDX_CAM_BIT0 = (1 << 8),
597 RT_IDX_CAM_BIT1 = (1 << 9),
598 RT_IDX_VLAN_TAG = (1 << 10),
599 RT_IDX_VLAN_MATCH = (1 << 11),
600 RT_IDX_VLAN_FILTER = (1 << 12),
601 RT_IDX_ETH_SKIP1 = (1 << 13),
602 RT_IDX_ETH_SKIP2 = (1 << 14),
603 RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
604 RT_IDX_802_3 = (1 << 16),
605 RT_IDX_LLDP = (1 << 17),
606 RT_IDX_UNUSED018 = (1 << 18),
607 RT_IDX_UNUSED019 = (1 << 19),
608 RT_IDX_UNUSED20 = (1 << 20),
609 RT_IDX_UNUSED21 = (1 << 21),
610 RT_IDX_ERR = (1 << 22),
611 RT_IDX_VALID = (1 << 23),
612 RT_IDX_TU_CSUM_ERR = (1 << 24),
613 RT_IDX_IP_CSUM_ERR = (1 << 25),
614 RT_IDX_MAC_ERR = (1 << 26),
615 RT_IDX_RSS_TCP6 = (1 << 27),
616 RT_IDX_RSS_TCP4 = (1 << 28),
617 RT_IDX_RSS_IPV6 = (1 << 29),
618 RT_IDX_RSS_IPV4 = (1 << 30),
619 RT_IDX_RSS_MATCH = (1 << 31),
620
621 /* Hierarchy for the NIC Queue Mask */
622 RT_IDX_ALL_ERR_SLOT = 0,
623 RT_IDX_MAC_ERR_SLOT = 0,
624 RT_IDX_IP_CSUM_ERR_SLOT = 1,
625 RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
626 RT_IDX_BCAST_SLOT = 3,
627 RT_IDX_MCAST_MATCH_SLOT = 4,
628 RT_IDX_ALLMULTI_SLOT = 5,
629 RT_IDX_UNUSED6_SLOT = 6,
630 RT_IDX_UNUSED7_SLOT = 7,
631 RT_IDX_RSS_MATCH_SLOT = 8,
632 RT_IDX_RSS_IPV4_SLOT = 8,
633 RT_IDX_RSS_IPV6_SLOT = 9,
634 RT_IDX_RSS_TCP4_SLOT = 10,
635 RT_IDX_RSS_TCP6_SLOT = 11,
636 RT_IDX_CAM_HIT_SLOT = 12,
637 RT_IDX_UNUSED013 = 13,
638 RT_IDX_UNUSED014 = 14,
639 RT_IDX_PROMISCUOUS_SLOT = 15,
640 RT_IDX_MAX_SLOTS = 16,
641};
642
643/*
644 * Control Register Set Map
645 */
646enum {
647 PROC_ADDR = 0, /* Use semaphore */
648 PROC_DATA = 0x04, /* Use semaphore */
649 SYS = 0x08,
650 RST_FO = 0x0c,
651 FSC = 0x10,
652 CSR = 0x14,
653 LED = 0x18,
654 ICB_RID = 0x1c, /* Use semaphore */
655 ICB_L = 0x20, /* Use semaphore */
656 ICB_H = 0x24, /* Use semaphore */
657 CFG = 0x28,
658 BIOS_ADDR = 0x2c,
659 STS = 0x30,
660 INTR_EN = 0x34,
661 INTR_MASK = 0x38,
662 ISR1 = 0x3c,
663 ISR2 = 0x40,
664 ISR3 = 0x44,
665 ISR4 = 0x48,
666 REV_ID = 0x4c,
667 FRC_ECC_ERR = 0x50,
668 ERR_STS = 0x54,
669 RAM_DBG_ADDR = 0x58,
670 RAM_DBG_DATA = 0x5c,
671 ECC_ERR_CNT = 0x60,
672 SEM = 0x64,
673 GPIO_1 = 0x68, /* Use semaphore */
674 GPIO_2 = 0x6c, /* Use semaphore */
675 GPIO_3 = 0x70, /* Use semaphore */
676 RSVD2 = 0x74,
677 XGMAC_ADDR = 0x78, /* Use semaphore */
678 XGMAC_DATA = 0x7c, /* Use semaphore */
679 NIC_ETS = 0x80,
680 CNA_ETS = 0x84,
681 FLASH_ADDR = 0x88, /* Use semaphore */
682 FLASH_DATA = 0x8c, /* Use semaphore */
683 CQ_STOP = 0x90,
684 PAGE_TBL_RID = 0x94,
685 WQ_PAGE_TBL_LO = 0x98,
686 WQ_PAGE_TBL_HI = 0x9c,
687 CQ_PAGE_TBL_LO = 0xa0,
688 CQ_PAGE_TBL_HI = 0xa4,
689 MAC_ADDR_IDX = 0xa8, /* Use semaphore */
690 MAC_ADDR_DATA = 0xac, /* Use semaphore */
691 COS_DFLT_CQ1 = 0xb0,
692 COS_DFLT_CQ2 = 0xb4,
693 ETYPE_SKIP1 = 0xb8,
694 ETYPE_SKIP2 = 0xbc,
695 SPLT_HDR = 0xc0,
696 FC_PAUSE_THRES = 0xc4,
697 NIC_PAUSE_THRES = 0xc8,
698 FC_ETHERTYPE = 0xcc,
699 FC_RCV_CFG = 0xd0,
700 NIC_RCV_CFG = 0xd4,
701 FC_COS_TAGS = 0xd8,
702 NIC_COS_TAGS = 0xdc,
703 MGMT_RCV_CFG = 0xe0,
704 RT_IDX = 0xe4,
705 RT_DATA = 0xe8,
706 RSVD7 = 0xec,
707 XG_SERDES_ADDR = 0xf0,
708 XG_SERDES_DATA = 0xf4,
709 PRB_MX_ADDR = 0xf8, /* Use semaphore */
710 PRB_MX_DATA = 0xfc, /* Use semaphore */
711};
712
713/*
714 * CAM output format.
715 */
716enum {
717 CAM_OUT_ROUTE_FC = 0,
718 CAM_OUT_ROUTE_NIC = 1,
719 CAM_OUT_FUNC_SHIFT = 2,
720 CAM_OUT_RV = (1 << 4),
721 CAM_OUT_SH = (1 << 15),
722 CAM_OUT_CQ_ID_SHIFT = 5,
723};
724
725/*
726 * Mailbox definitions
727 */
728enum {
729 /* Asynchronous Event Notifications */
730 AEN_SYS_ERR = 0x00008002,
731 AEN_LINK_UP = 0x00008011,
732 AEN_LINK_DOWN = 0x00008012,
733 AEN_IDC_CMPLT = 0x00008100,
734 AEN_IDC_REQ = 0x00008101,
735 AEN_FW_INIT_DONE = 0x00008400,
736 AEN_FW_INIT_FAIL = 0x00008401,
737
738 /* Mailbox Command Opcodes. */
739 MB_CMD_NOP = 0x00000000,
740 MB_CMD_EX_FW = 0x00000002,
741 MB_CMD_MB_TEST = 0x00000006,
742 MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
743 MB_CMD_ABOUT_FW = 0x00000008,
744 MB_CMD_LOAD_RISC_RAM = 0x0000000b,
745 MB_CMD_DUMP_RISC_RAM = 0x0000000c,
746 MB_CMD_WRITE_RAM = 0x0000000d,
747 MB_CMD_READ_RAM = 0x0000000f,
748 MB_CMD_STOP_FW = 0x00000014,
749 MB_CMD_MAKE_SYS_ERR = 0x0000002a,
750 MB_CMD_INIT_FW = 0x00000060,
751 MB_CMD_GET_INIT_CB = 0x00000061,
752 MB_CMD_GET_FW_STATE = 0x00000069,
753 MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
754 MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
755 MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
756 MB_WOL_DISABLE = 0x00000000,
757 MB_WOL_MAGIC_PKT = 0x00000001,
758 MB_WOL_FLTR = 0x00000002,
759 MB_WOL_UCAST = 0x00000004,
760 MB_WOL_MCAST = 0x00000008,
761 MB_WOL_BCAST = 0x00000010,
762 MB_WOL_LINK_UP = 0x00000020,
763 MB_WOL_LINK_DOWN = 0x00000040,
764 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
765 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
766 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
767 MB_CMD_CLEAR_WOL_MAGIC = 0x00000114, /* Wake On Lan Magic Packet */
768 MB_CMD_PORT_RESET = 0x00000120,
769 MB_CMD_SET_PORT_CFG = 0x00000122,
770 MB_CMD_GET_PORT_CFG = 0x00000123,
771 MB_CMD_SET_ASIC_VOLTS = 0x00000130,
772 MB_CMD_GET_SNS_DATA = 0x00000131, /* Temp and Volt Sense data. */
773
774 /* Mailbox Command Status. */
775 MB_CMD_STS_GOOD = 0x00004000, /* Success. */
776 MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
777 MB_CMD_STS_ERR = 0x00004005, /* Error. */
778};
779
780struct mbox_params {
781 u32 mbox_in[MAILBOX_COUNT];
782 u32 mbox_out[MAILBOX_COUNT];
783 int in_count;
784 int out_count;
785};
786
787struct flash_params {
788 u8 dev_id_str[4];
789 u16 size;
790 u16 csum;
791 u16 ver;
792 u16 sub_dev_id;
793 u8 mac_addr[6];
794 u16 res;
795};
796
797
798/*
799 * doorbell space for the rx ring context
800 */
801struct rx_doorbell_context {
802 u32 cnsmr_idx; /* 0x00 */
803 u32 valid; /* 0x04 */
804 u32 reserved[4]; /* 0x08-0x14 */
805 u32 lbq_prod_idx; /* 0x18 */
806 u32 sbq_prod_idx; /* 0x1c */
807};
808
809/*
810 * doorbell space for the tx ring context
811 */
812struct tx_doorbell_context {
813 u32 prod_idx; /* 0x00 */
814 u32 valid; /* 0x04 */
815 u32 reserved[4]; /* 0x08-0x14 */
816 u32 lbq_prod_idx; /* 0x18 */
817 u32 sbq_prod_idx; /* 0x1c */
818};
819
820/* DATA STRUCTURES SHARED WITH HARDWARE. */
821
822struct bq_element {
823 u32 addr_lo;
824#define BQ_END 0x00000001
825#define BQ_CONT 0x00000002
826#define BQ_MASK 0x00000003
827 u32 addr_hi;
828} __attribute((packed));
829
830struct tx_buf_desc {
831 __le64 addr;
832 __le32 len;
833#define TX_DESC_LEN_MASK 0x000fffff
834#define TX_DESC_C 0x40000000
835#define TX_DESC_E 0x80000000
836} __attribute((packed));
837
838/*
839 * IOCB Definitions...
840 */
841
842#define OPCODE_OB_MAC_IOCB 0x01
843#define OPCODE_OB_MAC_TSO_IOCB 0x02
844#define OPCODE_IB_MAC_IOCB 0x20
845#define OPCODE_IB_MPI_IOCB 0x21
846#define OPCODE_IB_AE_IOCB 0x3f
847
848struct ob_mac_iocb_req {
849 u8 opcode;
850 u8 flags1;
851#define OB_MAC_IOCB_REQ_OI 0x01
852#define OB_MAC_IOCB_REQ_I 0x02
853#define OB_MAC_IOCB_REQ_D 0x08
854#define OB_MAC_IOCB_REQ_F 0x10
855 u8 flags2;
856 u8 flags3;
857#define OB_MAC_IOCB_DFP 0x02
858#define OB_MAC_IOCB_V 0x04
859 __le32 reserved1[2];
860 __le16 frame_len;
861#define OB_MAC_IOCB_LEN_MASK 0x3ffff
862 __le16 reserved2;
863 __le32 tid;
864 __le32 txq_idx;
865 __le32 reserved3;
866 __le16 vlan_tci;
867 __le16 reserved4;
868 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
869} __attribute((packed));
870
871struct ob_mac_iocb_rsp {
872 u8 opcode; /* */
873 u8 flags1; /* */
874#define OB_MAC_IOCB_RSP_OI 0x01 /* */
875#define OB_MAC_IOCB_RSP_I 0x02 /* */
876#define OB_MAC_IOCB_RSP_E 0x08 /* */
877#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
878#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
879#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
880 u8 flags2; /* */
881 u8 flags3; /* */
882#define OB_MAC_IOCB_RSP_B 0x80 /* */
883 __le32 tid;
884 __le32 txq_idx;
885 __le32 reserved[13];
886} __attribute((packed));
887
888struct ob_mac_tso_iocb_req {
889 u8 opcode;
890 u8 flags1;
891#define OB_MAC_TSO_IOCB_OI 0x01
892#define OB_MAC_TSO_IOCB_I 0x02
893#define OB_MAC_TSO_IOCB_D 0x08
894#define OB_MAC_TSO_IOCB_IP4 0x40
895#define OB_MAC_TSO_IOCB_IP6 0x80
896 u8 flags2;
897#define OB_MAC_TSO_IOCB_LSO 0x20
898#define OB_MAC_TSO_IOCB_UC 0x40
899#define OB_MAC_TSO_IOCB_TC 0x80
900 u8 flags3;
901#define OB_MAC_TSO_IOCB_IC 0x01
902#define OB_MAC_TSO_IOCB_DFP 0x02
903#define OB_MAC_TSO_IOCB_V 0x04
904 __le32 reserved1[2];
905 __le32 frame_len;
906 __le32 tid;
907 __le32 txq_idx;
908 __le16 total_hdrs_len;
909 __le16 net_trans_offset;
910#define OB_MAC_TRANSPORT_HDR_SHIFT 6
911 __le16 vlan_tci;
912 __le16 mss;
913 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
914} __attribute((packed));
915
916struct ob_mac_tso_iocb_rsp {
917 u8 opcode;
918 u8 flags1;
919#define OB_MAC_TSO_IOCB_RSP_OI 0x01
920#define OB_MAC_TSO_IOCB_RSP_I 0x02
921#define OB_MAC_TSO_IOCB_RSP_E 0x08
922#define OB_MAC_TSO_IOCB_RSP_S 0x10
923#define OB_MAC_TSO_IOCB_RSP_L 0x20
924#define OB_MAC_TSO_IOCB_RSP_P 0x40
925 u8 flags2; /* */
926 u8 flags3; /* */
927#define OB_MAC_TSO_IOCB_RSP_B 0x8000
928 __le32 tid;
929 __le32 txq_idx;
930 __le32 reserved2[13];
931} __attribute((packed));
932
933struct ib_mac_iocb_rsp {
934 u8 opcode; /* 0x20 */
935 u8 flags1;
936#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */
937#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */
938#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
939#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
940#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
941#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
942#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
943#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
944#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
945#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
946#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
947 u8 flags2;
948#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
949#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
950#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
951#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
952#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
953#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
954#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
955#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
956#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
957#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
958#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
959#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
960 u8 flags3;
961#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
962#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
963#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
964#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
965#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
966#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
967#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
968#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
969#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
970#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
971#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
972 __le32 data_len; /* */
973 __le32 data_addr_lo; /* */
974 __le32 data_addr_hi; /* */
975 __le32 rss; /* */
976 __le16 vlan_id; /* 12 bits */
977#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
978#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
979
980 __le16 reserved1;
981 __le32 reserved2[6];
982 __le32 flags4;
983#define IB_MAC_IOCB_RSP_HV 0x20000000 /* */
984#define IB_MAC_IOCB_RSP_HS 0x40000000 /* */
985#define IB_MAC_IOCB_RSP_HL 0x80000000 /* */
986 __le32 hdr_len; /* */
987 __le32 hdr_addr_lo; /* */
988 __le32 hdr_addr_hi; /* */
989} __attribute((packed));
990
991struct ib_ae_iocb_rsp {
992 u8 opcode;
993 u8 flags1;
994#define IB_AE_IOCB_RSP_OI 0x01
995#define IB_AE_IOCB_RSP_I 0x02
996 u8 event;
997#define LINK_UP_EVENT 0x00
998#define LINK_DOWN_EVENT 0x01
999#define CAM_LOOKUP_ERR_EVENT 0x06
1000#define SOFT_ECC_ERROR_EVENT 0x07
1001#define MGMT_ERR_EVENT 0x08
1002#define TEN_GIG_MAC_EVENT 0x09
1003#define GPI0_H2L_EVENT 0x10
1004#define GPI0_L2H_EVENT 0x20
1005#define GPI1_H2L_EVENT 0x11
1006#define GPI1_L2H_EVENT 0x21
1007#define PCI_ERR_ANON_BUF_RD 0x40
1008 u8 q_id;
1009 __le32 reserved[15];
1010} __attribute((packed));
1011
1012/*
1013 * These three structures are for generic
1014 * handling of ib and ob iocbs.
1015 */
1016struct ql_net_rsp_iocb {
1017 u8 opcode;
1018 u8 flags0;
1019 __le16 length;
1020 __le32 tid;
1021 __le32 reserved[14];
1022} __attribute((packed));
1023
1024struct net_req_iocb {
1025 u8 opcode;
1026 u8 flags0;
1027 __le16 flags1;
1028 __le32 tid;
1029 __le32 reserved1[30];
1030} __attribute((packed));
1031
1032/*
1033 * tx ring initialization control block for chip.
1034 * It is defined as:
1035 * "Work Queue Initialization Control Block"
1036 */
1037struct wqicb {
1038 __le16 len;
1039#define Q_LEN_V (1 << 4)
1040#define Q_LEN_CPP_CONT 0x0000
1041#define Q_LEN_CPP_16 0x0001
1042#define Q_LEN_CPP_32 0x0002
1043#define Q_LEN_CPP_64 0x0003
1044 __le16 flags;
1045#define Q_PRI_SHIFT 1
1046#define Q_FLAGS_LC 0x1000
1047#define Q_FLAGS_LB 0x2000
1048#define Q_FLAGS_LI 0x4000
1049#define Q_FLAGS_LO 0x8000
1050 __le16 cq_id_rss;
1051#define Q_CQ_ID_RSS_RV 0x8000
1052 __le16 rid;
1053 __le32 addr_lo;
1054 __le32 addr_hi;
1055 __le32 cnsmr_idx_addr_lo;
1056 __le32 cnsmr_idx_addr_hi;
1057} __attribute((packed));
1058
1059/*
1060 * rx ring initialization control block for chip.
1061 * It is defined as:
1062 * "Completion Queue Initialization Control Block"
1063 */
1064struct cqicb {
1065 u8 msix_vect;
1066 u8 reserved1;
1067 u8 reserved2;
1068 u8 flags;
1069#define FLAGS_LV 0x08
1070#define FLAGS_LS 0x10
1071#define FLAGS_LL 0x20
1072#define FLAGS_LI 0x40
1073#define FLAGS_LC 0x80
1074 __le16 len;
1075#define LEN_V (1 << 4)
1076#define LEN_CPP_CONT 0x0000
1077#define LEN_CPP_32 0x0001
1078#define LEN_CPP_64 0x0002
1079#define LEN_CPP_128 0x0003
1080 __le16 rid;
1081 __le32 addr_lo;
1082 __le32 addr_hi;
1083 __le32 prod_idx_addr_lo;
1084 __le32 prod_idx_addr_hi;
1085 __le16 pkt_delay;
1086 __le16 irq_delay;
1087 __le32 lbq_addr_lo;
1088 __le32 lbq_addr_hi;
1089 __le16 lbq_buf_size;
1090 __le16 lbq_len; /* entry count */
1091 __le32 sbq_addr_lo;
1092 __le32 sbq_addr_hi;
1093 __le16 sbq_buf_size;
1094 __le16 sbq_len; /* entry count */
1095} __attribute((packed));
1096
1097struct ricb {
1098 u8 base_cq;
1099#define RSS_L4K 0x80
1100 u8 flags;
1101#define RSS_L6K 0x01
1102#define RSS_LI 0x02
1103#define RSS_LB 0x04
1104#define RSS_LM 0x08
1105#define RSS_RI4 0x10
1106#define RSS_RT4 0x20
1107#define RSS_RI6 0x40
1108#define RSS_RT6 0x80
1109 __le16 mask;
1110 __le32 hash_cq_id[256];
1111 __le32 ipv6_hash_key[10];
1112 __le32 ipv4_hash_key[4];
1113} __attribute((packed));
1114
1115/* SOFTWARE/DRIVER DATA STRUCTURES. */
1116
1117struct oal {
1118 struct tx_buf_desc oal[TX_DESC_PER_OAL];
1119};
1120
1121struct map_list {
1122 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1123 DECLARE_PCI_UNMAP_LEN(maplen);
1124};
1125
1126struct tx_ring_desc {
1127 struct sk_buff *skb;
1128 struct ob_mac_iocb_req *queue_entry;
1129 int index;
1130 struct oal oal;
1131 struct map_list map[MAX_SKB_FRAGS + 1];
1132 int map_cnt;
1133 struct tx_ring_desc *next;
1134};
1135
1136struct bq_desc {
1137 union {
1138 struct page *lbq_page;
1139 struct sk_buff *skb;
1140 } p;
1141 struct bq_element *bq;
1142 int index;
1143 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1144 DECLARE_PCI_UNMAP_LEN(maplen);
1145};
1146
/*
 * Map the current CPU onto one of the adapter's tx rings.  @skb is
 * accepted but currently unused.  The qdev argument is parenthesized
 * in the expansion (the original was not macro-hygienic).
 */
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id() % ((qdev)->tx_ring_count))
1148
1149struct tx_ring {
1150 /*
1151 * queue info.
1152 */
1153 struct wqicb wqicb; /* structure used to inform chip of new queue */
1154 void *wq_base; /* pci_alloc:virtual addr for tx */
1155 dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
1156 u32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
1157 dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
1158 u32 wq_size; /* size in bytes of queue area */
1159 u32 wq_len; /* number of entries in queue */
1160 void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */
1161 void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */
1162 u16 prod_idx; /* current value for prod idx */
1163 u16 cq_id; /* completion (rx) queue for tx completions */
1164 u8 wq_id; /* queue id for this entry */
1165 u8 reserved1[3];
1166 struct tx_ring_desc *q; /* descriptor list for the queue */
1167 spinlock_t lock;
1168 atomic_t tx_count; /* counts down for every outstanding IO */
1169 atomic_t queue_stopped; /* Turns queue off when full. */
1170 struct delayed_work tx_work;
1171 struct ql_adapter *qdev;
1172};
1173
/*
 * Kinds of inbound (completion) queues.
 */
enum {
	DEFAULT_Q = 2,		/* handles slow queue and chip/MPI events */
	TX_Q = 3,		/* handles outbound completions */
	RX_Q = 4,		/* handles inbound completions */
};
1182
1183struct rx_ring {
1184 struct cqicb cqicb; /* The chip's completion queue init control block. */
1185
1186 /* Completion queue elements. */
1187 void *cq_base;
1188 dma_addr_t cq_base_dma;
1189 u32 cq_size;
1190 u32 cq_len;
1191 u16 cq_id;
1192 u32 *prod_idx_sh_reg; /* Shadowed producer register. */
1193 dma_addr_t prod_idx_sh_reg_dma;
1194 void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
1195 u32 cnsmr_idx; /* current sw idx */
1196 struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
1197 void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
1198
1199 /* Large buffer queue elements. */
1200 u32 lbq_len; /* entry count */
1201 u32 lbq_size; /* size in bytes of queue */
1202 u32 lbq_buf_size;
1203 void *lbq_base;
1204 dma_addr_t lbq_base_dma;
1205 void *lbq_base_indirect;
1206 dma_addr_t lbq_base_indirect_dma;
1207 struct bq_desc *lbq; /* array of control blocks */
1208 void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
1209 u32 lbq_prod_idx; /* current sw prod idx */
1210 u32 lbq_curr_idx; /* next entry we expect */
1211 u32 lbq_clean_idx; /* beginning of new descs */
1212 u32 lbq_free_cnt; /* free buffer desc cnt */
1213
1214 /* Small buffer queue elements. */
1215 u32 sbq_len; /* entry count */
1216 u32 sbq_size; /* size in bytes of queue */
1217 u32 sbq_buf_size;
1218 void *sbq_base;
1219 dma_addr_t sbq_base_dma;
1220 void *sbq_base_indirect;
1221 dma_addr_t sbq_base_indirect_dma;
1222 struct bq_desc *sbq; /* array of control blocks */
1223 void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
1224 u32 sbq_prod_idx; /* current sw prod idx */
1225 u32 sbq_curr_idx; /* next entry we expect */
1226 u32 sbq_clean_idx; /* beginning of new descs */
1227 u32 sbq_free_cnt; /* free buffer desc cnt */
1228
1229 /* Misc. handler elements. */
1230 u32 type; /* Type of queue, tx, rx, or default. */
1231 u32 irq; /* Which vector this ring is assigned. */
1232 u32 cpu; /* Which CPU this should run on. */
1233 char name[IFNAMSIZ + 5];
1234 struct napi_struct napi;
1235 struct delayed_work rx_work;
1236 u8 reserved;
1237 struct ql_adapter *qdev;
1238};
1239
1240/*
1241 * RSS Initialization Control Block
1242 */
1243struct hash_id {
1244 u8 value[4];
1245};
1246
1247struct nic_stats {
1248 /*
1249 * These stats come from offset 200h to 278h
1250 * in the XGMAC register.
1251 */
1252 u64 tx_pkts;
1253 u64 tx_bytes;
1254 u64 tx_mcast_pkts;
1255 u64 tx_bcast_pkts;
1256 u64 tx_ucast_pkts;
1257 u64 tx_ctl_pkts;
1258 u64 tx_pause_pkts;
1259 u64 tx_64_pkt;
1260 u64 tx_65_to_127_pkt;
1261 u64 tx_128_to_255_pkt;
1262 u64 tx_256_511_pkt;
1263 u64 tx_512_to_1023_pkt;
1264 u64 tx_1024_to_1518_pkt;
1265 u64 tx_1519_to_max_pkt;
1266 u64 tx_undersize_pkt;
1267 u64 tx_oversize_pkt;
1268
1269 /*
1270 * These stats come from offset 300h to 3C8h
1271 * in the XGMAC register.
1272 */
1273 u64 rx_bytes;
1274 u64 rx_bytes_ok;
1275 u64 rx_pkts;
1276 u64 rx_pkts_ok;
1277 u64 rx_bcast_pkts;
1278 u64 rx_mcast_pkts;
1279 u64 rx_ucast_pkts;
1280 u64 rx_undersize_pkts;
1281 u64 rx_oversize_pkts;
1282 u64 rx_jabber_pkts;
1283 u64 rx_undersize_fcerr_pkts;
1284 u64 rx_drop_events;
1285 u64 rx_fcerr_pkts;
1286 u64 rx_align_err;
1287 u64 rx_symbol_err;
1288 u64 rx_mac_err;
1289 u64 rx_ctl_pkts;
1290 u64 rx_pause_pkts;
1291 u64 rx_64_pkts;
1292 u64 rx_65_to_127_pkts;
1293 u64 rx_128_255_pkts;
1294 u64 rx_256_511_pkts;
1295 u64 rx_512_to_1023_pkts;
1296 u64 rx_1024_to_1518_pkts;
1297 u64 rx_1519_to_max_pkts;
1298 u64 rx_len_err_pkts;
1299};
1300
1301/*
1302 * intr_context structure is used during initialization
1303 * to hook the interrupts. It is also used in a single
1304 * irq environment as a context to the ISR.
1305 */
1306struct intr_context {
1307 struct ql_adapter *qdev;
1308 u32 intr;
1309 u32 hooked;
1310 u32 intr_en_mask; /* value/mask used to enable this intr */
1311 u32 intr_dis_mask; /* value/mask used to disable this intr */
1312 u32 intr_read_mask; /* value/mask used to read this intr */
1313 char name[IFNAMSIZ * 2];
1314 atomic_t irq_cnt; /* irq_cnt is used in single vector
1315 * environment. It's incremented for each
1316 * irq handler that is scheduled. When each
1317 * handler finishes it decrements irq_cnt and
1318 * enables interrupts if it's zero. */
1319 irq_handler_t handler;
1320};
1321
/* Adapter flags definitions (bits in ql_adapter->flags). */
enum {
	QL_ADAPTER_UP = (1 << 0),	/* adapter has been brought up */
	QL_LEGACY_ENABLED = (1 << 1),	/* BUGFIX: was (1 << 3), which
					 * collided with QL_MSI_ENABLED —
					 * toggling one silently toggled
					 * the other. */
	QL_MSI_ENABLED = (1 << 3),
	QL_MSIX_ENABLED = (1 << 4),
	QL_DMA64 = (1 << 5),
	QL_PROMISCUOUS = (1 << 6),
	QL_ALLMULTI = (1 << 7),
};
1332
/* link_status bit definitions */
enum {
	/* Loopback mode. */
	LOOPBACK_MASK = 0x00000700,
	LOOPBACK_PCS = 0x00000100,
	LOOPBACK_HSS = 0x00000200,
	LOOPBACK_EXT = 0x00000300,
	/* Pause / flow control mode. */
	PAUSE_MASK = 0x000000c0,
	PAUSE_STD = 0x00000040,
	PAUSE_PRI = 0x00000080,
	/* Link speed. */
	SPEED_MASK = 0x00000038,
	SPEED_100Mb = 0x00000000,
	SPEED_1Gb = 0x00000008,
	SPEED_10Gb = 0x00000010,
	/* Link/media type. */
	LINK_TYPE_MASK = 0x00000007,
	LINK_TYPE_XFI = 0x00000001,
	LINK_TYPE_XAUI = 0x00000002,
	LINK_TYPE_XFI_BP = 0x00000003,
	LINK_TYPE_XAUI_BP = 0x00000004,
	LINK_TYPE_10GBASET = 0x00000005,
};
1353
1354/*
1355 * The main Adapter structure definition.
1356 * This structure has all fields relevant to the hardware.
1357 */
1358struct ql_adapter {
1359 struct ricb ricb;
1360 unsigned long flags;
1361 u32 wol;
1362
1363 struct nic_stats nic_stats;
1364
1365 struct vlan_group *vlgrp;
1366
1367 /* PCI Configuration information for this device */
1368 struct pci_dev *pdev;
1369 struct net_device *ndev; /* Parent NET device */
1370
1371 /* Hardware information */
1372 u32 chip_rev_id;
1373 u32 func; /* PCI function for this adapter */
1374
1375 spinlock_t adapter_lock;
1376 spinlock_t hw_lock;
1377 spinlock_t stats_lock;
1378 spinlock_t legacy_lock; /* used for maintaining legacy intr sync */
1379
1380 /* PCI Bus Relative Register Addresses */
1381 void __iomem *reg_base;
1382 void __iomem *doorbell_area;
1383 u32 doorbell_area_size;
1384
1385 u32 msg_enable;
1386
1387 /* Page for Shadow Registers */
1388 void *rx_ring_shadow_reg_area;
1389 dma_addr_t rx_ring_shadow_reg_dma;
1390 void *tx_ring_shadow_reg_area;
1391 dma_addr_t tx_ring_shadow_reg_dma;
1392
1393 u32 mailbox_in;
1394 u32 mailbox_out;
1395
1396 int tx_ring_size;
1397 int rx_ring_size;
1398 u32 intr_count;
1399 struct msix_entry *msi_x_entry;
1400 struct intr_context intr_context[MAX_RX_RINGS];
1401
1402 int (*legacy_check) (struct ql_adapter *);
1403
1404 int tx_ring_count; /* One per online CPU. */
1405 u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
1406 u32 rss_ring_count; /* One per online CPU. */
1407 /*
1408 * rx_ring_count =
1409 * one default queue +
1410 * (CPU count * outbound completion rx_ring) +
1411 * (CPU count * inbound (RSS) completion rx_ring)
1412 */
1413 int rx_ring_count;
1414 int ring_mem_size;
1415 void *ring_mem;
1416 struct rx_ring *rx_ring;
1417 int rx_csum;
1418 struct tx_ring *tx_ring;
1419 u32 default_rx_queue;
1420
1421 u16 rx_coalesce_usecs; /* cqicb->int_delay */
1422 u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */
1423 u16 tx_coalesce_usecs; /* cqicb->int_delay */
1424 u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */
1425
1426 u32 xg_sem_mask;
1427 u32 port_link_up;
1428 u32 port_init;
1429 u32 link_status;
1430
1431 struct flash_params flash;
1432
1433 struct net_device_stats stats;
1434 struct workqueue_struct *q_workqueue;
1435 struct workqueue_struct *workqueue;
1436 struct delayed_work asic_reset_work;
1437 struct delayed_work mpi_reset_work;
1438 struct delayed_work mpi_work;
1439};
1440
1441/*
1442 * Typical Register accessor for memory mapped device.
1443 */
1444static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
1445{
1446 return readl(qdev->reg_base + reg);
1447}
1448
1449/*
1450 * Typical Register accessor for memory mapped device.
1451 */
1452static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
1453{
1454 writel(val, qdev->reg_base + reg);
1455}
1456
1457/*
1458 * Doorbell Registers:
1459 * Doorbell registers are virtual registers in the PCI memory space.
1460 * The space is allocated by the chip during PCI initialization. The
1461 * device driver finds the doorbell address in BAR 3 in PCI config space.
1462 * The registers are used to control outbound and inbound queues. For
1463 * example, the producer index for an outbound queue. Each queue uses
1464 * 1 4k chunk of memory. The lower half of the space is for outbound
1465 * queues. The upper half is for inbound queues.
1466 */
1467static inline void ql_write_db_reg(u32 val, void __iomem *addr)
1468{
1469 writel(val, addr);
1470 mmiowb();
1471}
1472
1473/*
1474 * Shadow Registers:
1475 * Outbound queues have a consumer index that is maintained by the chip.
1476 * Inbound queues have a producer index that is maintained by the chip.
1477 * For lower overhead, these registers are "shadowed" to host memory
1478 * which allows the device driver to track the queue progress without
1479 * PCI reads. When an entry is placed on an inbound queue, the chip will
1480 * update the relevant index register and then copy the value to the
1481 * shadow register in host memory.
1482 */
1483static inline unsigned int ql_read_sh_reg(const volatile void *addr)
1484{
1485 return *(volatile unsigned int __force *)addr;
1486}
1487
1488extern char qlge_driver_name[];
1489extern const char qlge_driver_version[];
1490extern const struct ethtool_ops qlge_ethtool_ops;
1491
1492extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
1493extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
1494extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
1495extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
1496 u32 *value);
1497extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
1498extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
1499 u16 q_id);
1500void ql_queue_fw_error(struct ql_adapter *qdev);
1501void ql_mpi_work(struct work_struct *work);
1502void ql_mpi_reset_work(struct work_struct *work);
1503int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
1504void ql_queue_asic_error(struct ql_adapter *qdev);
1505void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
1506void ql_set_ethtool_ops(struct net_device *ndev);
1507int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
1508
1509#if 1
1510#define QL_ALL_DUMP
1511#define QL_REG_DUMP
1512#define QL_DEV_DUMP
1513#define QL_CB_DUMP
1514/* #define QL_IB_DUMP */
1515/* #define QL_OB_DUMP */
1516#endif
1517
1518#ifdef QL_REG_DUMP
1519extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
1520extern void ql_dump_routing_entries(struct ql_adapter *qdev);
1521extern void ql_dump_regs(struct ql_adapter *qdev);
1522#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
1523#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
1524#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
1525#else
1526#define QL_DUMP_REGS(qdev)
1527#define QL_DUMP_ROUTE(qdev)
1528#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
1529#endif
1530
1531#ifdef QL_STAT_DUMP
1532extern void ql_dump_stat(struct ql_adapter *qdev);
1533#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
1534#else
1535#define QL_DUMP_STAT(qdev)
1536#endif
1537
1538#ifdef QL_DEV_DUMP
1539extern void ql_dump_qdev(struct ql_adapter *qdev);
1540#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
1541#else
1542#define QL_DUMP_QDEV(qdev)
1543#endif
1544
1545#ifdef QL_CB_DUMP
1546extern void ql_dump_wqicb(struct wqicb *wqicb);
1547extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
1548extern void ql_dump_ricb(struct ricb *ricb);
1549extern void ql_dump_cqicb(struct cqicb *cqicb);
1550extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
1551extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
1552#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
1553#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
1554#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
1555#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
1556#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
1557#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
1558 ql_dump_hw_cb(qdev, size, bit, q_id)
1559#else
1560#define QL_DUMP_RICB(ricb)
1561#define QL_DUMP_WQICB(wqicb)
1562#define QL_DUMP_TX_RING(tx_ring)
1563#define QL_DUMP_CQICB(cqicb)
1564#define QL_DUMP_RX_RING(rx_ring)
1565#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
1566#endif
1567
1568#ifdef QL_OB_DUMP
1569extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
1570extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
1571extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
1572#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
1573#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
1574#else
1575#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
1576#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
1577#endif
1578
1579#ifdef QL_IB_DUMP
1580extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
1581#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
1582#else
1583#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
1584#endif
1585
1586#ifdef QL_ALL_DUMP
1587extern void ql_dump_all(struct ql_adapter *qdev);
1588#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
1589#else
1590#define QL_DUMP_ALL(qdev)
1591#endif
1592
1593#endif /* _QLGE_H_ */
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
new file mode 100644
index 000000000000..47df304a02c8
--- /dev/null
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -0,0 +1,858 @@
1#include "qlge.h"
2
3#ifdef QL_REG_DUMP
4static void ql_dump_intr_states(struct ql_adapter *qdev)
5{
6 int i;
7 u32 value;
8 for (i = 0; i < qdev->intr_count; i++) {
9 ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
10 value = ql_read32(qdev, INTR_EN);
11 printk(KERN_ERR PFX
12 "%s: Interrupt %d is %s.\n",
13 qdev->ndev->name, i,
14 (value & INTR_EN_EN ? "enabled" : "disabled"));
15 }
16}
17
18void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
19{
20 u32 data;
21 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
22 printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
23 return;
24 }
25 ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
26 printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
27 data);
28 ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
29 printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
30 data);
31 ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
32 printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
33 data);
34 ql_read_xgmac_reg(qdev, TX_CFG, &data);
35 printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
36 ql_read_xgmac_reg(qdev, RX_CFG, &data);
37 printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
38 ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
39 printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
40 data);
41 ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
42 printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
43 data);
44 ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
45 printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
46 data);
47 ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
48 printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
49 qdev->ndev->name, data);
50 ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
51 printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
52 qdev->ndev->name, data);
53 ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
54 printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
55 data);
56 ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
57 printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
58 data);
59 ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
60 printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
61 data);
62 ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
63 printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
64 qdev->ndev->name, data);
65 ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
66 printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
67 data);
68 ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
69 printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
70 qdev->ndev->name, data);
71 ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
72 printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
73 data);
74 ql_sem_unlock(qdev, qdev->xg_sem_mask);
75
76}
77
/* Placeholder: ETS register dump is not implemented yet. */
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
81
82static void ql_dump_cam_entries(struct ql_adapter *qdev)
83{
84 int i;
85 u32 value[3];
86 for (i = 0; i < 4; i++) {
87 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
88 printk(KERN_ERR PFX
89 "%s: Failed read of mac index register.\n",
90 __func__);
91 return;
92 } else {
93 if (value[0])
94 printk(KERN_ERR PFX
95 "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
96 qdev->ndev->name, i, value[1], value[0],
97 value[2]);
98 }
99 }
100 for (i = 0; i < 32; i++) {
101 if (ql_get_mac_addr_reg
102 (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
103 printk(KERN_ERR PFX
104 "%s: Failed read of mac index register.\n",
105 __func__);
106 return;
107 } else {
108 if (value[0])
109 printk(KERN_ERR PFX
110 "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
111 qdev->ndev->name, i, value[1], value[0]);
112 }
113 }
114}
115
116void ql_dump_routing_entries(struct ql_adapter *qdev)
117{
118 int i;
119 u32 value;
120 for (i = 0; i < 16; i++) {
121 value = 0;
122 if (ql_get_routing_reg(qdev, i, &value)) {
123 printk(KERN_ERR PFX
124 "%s: Failed read of routing index register.\n",
125 __func__);
126 return;
127 } else {
128 if (value)
129 printk(KERN_ERR PFX
130 "%s: Routing Mask %d = 0x%.08x.\n",
131 qdev->ndev->name, i, value);
132 }
133 }
134}
135
136void ql_dump_regs(struct ql_adapter *qdev)
137{
138 printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
139 printk(KERN_ERR PFX "SYS = 0x%x.\n",
140 ql_read32(qdev, SYS));
141 printk(KERN_ERR PFX "RST_FO = 0x%x.\n",
142 ql_read32(qdev, RST_FO));
143 printk(KERN_ERR PFX "FSC = 0x%x.\n",
144 ql_read32(qdev, FSC));
145 printk(KERN_ERR PFX "CSR = 0x%x.\n",
146 ql_read32(qdev, CSR));
147 printk(KERN_ERR PFX "ICB_RID = 0x%x.\n",
148 ql_read32(qdev, ICB_RID));
149 printk(KERN_ERR PFX "ICB_L = 0x%x.\n",
150 ql_read32(qdev, ICB_L));
151 printk(KERN_ERR PFX "ICB_H = 0x%x.\n",
152 ql_read32(qdev, ICB_H));
153 printk(KERN_ERR PFX "CFG = 0x%x.\n",
154 ql_read32(qdev, CFG));
155 printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n",
156 ql_read32(qdev, BIOS_ADDR));
157 printk(KERN_ERR PFX "STS = 0x%x.\n",
158 ql_read32(qdev, STS));
159 printk(KERN_ERR PFX "INTR_EN = 0x%x.\n",
160 ql_read32(qdev, INTR_EN));
161 printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n",
162 ql_read32(qdev, INTR_MASK));
163 printk(KERN_ERR PFX "ISR1 = 0x%x.\n",
164 ql_read32(qdev, ISR1));
165 printk(KERN_ERR PFX "ISR2 = 0x%x.\n",
166 ql_read32(qdev, ISR2));
167 printk(KERN_ERR PFX "ISR3 = 0x%x.\n",
168 ql_read32(qdev, ISR3));
169 printk(KERN_ERR PFX "ISR4 = 0x%x.\n",
170 ql_read32(qdev, ISR4));
171 printk(KERN_ERR PFX "REV_ID = 0x%x.\n",
172 ql_read32(qdev, REV_ID));
173 printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n",
174 ql_read32(qdev, FRC_ECC_ERR));
175 printk(KERN_ERR PFX "ERR_STS = 0x%x.\n",
176 ql_read32(qdev, ERR_STS));
177 printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n",
178 ql_read32(qdev, RAM_DBG_ADDR));
179 printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n",
180 ql_read32(qdev, RAM_DBG_DATA));
181 printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n",
182 ql_read32(qdev, ECC_ERR_CNT));
183 printk(KERN_ERR PFX "SEM = 0x%x.\n",
184 ql_read32(qdev, SEM));
185 printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n",
186 ql_read32(qdev, GPIO_1));
187 printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n",
188 ql_read32(qdev, GPIO_2));
189 printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n",
190 ql_read32(qdev, GPIO_3));
191 printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n",
192 ql_read32(qdev, XGMAC_ADDR));
193 printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n",
194 ql_read32(qdev, XGMAC_DATA));
195 printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n",
196 ql_read32(qdev, NIC_ETS));
197 printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n",
198 ql_read32(qdev, CNA_ETS));
199 printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n",
200 ql_read32(qdev, FLASH_ADDR));
201 printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n",
202 ql_read32(qdev, FLASH_DATA));
203 printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n",
204 ql_read32(qdev, CQ_STOP));
205 printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n",
206 ql_read32(qdev, PAGE_TBL_RID));
207 printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n",
208 ql_read32(qdev, WQ_PAGE_TBL_LO));
209 printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n",
210 ql_read32(qdev, WQ_PAGE_TBL_HI));
211 printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n",
212 ql_read32(qdev, CQ_PAGE_TBL_LO));
213 printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n",
214 ql_read32(qdev, CQ_PAGE_TBL_HI));
215 printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n",
216 ql_read32(qdev, COS_DFLT_CQ1));
217 printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n",
218 ql_read32(qdev, COS_DFLT_CQ2));
219 printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n",
220 ql_read32(qdev, SPLT_HDR));
221 printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n",
222 ql_read32(qdev, FC_PAUSE_THRES));
223 printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n",
224 ql_read32(qdev, NIC_PAUSE_THRES));
225 printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n",
226 ql_read32(qdev, FC_ETHERTYPE));
227 printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n",
228 ql_read32(qdev, FC_RCV_CFG));
229 printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n",
230 ql_read32(qdev, NIC_RCV_CFG));
231 printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n",
232 ql_read32(qdev, FC_COS_TAGS));
233 printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n",
234 ql_read32(qdev, NIC_COS_TAGS));
235 printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n",
236 ql_read32(qdev, MGMT_RCV_CFG));
237 printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n",
238 ql_read32(qdev, XG_SERDES_ADDR));
239 printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n",
240 ql_read32(qdev, XG_SERDES_DATA));
241 printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n",
242 ql_read32(qdev, PRB_MX_ADDR));
243 printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n",
244 ql_read32(qdev, PRB_MX_DATA));
245 ql_dump_intr_states(qdev);
246 ql_dump_xgmac_control_regs(qdev);
247 ql_dump_ets_regs(qdev);
248 ql_dump_cam_entries(qdev);
249 ql_dump_routing_entries(qdev);
250}
251#endif
252
253#ifdef QL_STAT_DUMP
254void ql_dump_stat(struct ql_adapter *qdev)
255{
256 printk(KERN_ERR "%s: Enter.\n", __func__);
257 printk(KERN_ERR "tx_pkts = %ld\n",
258 (unsigned long)qdev->nic_stats.tx_pkts);
259 printk(KERN_ERR "tx_bytes = %ld\n",
260 (unsigned long)qdev->nic_stats.tx_bytes);
261 printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
262 (unsigned long)qdev->nic_stats.tx_mcast_pkts);
263 printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
264 (unsigned long)qdev->nic_stats.tx_bcast_pkts);
265 printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
266 (unsigned long)qdev->nic_stats.tx_ucast_pkts);
267 printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
268 (unsigned long)qdev->nic_stats.tx_ctl_pkts);
269 printk(KERN_ERR "tx_pause_pkts = %ld.\n",
270 (unsigned long)qdev->nic_stats.tx_pause_pkts);
271 printk(KERN_ERR "tx_64_pkt = %ld.\n",
272 (unsigned long)qdev->nic_stats.tx_64_pkt);
273 printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
274 (unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
275 printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
276 (unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
277 printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
278 (unsigned long)qdev->nic_stats.tx_256_511_pkt);
279 printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
280 (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
281 printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
282 (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
283 printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
284 (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
285 printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
286 (unsigned long)qdev->nic_stats.tx_undersize_pkt);
287 printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
288 (unsigned long)qdev->nic_stats.tx_oversize_pkt);
289 printk(KERN_ERR "rx_bytes = %ld.\n",
290 (unsigned long)qdev->nic_stats.rx_bytes);
291 printk(KERN_ERR "rx_bytes_ok = %ld.\n",
292 (unsigned long)qdev->nic_stats.rx_bytes_ok);
293 printk(KERN_ERR "rx_pkts = %ld.\n",
294 (unsigned long)qdev->nic_stats.rx_pkts);
295 printk(KERN_ERR "rx_pkts_ok = %ld.\n",
296 (unsigned long)qdev->nic_stats.rx_pkts_ok);
297 printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
298 (unsigned long)qdev->nic_stats.rx_bcast_pkts);
299 printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
300 (unsigned long)qdev->nic_stats.rx_mcast_pkts);
301 printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
302 (unsigned long)qdev->nic_stats.rx_ucast_pkts);
303 printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
304 (unsigned long)qdev->nic_stats.rx_undersize_pkts);
305 printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
306 (unsigned long)qdev->nic_stats.rx_oversize_pkts);
307 printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
308 (unsigned long)qdev->nic_stats.rx_jabber_pkts);
309 printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
310 (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
311 printk(KERN_ERR "rx_drop_events = %ld.\n",
312 (unsigned long)qdev->nic_stats.rx_drop_events);
313 printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
314 (unsigned long)qdev->nic_stats.rx_fcerr_pkts);
315 printk(KERN_ERR "rx_align_err = %ld.\n",
316 (unsigned long)qdev->nic_stats.rx_align_err);
317 printk(KERN_ERR "rx_symbol_err = %ld.\n",
318 (unsigned long)qdev->nic_stats.rx_symbol_err);
319 printk(KERN_ERR "rx_mac_err = %ld.\n",
320 (unsigned long)qdev->nic_stats.rx_mac_err);
321 printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
322 (unsigned long)qdev->nic_stats.rx_ctl_pkts);
323 printk(KERN_ERR "rx_pause_pkts = %ld.\n",
324 (unsigned long)qdev->nic_stats.rx_pause_pkts);
325 printk(KERN_ERR "rx_64_pkts = %ld.\n",
326 (unsigned long)qdev->nic_stats.rx_64_pkts);
327 printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
328 (unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
329 printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
330 (unsigned long)qdev->nic_stats.rx_128_255_pkts);
331 printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
332 (unsigned long)qdev->nic_stats.rx_256_511_pkts);
333 printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
334 (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
335 printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
336 (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
337 printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
338 (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
339 printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
340 (unsigned long)qdev->nic_stats.rx_len_err_pkts);
341};
342#endif
343
344#ifdef QL_DEV_DUMP
/* Dump every interesting field of the adapter structure to the console
 * at KERN_ERR.  Debug-only helper; takes no locks, so on a live device
 * the values printed may be mutually inconsistent.
 */
void ql_dump_qdev(struct ql_adapter *qdev)
{
	int i;
	printk(KERN_ERR PFX "qdev->flags = %lx.\n",
	       qdev->flags);
	printk(KERN_ERR PFX "qdev->vlgrp = %p.\n",
	       qdev->vlgrp);
	printk(KERN_ERR PFX "qdev->pdev = %p.\n",
	       qdev->pdev);
	printk(KERN_ERR PFX "qdev->ndev = %p.\n",
	       qdev->ndev);
	printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n",
	       qdev->chip_rev_id);
	printk(KERN_ERR PFX "qdev->reg_base = %p.\n",
	       qdev->reg_base);
	printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n",
	       qdev->doorbell_area);
	printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n",
	       qdev->doorbell_area_size);
	printk(KERN_ERR PFX "msg_enable = %x.\n",
	       qdev->msg_enable);
	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n",
	       qdev->rx_ring_shadow_reg_area);
	/* DMA addresses are widened to unsigned long long so the %llx
	 * format is correct on both 32- and 64-bit builds. */
	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %llx.\n",
	       (unsigned long long) qdev->rx_ring_shadow_reg_dma);
	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n",
	       qdev->tx_ring_shadow_reg_area);
	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %llx.\n",
	       (unsigned long long) qdev->tx_ring_shadow_reg_dma);
	printk(KERN_ERR PFX "qdev->intr_count = %d.\n",
	       qdev->intr_count);
	/* MSI-X entries exist only when MSI-X is enabled. */
	if (qdev->msi_x_entry)
		for (i = 0; i < qdev->intr_count; i++) {
			printk(KERN_ERR PFX
			       "msi_x_entry.[%d]vector	= %d.\n", i,
			       qdev->msi_x_entry[i].vector);
			printk(KERN_ERR PFX
			       "msi_x_entry.[%d]entry	= %d.\n", i,
			       qdev->msi_x_entry[i].entry);
		}
	for (i = 0; i < qdev->intr_count; i++) {
		printk(KERN_ERR PFX
		       "intr_context[%d].qdev = %p.\n", i,
		       qdev->intr_context[i].qdev);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr = %d.\n", i,
		       qdev->intr_context[i].intr);
		printk(KERN_ERR PFX
		       "intr_context[%d].hooked = %d.\n", i,
		       qdev->intr_context[i].hooked);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr_en_mask = 0x%08x.\n", i,
		       qdev->intr_context[i].intr_en_mask);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr_dis_mask = 0x%08x.\n", i,
		       qdev->intr_context[i].intr_dis_mask);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr_read_mask = 0x%08x.\n", i,
		       qdev->intr_context[i].intr_read_mask);
	}
	printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
	printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
	printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
	printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem);
	printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count);
	printk(KERN_ERR PFX "qdev->tx_ring = %p.\n",
	       qdev->tx_ring);
	printk(KERN_ERR PFX "qdev->rss_ring_first_cq_id = %d.\n",
	       qdev->rss_ring_first_cq_id);
	printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n",
	       qdev->rss_ring_count);
	printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring);
	printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n",
	       qdev->default_rx_queue);
	printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n",
	       qdev->xg_sem_mask);
	printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n",
	       qdev->port_link_up);
	printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n",
	       qdev->port_init);

}
427#endif
428
429#ifdef QL_CB_DUMP
/* Dump a work queue initialization control block (wqicb) as the
 * hardware sees it.  Fields in the block are little-endian, so each
 * multi-byte value is byte-swapped to host order before printing.
 */
void ql_dump_wqicb(struct wqicb *wqicb)
{
	printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
	printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
	printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
	       le16_to_cpu(wqicb->cq_id_rss));
	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
	printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n",
	       le32_to_cpu(wqicb->addr_lo));
	printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n",
	       le32_to_cpu(wqicb->addr_hi));
	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
	       le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
	       le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
}
447
/* Dump the driver-side state of one transmit ring.  Safe to call with
 * a NULL ring pointer (silently returns).  tx_count is an atomic_t
 * and is read with atomic_read(); everything else is read raw.
 */
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
	if (tx_ring == NULL)
		return;
	printk(KERN_ERR PFX
	       "===================== Dumping tx_ring %d ===============.\n",
	       tx_ring->wq_id);
	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
	       (unsigned long long) tx_ring->wq_base_dma);
	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n",
	       tx_ring->cnsmr_idx_sh_reg);
	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n",
	       (unsigned long long) tx_ring->cnsmr_idx_sh_reg_dma);
	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
	       tx_ring->prod_idx_db_reg);
	printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
	       tx_ring->valid_db_reg);
	printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
	printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
	printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
	printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
	printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
	       atomic_read(&tx_ring->tx_count));
}
475
476void ql_dump_ricb(struct ricb *ricb)
477{
478 int i;
479 printk(KERN_ERR PFX
480 "===================== Dumping ricb ===============.\n");
481 printk(KERN_ERR PFX "Dumping ricb stuff...\n");
482
483 printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
484 printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
485 ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
486 ricb->flags & RSS_L6K ? "RSS_L6K " : "",
487 ricb->flags & RSS_LI ? "RSS_LI " : "",
488 ricb->flags & RSS_LB ? "RSS_LB " : "",
489 ricb->flags & RSS_LM ? "RSS_LM " : "",
490 ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
491 ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
492 ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
493 ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
494 printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
495 for (i = 0; i < 16; i++)
496 printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
497 le32_to_cpu(ricb->hash_cq_id[i]));
498 for (i = 0; i < 10; i++)
499 printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
500 le32_to_cpu(ricb->ipv6_hash_key[i]));
501 for (i = 0; i < 4; i++)
502 printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
503 le32_to_cpu(ricb->ipv4_hash_key[i]));
504}
505
/* Dump a completion queue initialization control block (cqicb).
 * Hardware fields are little-endian and are converted to host order
 * before printing; single-byte fields (msix_vect, flags) are printed
 * raw.
 */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	printk(KERN_ERR PFX "Dumping cqicb stuff...\n");

	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
	printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n",
	       le32_to_cpu(cqicb->addr_lo));
	printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n",
	       le32_to_cpu(cqicb->addr_hi));
	printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
	       le32_to_cpu(cqicb->prod_idx_addr_lo));
	printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
	       le32_to_cpu(cqicb->prod_idx_addr_hi));
	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
	       le16_to_cpu(cqicb->pkt_delay));
	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
	       le16_to_cpu(cqicb->irq_delay));
	printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n",
	       le32_to_cpu(cqicb->lbq_addr_lo));
	printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
	       le32_to_cpu(cqicb->lbq_addr_hi));
	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
	       le16_to_cpu(cqicb->lbq_buf_size));
	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
	       le16_to_cpu(cqicb->lbq_len));
	printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n",
	       le32_to_cpu(cqicb->sbq_addr_lo));
	printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
	       le32_to_cpu(cqicb->sbq_addr_hi));
	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
	       le16_to_cpu(cqicb->sbq_buf_size));
	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
	       le16_to_cpu(cqicb->sbq_len));
}
542
/* Dump the driver-side state of one receive (completion) ring,
 * including its large (lbq) and small (sbq) buffer queues.  Safe to
 * call with a NULL ring pointer (silently returns).
 */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (rx_ring == NULL)
		return;
	printk(KERN_ERR PFX
	       "===================== Dumping rx_ring %d ===============.\n",
	       rx_ring->cq_id);
	printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
	       rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
	printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
	printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
	printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
	       (unsigned long long) rx_ring->cq_base_dma);
	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
	/* Guard the dereference: the shadow register pointer may not be
	 * mapped yet when this dump runs. */
	printk(KERN_ERR PFX
	       "rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n",
	       rx_ring->prod_idx_sh_reg,
	       rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0);
	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
	       rx_ring->cnsmr_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
	printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
	printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
	       rx_ring->valid_db_reg);

	/* Large buffer queue state. */
	printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
	printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
	       (unsigned long long) rx_ring->lbq_base_dma);
	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
	       rx_ring->lbq_base_indirect);
	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
	       (unsigned long long) rx_ring->lbq_base_indirect_dma);
	printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
	printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
	printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
	       rx_ring->lbq_prod_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
	       rx_ring->lbq_prod_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
	       rx_ring->lbq_curr_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
	       rx_ring->lbq_clean_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
	       rx_ring->lbq_free_cnt);
	printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
	       rx_ring->lbq_buf_size);

	/* Small buffer queue state. */
	printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
	printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
	       (unsigned long long) rx_ring->sbq_base_dma);
	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
	       rx_ring->sbq_base_indirect);
	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
	       (unsigned long long) rx_ring->sbq_base_indirect_dma);
	printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
	printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
	printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
	       rx_ring->sbq_prod_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
	       rx_ring->sbq_prod_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
	       rx_ring->sbq_curr_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
	       rx_ring->sbq_clean_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
	       rx_ring->sbq_free_cnt);
	printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
	       rx_ring->sbq_buf_size);
	printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
	printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
	printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
	printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
}
623
624void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
625{
626 void *ptr;
627
628 printk(KERN_ERR PFX "%s: Enter.\n", __func__);
629
630 ptr = kmalloc(size, GFP_ATOMIC);
631 if (ptr == NULL) {
632 printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
633 __func__);
634 return;
635 }
636
637 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
638 printk(KERN_ERR "%s: Failed to upload control block!\n",
639 __func__);
640 goto fail_it;
641 }
642 switch (bit) {
643 case CFG_DRQ:
644 ql_dump_wqicb((struct wqicb *)ptr);
645 break;
646 case CFG_DCQ:
647 ql_dump_cqicb((struct cqicb *)ptr);
648 break;
649 case CFG_DR:
650 ql_dump_ricb((struct ricb *)ptr);
651 break;
652 default:
653 printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
654 __func__, bit);
655 break;
656 }
657fail_it:
658 kfree(ptr);
659}
660#endif
661
662#ifdef QL_OB_DUMP
663void ql_dump_tx_desc(struct tx_buf_desc *tbd)
664{
665 printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
666 le64_to_cpu((u64) tbd->addr));
667 printk(KERN_ERR PFX "tbd->len = %d\n",
668 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
669 printk(KERN_ERR PFX "tbd->flags = %s %s\n",
670 tbd->len & TX_DESC_C ? "C" : ".",
671 tbd->len & TX_DESC_E ? "E" : ".");
672 tbd++;
673 printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
674 le64_to_cpu((u64) tbd->addr));
675 printk(KERN_ERR PFX "tbd->len = %d\n",
676 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
677 printk(KERN_ERR PFX "tbd->flags = %s %s\n",
678 tbd->len & TX_DESC_C ? "C" : ".",
679 tbd->len & TX_DESC_E ? "E" : ".");
680 tbd++;
681 printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
682 le64_to_cpu((u64) tbd->addr));
683 printk(KERN_ERR PFX "tbd->len = %d\n",
684 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
685 printk(KERN_ERR PFX "tbd->flags = %s %s\n",
686 tbd->len & TX_DESC_C ? "C" : ".",
687 tbd->len & TX_DESC_E ? "E" : ".");
688
689}
690
691void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
692{
693 struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
694 (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
695 struct tx_buf_desc *tbd;
696 u16 frame_len;
697
698 printk(KERN_ERR PFX "%s\n", __func__);
699 printk(KERN_ERR PFX "opcode = %s\n",
700 (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
701 printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n",
702 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
703 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
704 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
705 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
706 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
707 printk(KERN_ERR PFX "flags2 = %s %s %s\n",
708 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
709 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
710 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
711 printk(KERN_ERR PFX "flags3 = %s %s %s \n",
712 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
713 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
714 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
715 printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
716 printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
717 printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
718 if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
719 printk(KERN_ERR PFX "frame_len = %d\n",
720 le32_to_cpu(ob_mac_tso_iocb->frame_len));
721 printk(KERN_ERR PFX "mss = %d\n",
722 le16_to_cpu(ob_mac_tso_iocb->mss));
723 printk(KERN_ERR PFX "prot_hdr_len = %d\n",
724 le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
725 printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n",
726 le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
727 frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
728 } else {
729 printk(KERN_ERR PFX "frame_len = %d\n",
730 le16_to_cpu(ob_mac_iocb->frame_len));
731 frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
732 }
733 tbd = &ob_mac_iocb->tbd[0];
734 ql_dump_tx_desc(tbd);
735}
736
/* Dump an outbound MAC IOCB response: opcode, status flag bits and
 * the transaction id that matches it to its request.
 */
void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
{
	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode         = %d\n", ob_mac_rsp->opcode);
	/* Note: the last flag (B) lives in flags2, not flags1. */
	printk(KERN_ERR PFX "flags          = %s %s %s %s %s %s %s\n",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
	printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
}
751#endif
752
753#ifdef QL_IB_DUMP
/* Dump an inbound MAC IOCB response: status flags, multicast/error/
 * RSS decodes, data and (optional) split-header buffer descriptors.
 * Single-byte flag fields are tested raw; flags4 is 32 bits and is
 * byte-swapped before each test.
 */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode         = 0x%x\n", ib_mac_rsp->opcode);
	printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");

	/* The M field is a multi-bit code, so compare against each
	 * value rather than testing individual bits. */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		printk(KERN_ERR PFX "%s%s%s Multicast.\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");

	printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");

	/* ERR is likewise a multi-bit error code. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");

	printk(KERN_ERR PFX "flags3 = %s%s.\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");

	printk(KERN_ERR PFX "data_len	= %d\n",
	       le32_to_cpu(ib_mac_rsp->data_len));
	printk(KERN_ERR PFX "data_addr_hi    = 0x%x\n",
	       le32_to_cpu(ib_mac_rsp->data_addr_hi));
	printk(KERN_ERR PFX "data_addr_lo    = 0x%x\n",
	       le32_to_cpu(ib_mac_rsp->data_addr_lo));
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		printk(KERN_ERR PFX "rss    = %x\n",
		       le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		printk(KERN_ERR PFX "vlan_id    = %x\n",
		       le16_to_cpu(ib_mac_rsp->vlan_id));

	printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
	       le32_to_cpu(ib_mac_rsp->
			   flags4) & IB_MAC_IOCB_RSP_HV ? "HV " : "",
	       le32_to_cpu(ib_mac_rsp->
			   flags4) & IB_MAC_IOCB_RSP_HS ? "HS " : "",
	       le32_to_cpu(ib_mac_rsp->
			   flags4) & IB_MAC_IOCB_RSP_HL ? "HL " : "");

	/* Header buffer fields are only valid when header split (HV)
	 * was performed on this frame. */
	if (le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HV) {
		printk(KERN_ERR PFX "hdr length	= %d.\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		printk(KERN_ERR PFX "hdr addr_hi	= 0x%x.\n",
		       le32_to_cpu(ib_mac_rsp->hdr_addr_hi));
		printk(KERN_ERR PFX "hdr addr_lo	= 0x%x.\n",
		       le32_to_cpu(ib_mac_rsp->hdr_addr_lo));
	}
}
840#endif
841
842#ifdef QL_ALL_DUMP
/* Dump everything: registers, adapter state, then every TX ring with
 * its work queue control block and every RX ring with its completion
 * queue control block.
 * NOTE(review): the casts (struct wqicb *)&qdev->tx_ring[i] and
 * (struct cqicb *)&qdev->rx_ring[i] assume the control block is the
 * FIRST member of the respective ring struct — confirm against
 * qlge.h before reordering those structs.
 */
void ql_dump_all(struct ql_adapter *qdev)
{
	int i;

	QL_DUMP_REGS(qdev);
	QL_DUMP_QDEV(qdev);
	for (i = 0; i < qdev->tx_ring_count; i++) {
		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
	}
	for (i = 0; i < qdev->rx_ring_count; i++) {
		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
	}
}
858#endif
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
new file mode 100644
index 000000000000..6457f8c4fdaa
--- /dev/null
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -0,0 +1,415 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/types.h>
4#include <linux/module.h>
5#include <linux/list.h>
6#include <linux/pci.h>
7#include <linux/dma-mapping.h>
8#include <linux/pagemap.h>
9#include <linux/sched.h>
10#include <linux/slab.h>
11#include <linux/dmapool.h>
12#include <linux/mempool.h>
13#include <linux/spinlock.h>
14#include <linux/kthread.h>
15#include <linux/interrupt.h>
16#include <linux/errno.h>
17#include <linux/ioport.h>
18#include <linux/in.h>
19#include <linux/ip.h>
20#include <linux/ipv6.h>
21#include <net/ipv6.h>
22#include <linux/tcp.h>
23#include <linux/udp.h>
24#include <linux/if_arp.h>
25#include <linux/if_ether.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/skbuff.h>
30#include <linux/rtnetlink.h>
31#include <linux/if_vlan.h>
32#include <linux/init.h>
33#include <linux/delay.h>
34#include <linux/mm.h>
35#include <linux/vmalloc.h>
36
37#include <linux/version.h>
38
39#include "qlge.h"
40
41static int ql_update_ring_coalescing(struct ql_adapter *qdev)
42{
43 int i, status = 0;
44 struct rx_ring *rx_ring;
45 struct cqicb *cqicb;
46
47 if (!netif_running(qdev->ndev))
48 return status;
49
50 spin_lock(&qdev->hw_lock);
51 /* Skip the default queue, and update the outbound handler
52 * queues if they changed.
53 */
54 cqicb = (struct cqicb *)&qdev->rx_ring[1];
55 if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
56 le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
57 for (i = 1; i < qdev->rss_ring_first_cq_id; i++, rx_ring++) {
58 rx_ring = &qdev->rx_ring[i];
59 cqicb = (struct cqicb *)rx_ring;
60 cqicb->irq_delay = le16_to_cpu(qdev->tx_coalesce_usecs);
61 cqicb->pkt_delay =
62 le16_to_cpu(qdev->tx_max_coalesced_frames);
63 cqicb->flags = FLAGS_LI;
64 status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
65 CFG_LCQ, rx_ring->cq_id);
66 if (status) {
67 QPRINTK(qdev, IFUP, ERR,
68 "Failed to load CQICB.\n");
69 goto exit;
70 }
71 }
72 }
73
74 /* Update the inbound (RSS) handler queues if they changed. */
75 cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id];
76 if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
77 le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
78 for (i = qdev->rss_ring_first_cq_id;
79 i <= qdev->rss_ring_first_cq_id + qdev->rss_ring_count;
80 i++) {
81 rx_ring = &qdev->rx_ring[i];
82 cqicb = (struct cqicb *)rx_ring;
83 cqicb->irq_delay = le16_to_cpu(qdev->rx_coalesce_usecs);
84 cqicb->pkt_delay =
85 le16_to_cpu(qdev->rx_max_coalesced_frames);
86 cqicb->flags = FLAGS_LI;
87 status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
88 CFG_LCQ, rx_ring->cq_id);
89 if (status) {
90 QPRINTK(qdev, IFUP, ERR,
91 "Failed to load CQICB.\n");
92 goto exit;
93 }
94 }
95 }
96exit:
97 spin_unlock(&qdev->hw_lock);
98 return status;
99}
100
/* Read the XGMAC hardware statistics registers into qdev->nic_stats.
 * TX counters live at register offsets 0x200-0x278 and RX counters at
 * 0x300-0x3c8, each an 8-byte register.
 *
 * NOTE(review): the walk writes successive u64 values through 'iter',
 * starting at nic_stats.tx_pkts — this relies on the members of
 * struct nic_stats being contiguous u64s in exactly register order;
 * confirm against qlge.h before reordering that struct.
 *
 * Holds stats_lock across the whole read and the XGMAC hardware
 * semaphore while touching the registers; a failed register read
 * aborts the walk, leaving later counters stale.
 */
void ql_update_stats(struct ql_adapter *qdev)
{
	u32 i;
	u64 data;
	u64 *iter = &qdev->nic_stats.tx_pkts;

	spin_lock(&qdev->stats_lock);
	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
			QPRINTK(qdev, DRV, ERR,
				"Couldn't get xgmac sem.\n");
		goto quit;
	}
	/*
	 * Get TX statistics.
	 */
	for (i = 0x200; i < 0x280; i += 8) {
		if (ql_read_xgmac_reg64(qdev, i, &data)) {
			QPRINTK(qdev, DRV, ERR,
				"Error reading status register 0x%.04x.\n", i);
			goto end;
		} else
			*iter = data;
		iter++;
	}

	/*
	 * Get RX statistics.
	 */
	for (i = 0x300; i < 0x3d0; i += 8) {
		if (ql_read_xgmac_reg64(qdev, i, &data)) {
			QPRINTK(qdev, DRV, ERR,
				"Error reading status register 0x%.04x.\n", i);
			goto end;
		} else
			*iter = data;
		iter++;
	}

end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
	spin_unlock(&qdev->stats_lock);

	QL_DUMP_STAT(qdev);

	return;
}
148
/* Names reported for ETH_SS_STATS, one per ETH_GSTRING_LEN slot.
 * The order here MUST match the copy order in ql_get_ethtool_stats()
 * (and, by extension, the register walk in ql_update_stats()).
 */
static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
	{"tx_pkts"},
	{"tx_bytes"},
	{"tx_mcast_pkts"},
	{"tx_bcast_pkts"},
	{"tx_ucast_pkts"},
	{"tx_ctl_pkts"},
	{"tx_pause_pkts"},
	{"tx_64_pkts"},
	{"tx_65_to_127_pkts"},
	{"tx_128_to_255_pkts"},
	{"tx_256_511_pkts"},
	{"tx_512_to_1023_pkts"},
	{"tx_1024_to_1518_pkts"},
	{"tx_1519_to_max_pkts"},
	{"tx_undersize_pkts"},
	{"tx_oversize_pkts"},
	{"rx_bytes"},
	{"rx_bytes_ok"},
	{"rx_pkts"},
	{"rx_pkts_ok"},
	{"rx_bcast_pkts"},
	{"rx_mcast_pkts"},
	{"rx_ucast_pkts"},
	{"rx_undersize_pkts"},
	{"rx_oversize_pkts"},
	{"rx_jabber_pkts"},
	{"rx_undersize_fcerr_pkts"},
	{"rx_drop_events"},
	{"rx_fcerr_pkts"},
	{"rx_align_err"},
	{"rx_symbol_err"},
	{"rx_mac_err"},
	{"rx_ctl_pkts"},
	{"rx_pause_pkts"},
	{"rx_64_pkts"},
	{"rx_65_to_127_pkts"},
	{"rx_128_255_pkts"},
	{"rx_256_511_pkts"},
	{"rx_512_to_1023_pkts"},
	{"rx_1024_to_1518_pkts"},
	{"rx_1519_to_max_pkts"},
	{"rx_len_err_pkts"},
};
193
194static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
195{
196 switch (stringset) {
197 case ETH_SS_STATS:
198 memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
199 break;
200 }
201}
202
203static int ql_get_sset_count(struct net_device *dev, int sset)
204{
205 switch (sset) {
206 case ETH_SS_STATS:
207 return ARRAY_SIZE(ql_stats_str_arr);
208 default:
209 return -EOPNOTSUPP;
210 }
211}
212
/* ethtool get_ethtool_stats: refresh the hardware counters and copy
 * them into 'data'.  The copy order below MUST match
 * ql_stats_str_arr, entry for entry.
 */
static void
ql_get_ethtool_stats(struct net_device *ndev,
		     struct ethtool_stats *stats, u64 *data)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct nic_stats *s = &qdev->nic_stats;

	/* Pull fresh values out of the XGMAC registers first. */
	ql_update_stats(qdev);

	*data++ = s->tx_pkts;
	*data++ = s->tx_bytes;
	*data++ = s->tx_mcast_pkts;
	*data++ = s->tx_bcast_pkts;
	*data++ = s->tx_ucast_pkts;
	*data++ = s->tx_ctl_pkts;
	*data++ = s->tx_pause_pkts;
	*data++ = s->tx_64_pkt;
	*data++ = s->tx_65_to_127_pkt;
	*data++ = s->tx_128_to_255_pkt;
	*data++ = s->tx_256_511_pkt;
	*data++ = s->tx_512_to_1023_pkt;
	*data++ = s->tx_1024_to_1518_pkt;
	*data++ = s->tx_1519_to_max_pkt;
	*data++ = s->tx_undersize_pkt;
	*data++ = s->tx_oversize_pkt;
	*data++ = s->rx_bytes;
	*data++ = s->rx_bytes_ok;
	*data++ = s->rx_pkts;
	*data++ = s->rx_pkts_ok;
	*data++ = s->rx_bcast_pkts;
	*data++ = s->rx_mcast_pkts;
	*data++ = s->rx_ucast_pkts;
	*data++ = s->rx_undersize_pkts;
	*data++ = s->rx_oversize_pkts;
	*data++ = s->rx_jabber_pkts;
	*data++ = s->rx_undersize_fcerr_pkts;
	*data++ = s->rx_drop_events;
	*data++ = s->rx_fcerr_pkts;
	*data++ = s->rx_align_err;
	*data++ = s->rx_symbol_err;
	*data++ = s->rx_mac_err;
	*data++ = s->rx_ctl_pkts;
	*data++ = s->rx_pause_pkts;
	*data++ = s->rx_64_pkts;
	*data++ = s->rx_65_to_127_pkts;
	*data++ = s->rx_128_255_pkts;
	*data++ = s->rx_256_511_pkts;
	*data++ = s->rx_512_to_1023_pkts;
	*data++ = s->rx_1024_to_1518_pkts;
	*data++ = s->rx_1519_to_max_pkts;
	*data++ = s->rx_len_err_pkts;
}
265
266static int ql_get_settings(struct net_device *ndev,
267 struct ethtool_cmd *ecmd)
268{
269 struct ql_adapter *qdev = netdev_priv(ndev);
270
271 ecmd->supported = SUPPORTED_10000baseT_Full;
272 ecmd->advertising = ADVERTISED_10000baseT_Full;
273 ecmd->autoneg = AUTONEG_ENABLE;
274 ecmd->transceiver = XCVR_EXTERNAL;
275 if ((qdev->link_status & LINK_TYPE_MASK) == LINK_TYPE_10GBASET) {
276 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
277 ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
278 ecmd->port = PORT_TP;
279 } else {
280 ecmd->supported |= SUPPORTED_FIBRE;
281 ecmd->advertising |= ADVERTISED_FIBRE;
282 ecmd->port = PORT_FIBRE;
283 }
284
285 ecmd->speed = SPEED_10000;
286 ecmd->duplex = DUPLEX_FULL;
287
288 return 0;
289}
290
291static void ql_get_drvinfo(struct net_device *ndev,
292 struct ethtool_drvinfo *drvinfo)
293{
294 struct ql_adapter *qdev = netdev_priv(ndev);
295 strncpy(drvinfo->driver, qlge_driver_name, 32);
296 strncpy(drvinfo->version, qlge_driver_version, 32);
297 strncpy(drvinfo->fw_version, "N/A", 32);
298 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
299 drvinfo->n_stats = 0;
300 drvinfo->testinfo_len = 0;
301 drvinfo->regdump_len = 0;
302 drvinfo->eedump_len = 0;
303}
304
/* ethtool get_coalesce: report the current interrupt coalescing
 * parameters cached in the adapter structure.  Always succeeds.
 */
static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct ql_adapter *qdev = netdev_priv(dev);

	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;

	/* This chip coalesces as follows:
	 * If a packet arrives, hold off interrupts until
	 * cqicb->int_delay expires, but if no other packets arrive don't
	 * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
	 * timer to coalesce on a frame basis.  So, we have to take ethtool's
	 * max_coalesced_frames value and convert it to a delay in microseconds.
	 * We do this by using a basic thoughput of 1,000,000 frames per
	 * second @ (1024 bytes).  This means one frame per usec. So it's a
	 * simple one to one ratio.
	 */
	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;

	return 0;
}
327
328static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
329{
330 struct ql_adapter *qdev = netdev_priv(ndev);
331
332 /* Validate user parameters. */
333 if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
334 return -EINVAL;
335 /* Don't wait more than 10 usec. */
336 if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
337 return -EINVAL;
338 if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
339 return -EINVAL;
340 if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
341 return -EINVAL;
342
343 /* Verify a change took place before updating the hardware. */
344 if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
345 qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
346 qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
347 qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
348 return 0;
349
350 qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
351 qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
352 qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
353 qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
354
355 return ql_update_ring_coalescing(qdev);
356}
357
358static u32 ql_get_rx_csum(struct net_device *netdev)
359{
360 struct ql_adapter *qdev = netdev_priv(netdev);
361 return qdev->rx_csum;
362}
363
364static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
365{
366 struct ql_adapter *qdev = netdev_priv(netdev);
367 qdev->rx_csum = data;
368 return 0;
369}
370
371static int ql_set_tso(struct net_device *ndev, uint32_t data)
372{
373
374 if (data) {
375 ndev->features |= NETIF_F_TSO;
376 ndev->features |= NETIF_F_TSO6;
377 } else {
378 ndev->features &= ~NETIF_F_TSO;
379 ndev->features &= ~NETIF_F_TSO6;
380 }
381 return 0;
382}
383
384static u32 ql_get_msglevel(struct net_device *ndev)
385{
386 struct ql_adapter *qdev = netdev_priv(ndev);
387 return qdev->msg_enable;
388}
389
390static void ql_set_msglevel(struct net_device *ndev, u32 value)
391{
392 struct ql_adapter *qdev = netdev_priv(ndev);
393 qdev->msg_enable = value;
394}
395
/* ethtool entry points for the qlge driver; hooked up to the netdev
 * in qlge_main.c.  Unlisted operations fall back to the ethtool core
 * defaults.
 */
const struct ethtool_ops qlge_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_rx_csum = ql_get_rx_csum,
	.set_rx_csum = ql_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ql_set_tso,
	.get_coalesce = ql_get_coalesce,
	.set_coalesce = ql_set_coalesce,
	.get_sset_count = ql_get_sset_count,
	.get_strings = ql_get_strings,
	.get_ethtool_stats = ql_get_ethtool_stats,
};
415
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
new file mode 100644
index 000000000000..3af822b6226e
--- /dev/null
+++ b/drivers/net/qlge/qlge_main.c
@@ -0,0 +1,3956 @@
1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
37#include <linux/rtnetlink.h>
38#include <linux/if_vlan.h>
39#include <linux/init.h>
40#include <linux/delay.h>
41#include <linux/mm.h>
42#include <linux/vmalloc.h>
43
44#include "qlge.h"
45
/* Driver identity strings, reported via ethtool and log messages. */
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;
48
MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Default message-enable mask: everything except timer and packet-data
 * spam (those two are commented out below).
 */
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
    NETIF_MSG_TX_QUEUED |
    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

/* Module parameter overriding default_msg; 0x7fff mirrors the bits above. */
static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70#define MSIX_IRQ 0
71#define MSI_IRQ 1
72#define LEG_IRQ 2
73static int irq_type = MSIX_IRQ;
74module_param(irq_type, int, MSIX_IRQ);
75MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76
/* PCI IDs this driver binds to; exported for hotplug/module autoloading. */
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
85
86/* This hardware semaphore causes exclusive access to
87 * resources shared between the NIC driver, MPI firmware,
88 * FCOE firmware and the FC driver.
89 */
90static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
91{
92 u32 sem_bits = 0;
93
94 switch (sem_mask) {
95 case SEM_XGMAC0_MASK:
96 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
97 break;
98 case SEM_XGMAC1_MASK:
99 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
100 break;
101 case SEM_ICB_MASK:
102 sem_bits = SEM_SET << SEM_ICB_SHIFT;
103 break;
104 case SEM_MAC_ADDR_MASK:
105 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
106 break;
107 case SEM_FLASH_MASK:
108 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
109 break;
110 case SEM_PROBE_MASK:
111 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
112 break;
113 case SEM_RT_IDX_MASK:
114 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
115 break;
116 case SEM_PROC_REG_MASK:
117 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
118 break;
119 default:
120 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
121 return -EINVAL;
122 }
123
124 ql_write32(qdev, SEM, sem_bits | sem_mask);
125 return !(ql_read32(qdev, SEM) & sem_bits);
126}
127
128int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
129{
130 unsigned int seconds = 3;
131 do {
132 if (!ql_sem_trylock(qdev, sem_mask))
133 return 0;
134 ssleep(1);
135 } while (--seconds);
136 return -ETIMEDOUT;
137}
138
139void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
140{
141 ql_write32(qdev, SEM, sem_mask);
142 ql_read32(qdev, SEM); /* flush */
143}
144
145/* This function waits for a specific bit to come ready
146 * in a given register. It is used mostly by the initialize
147 * process, but is also used in kernel thread API such as
148 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
149 */
150int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
151{
152 u32 temp;
153 int count = UDELAY_COUNT;
154
155 while (count) {
156 temp = ql_read32(qdev, reg);
157
158 /* check for errors */
159 if (temp & err_bit) {
160 QPRINTK(qdev, PROBE, ALERT,
161 "register 0x%.08x access error, value = 0x%.08x!.\n",
162 reg, temp);
163 return -EIO;
164 } else if (temp & bit)
165 return 0;
166 udelay(UDELAY_DELAY);
167 count--;
168 }
169 QPRINTK(qdev, PROBE, ALERT,
170 "Timed out waiting for reg %x to come ready.\n", reg);
171 return -ETIMEDOUT;
172}
173
174/* The CFG register is used to download TX and RX control blocks
175 * to the chip. This function waits for an operation to complete.
176 */
177static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
178{
179 int count = UDELAY_COUNT;
180 u32 temp;
181
182 while (count) {
183 temp = ql_read32(qdev, CFG);
184 if (temp & CFG_LE)
185 return -EIO;
186 if (!(temp & bit))
187 return 0;
188 udelay(UDELAY_DELAY);
189 count--;
190 }
191 return -ETIMEDOUT;
192}
193
194
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 * ptr/size describe the control block in host memory, bit selects the
 * CFG operation, q_id the target queue.
 * Returns 0 on success or a negative errno.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	/* Load operations (LRQ/LR/LCQ) make the chip read from host
	 * memory; anything else dumps chip state into host memory.
	 */
	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	/* Make sure no previous CFG operation is still in flight. */
	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	/* The ICB address registers are shared with other functions,
	 * so serialize access with the hardware semaphore.
	 */
	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto exit;
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */

	/* Upper 16 bits of CFG are a write mask for the bits we touch. */
	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
243
/* Get a specific MAC address from the CAM. Used for debug and reg dump.
 * Reads two 32-bit words into value[] for multicast entries and three
 * for CAM entries (the third word is the routing/output word) — the
 * caller must size value[] accordingly.
 * Returns 0 on success or a negative errno.
 */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			/* Each word follows the same protocol: wait for the
			 * index register ready (MW), select offset/index,
			 * wait for the read to complete (MR), fetch data.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				/* CAM entries carry a third word with the
				 * routing/output information.
				 */
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, MAC_ADDR_E);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
315
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 * Returns 0 on success or a negative errno.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			/* Split the 6-byte MAC: bytes 0-1 go in the upper
			 * word, bytes 2-5 in the lower word.
			 */
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, INFO,
				"Adding %s address %02x:%02x:%02x:%02x:%02x:%02x"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr[0], addr[1], addr[2], addr[3],
				addr[4], addr[5], index);

			/* Each write: wait for index-register ready, select
			 * offset/index, then write the data word.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->
					       func << CAM_OUT_FUNC_SHIFT) |
					      (qdev->
					       rss_ring_first_cq_id <<
					       CAM_OUT_CQ_ID_SHIFT));
				/* Tag the entry for VLAN routing when a
				 * vlan group is registered.
				 */
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
423
424/* Get a specific frame routing value from the CAM.
425 * Used for debug and reg dump.
426 */
427int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
428{
429 int status = 0;
430
431 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
432 if (status)
433 goto exit;
434
435 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
436 if (status)
437 goto exit;
438
439 ql_write32(qdev, RT_IDX,
440 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
441 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
442 if (status)
443 goto exit;
444 *value = ql_read32(qdev, RT_DATA);
445exit:
446 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
447 return status;
448}
449
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 * index selects the routing slot, mask selects the frame class, enable
 * sets or clears the entry. Returns 0 on success or a negative errno.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status;
	u32 value = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	/* Only the one string matching index is non-empty below. */
	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	/* Build the RT_IDX word: destination queue, entry type, slot. */
	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		/* RT_DATA holds the match mask; 0 effectively disables. */
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
563
/* Globally enable interrupts: upper 16 bits are the write mask,
 * lower 16 bits set the enable bit.
 */
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}
568
/* Globally disable interrupts: mask bit set, enable bit clear. */
static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
573
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented everytime we queue a worker and decremented everytime
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	/* MSI-X: per-vector enable, no refcounting needed. */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
		ql_write32(qdev, INTR_EN,
			   qdev->intr_context[intr].intr_en_mask);
	else {
		/* Legacy/MSI: serialize against the ISR's toggling of
		 * the shared enable register.
		 */
		if (qdev->legacy_check)
			spin_lock(&qdev->legacy_lock);
		if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
			/* NOTE(review): these messages log at ERR level but
			 * read like debug traces — confirm intended severity.
			 */
			QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
				intr);
			ql_write32(qdev, INTR_EN,
				   qdev->intr_context[intr].intr_en_mask);
		} else {
			QPRINTK(qdev, INTR, ERR,
				"Skip enable, other queue(s) are active.\n");
		}
		if (qdev->legacy_check)
			spin_unlock(&qdev->legacy_lock);
	}
}
601
/* Disable a completion interrupt for the non-MSI-X case, counting how
 * many disables are outstanding via irq_cnt (re-enabled by
 * ql_enable_completion_interrupt when the count drops back to zero).
 * MSI-X vectors are auto-masked, so nothing is written for them.
 * Returns the STS register value read to flush the disable, or 0.
 */
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
		goto exit;
	else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) {
		/* First disabler actually writes the mask; flush with a read. */
		ql_write32(qdev, INTR_EN,
			   qdev->intr_context[intr].intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	/* Track this disable so the matching enable can refcount it. */
	atomic_inc(&qdev->intr_context[intr].irq_cnt);
exit:
	return var;
}
617
618static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
619{
620 int i;
621 for (i = 0; i < qdev->intr_count; i++) {
622 /* The enable call does a atomic_dec_and_test
623 * and enables only if the result is zero.
624 * So we precharge it here.
625 */
626 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
627 ql_enable_completion_interrupt(qdev, i);
628 }
629
630}
631
632int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
633{
634 int status = 0;
635 /* wait for reg to come ready */
636 status = ql_wait_reg_rdy(qdev,
637 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
638 if (status)
639 goto exit;
640 /* set up for reg read */
641 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
642 /* wait for reg to come ready */
643 status = ql_wait_reg_rdy(qdev,
644 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
645 if (status)
646 goto exit;
647 /* get the data */
648 *data = ql_read32(qdev, FLASH_DATA);
649exit:
650 return status;
651}
652
653static int ql_get_flash_params(struct ql_adapter *qdev)
654{
655 int i;
656 int status;
657 u32 *p = (u32 *)&qdev->flash;
658
659 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
660 return -ETIMEDOUT;
661
662 for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
663 status = ql_read_flash_word(qdev, i, p);
664 if (status) {
665 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
666 goto exit;
667 }
668
669 }
670exit:
671 ql_sem_unlock(qdev, SEM_FLASH_MASK);
672 return status;
673}
674
675/* xgmac register are located behind the xgmac_addr and xgmac_data
676 * register pair. Each read/write requires us to wait for the ready
677 * bit before reading/writing the data.
678 */
679static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
680{
681 int status;
682 /* wait for reg to come ready */
683 status = ql_wait_reg_rdy(qdev,
684 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
685 if (status)
686 return status;
687 /* write the data to the data reg */
688 ql_write32(qdev, XGMAC_DATA, data);
689 /* trigger the write */
690 ql_write32(qdev, XGMAC_ADDR, reg);
691 return status;
692}
693
694/* xgmac register are located behind the xgmac_addr and xgmac_data
695 * register pair. Each read/write requires us to wait for the ready
696 * bit before reading/writing the data.
697 */
698int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
699{
700 int status = 0;
701 /* wait for reg to come ready */
702 status = ql_wait_reg_rdy(qdev,
703 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
704 if (status)
705 goto exit;
706 /* set up for reg read */
707 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
708 /* wait for reg to come ready */
709 status = ql_wait_reg_rdy(qdev,
710 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
711 if (status)
712 goto exit;
713 /* get the data */
714 *data = ql_read32(qdev, XGMAC_DATA);
715exit:
716 return status;
717}
718
719/* This is used for reading the 64-bit statistics regs. */
720int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
721{
722 int status = 0;
723 u32 hi = 0;
724 u32 lo = 0;
725
726 status = ql_read_xgmac_reg(qdev, reg, &lo);
727 if (status)
728 goto exit;
729
730 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
731 if (status)
732 goto exit;
733
734 *data = (u64) lo | ((u64) hi << 32);
735
736exit:
737 return status;
738}
739
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 * Returns 0 on success or a negative errno.
 */
static int ql_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear it's reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear it's reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	/* NOTE(review): 0x2580 (= 9600 decimal) is presumably the max
	 * frame size for jumbo support — confirm against the hw manual.
	 */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
820
821/* Get the next large buffer. */
822struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
823{
824 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
825 rx_ring->lbq_curr_idx++;
826 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
827 rx_ring->lbq_curr_idx = 0;
828 rx_ring->lbq_free_cnt++;
829 return lbq_desc;
830}
831
832/* Get the next small buffer. */
833struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
834{
835 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
836 rx_ring->sbq_curr_idx++;
837 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
838 rx_ring->sbq_curr_idx = 0;
839 rx_ring->sbq_free_cnt++;
840 return sbq_desc;
841}
842
843/* Update an rx ring index. */
844static void ql_update_cq(struct rx_ring *rx_ring)
845{
846 rx_ring->cnsmr_idx++;
847 rx_ring->curr_entry++;
848 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
849 rx_ring->cnsmr_idx = 0;
850 rx_ring->curr_entry = rx_ring->cq_base;
851 }
852}
853
/* Tell the chip how far we have consumed by writing the consumer
 * index to the ring's doorbell register.
 */
static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
858
859/* Process (refill) a large buffer queue. */
860static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
861{
862 int clean_idx = rx_ring->lbq_clean_idx;
863 struct bq_desc *lbq_desc;
864 struct bq_element *bq;
865 u64 map;
866 int i;
867
868 while (rx_ring->lbq_free_cnt > 16) {
869 for (i = 0; i < 16; i++) {
870 QPRINTK(qdev, RX_STATUS, DEBUG,
871 "lbq: try cleaning clean_idx = %d.\n",
872 clean_idx);
873 lbq_desc = &rx_ring->lbq[clean_idx];
874 bq = lbq_desc->bq;
875 if (lbq_desc->p.lbq_page == NULL) {
876 QPRINTK(qdev, RX_STATUS, DEBUG,
877 "lbq: getting new page for index %d.\n",
878 lbq_desc->index);
879 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
880 if (lbq_desc->p.lbq_page == NULL) {
881 QPRINTK(qdev, RX_STATUS, ERR,
882 "Couldn't get a page.\n");
883 return;
884 }
885 map = pci_map_page(qdev->pdev,
886 lbq_desc->p.lbq_page,
887 0, PAGE_SIZE,
888 PCI_DMA_FROMDEVICE);
889 if (pci_dma_mapping_error(qdev->pdev, map)) {
890 QPRINTK(qdev, RX_STATUS, ERR,
891 "PCI mapping failed.\n");
892 return;
893 }
894 pci_unmap_addr_set(lbq_desc, mapaddr, map);
895 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
896 bq->addr_lo = /*lbq_desc->addr_lo = */
897 cpu_to_le32(map);
898 bq->addr_hi = /*lbq_desc->addr_hi = */
899 cpu_to_le32(map >> 32);
900 }
901 clean_idx++;
902 if (clean_idx == rx_ring->lbq_len)
903 clean_idx = 0;
904 }
905
906 rx_ring->lbq_clean_idx = clean_idx;
907 rx_ring->lbq_prod_idx += 16;
908 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
909 rx_ring->lbq_prod_idx = 0;
910 QPRINTK(qdev, RX_STATUS, DEBUG,
911 "lbq: updating prod idx = %d.\n",
912 rx_ring->lbq_prod_idx);
913 ql_write_db_reg(rx_ring->lbq_prod_idx,
914 rx_ring->lbq_prod_idx_db_reg);
915 rx_ring->lbq_free_cnt -= 16;
916 }
917}
918
919/* Process (refill) a small buffer queue. */
920static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
921{
922 int clean_idx = rx_ring->sbq_clean_idx;
923 struct bq_desc *sbq_desc;
924 struct bq_element *bq;
925 u64 map;
926 int i;
927
928 while (rx_ring->sbq_free_cnt > 16) {
929 for (i = 0; i < 16; i++) {
930 sbq_desc = &rx_ring->sbq[clean_idx];
931 QPRINTK(qdev, RX_STATUS, DEBUG,
932 "sbq: try cleaning clean_idx = %d.\n",
933 clean_idx);
934 bq = sbq_desc->bq;
935 if (sbq_desc->p.skb == NULL) {
936 QPRINTK(qdev, RX_STATUS, DEBUG,
937 "sbq: getting new skb for index %d.\n",
938 sbq_desc->index);
939 sbq_desc->p.skb =
940 netdev_alloc_skb(qdev->ndev,
941 rx_ring->sbq_buf_size);
942 if (sbq_desc->p.skb == NULL) {
943 QPRINTK(qdev, PROBE, ERR,
944 "Couldn't get an skb.\n");
945 rx_ring->sbq_clean_idx = clean_idx;
946 return;
947 }
948 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
949 map = pci_map_single(qdev->pdev,
950 sbq_desc->p.skb->data,
951 rx_ring->sbq_buf_size /
952 2, PCI_DMA_FROMDEVICE);
953 pci_unmap_addr_set(sbq_desc, mapaddr, map);
954 pci_unmap_len_set(sbq_desc, maplen,
955 rx_ring->sbq_buf_size / 2);
956 bq->addr_lo = cpu_to_le32(map);
957 bq->addr_hi = cpu_to_le32(map >> 32);
958 }
959
960 clean_idx++;
961 if (clean_idx == rx_ring->sbq_len)
962 clean_idx = 0;
963 }
964 rx_ring->sbq_clean_idx = clean_idx;
965 rx_ring->sbq_prod_idx += 16;
966 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
967 rx_ring->sbq_prod_idx = 0;
968 QPRINTK(qdev, RX_STATUS, DEBUG,
969 "sbq: updating prod idx = %d.\n",
970 rx_ring->sbq_prod_idx);
971 ql_write_db_reg(rx_ring->sbq_prod_idx,
972 rx_ring->sbq_prod_idx_db_reg);
973
974 rx_ring->sbq_free_cnt -= 16;
975 }
976}
977
/* Refill both the small and large receive buffer queues for a ring. */
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
984
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 * mapped is the number of map[] entries ql_map_send() filled in.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		/* Entries 0 and (when present) 7 were mapped with
		 * pci_map_single(); everything else with pci_map_page().
		 */
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If its the zeroeth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there is more than 6 frags,
			 * then its an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}
1025
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 * On success the addresses are recorded both in the IOCB/OAL segment
 * descriptors (for the hardware) and in tx_ring_desc->map[] (for the
 * eventual unmap).
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	/* Segment 0 of the IOCB is always the linear skb data. */
	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		/* NOTE(review): this switches to the OAL while processing
		 * the 7th frag (frag_idx == 6) when more than 7 frags
		 * exist — confirm the boundary matches the 8-tbd IOCB
		 * layout described above.
		 */
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			/* Subsequent tbd writes land in the OAL. */
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be umapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
1155
1156void ql_realign_skb(struct sk_buff *skb, int len)
1157{
1158 void *temp_addr = skb->data;
1159
1160 /* Undo the skb_reserve(skb,32) we did before
1161 * giving to hardware, and realign data on
1162 * a 2-byte boundary.
1163 */
1164 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1165 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1166 skb_copy_to_linear_data(skb, temp_addr,
1167 (unsigned int)len);
1168}
1169
1170/*
1171 * This function builds an skb for the given inbound
1172 * completion. It will be rewritten for readability in the near
1173 * future, but for not it works well.
1174 */
1175static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1176 struct rx_ring *rx_ring,
1177 struct ib_mac_iocb_rsp *ib_mac_rsp)
1178{
1179 struct bq_desc *lbq_desc;
1180 struct bq_desc *sbq_desc;
1181 struct sk_buff *skb = NULL;
1182 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1183 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1184
1185 /*
1186 * Handle the header buffer if present.
1187 */
1188 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1189 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1190 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1191 /*
1192 * Headers fit nicely into a small buffer.
1193 */
1194 sbq_desc = ql_get_curr_sbuf(rx_ring);
1195 pci_unmap_single(qdev->pdev,
1196 pci_unmap_addr(sbq_desc, mapaddr),
1197 pci_unmap_len(sbq_desc, maplen),
1198 PCI_DMA_FROMDEVICE);
1199 skb = sbq_desc->p.skb;
1200 ql_realign_skb(skb, hdr_len);
1201 skb_put(skb, hdr_len);
1202 sbq_desc->p.skb = NULL;
1203 }
1204
1205 /*
1206 * Handle the data buffer(s).
1207 */
1208 if (unlikely(!length)) { /* Is there data too? */
1209 QPRINTK(qdev, RX_STATUS, DEBUG,
1210 "No Data buffer in this packet.\n");
1211 return skb;
1212 }
1213
1214 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1215 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1216 QPRINTK(qdev, RX_STATUS, DEBUG,
1217 "Headers in small, data of %d bytes in small, combine them.\n", length);
1218 /*
1219 * Data is less than small buffer size so it's
1220 * stuffed in a small buffer.
1221 * For this case we append the data
1222 * from the "data" small buffer to the "header" small
1223 * buffer.
1224 */
1225 sbq_desc = ql_get_curr_sbuf(rx_ring);
1226 pci_dma_sync_single_for_cpu(qdev->pdev,
1227 pci_unmap_addr
1228 (sbq_desc, mapaddr),
1229 pci_unmap_len
1230 (sbq_desc, maplen),
1231 PCI_DMA_FROMDEVICE);
1232 memcpy(skb_put(skb, length),
1233 sbq_desc->p.skb->data, length);
1234 pci_dma_sync_single_for_device(qdev->pdev,
1235 pci_unmap_addr
1236 (sbq_desc,
1237 mapaddr),
1238 pci_unmap_len
1239 (sbq_desc,
1240 maplen),
1241 PCI_DMA_FROMDEVICE);
1242 } else {
1243 QPRINTK(qdev, RX_STATUS, DEBUG,
1244 "%d bytes in a single small buffer.\n", length);
1245 sbq_desc = ql_get_curr_sbuf(rx_ring);
1246 skb = sbq_desc->p.skb;
1247 ql_realign_skb(skb, length);
1248 skb_put(skb, length);
1249 pci_unmap_single(qdev->pdev,
1250 pci_unmap_addr(sbq_desc,
1251 mapaddr),
1252 pci_unmap_len(sbq_desc,
1253 maplen),
1254 PCI_DMA_FROMDEVICE);
1255 sbq_desc->p.skb = NULL;
1256 }
1257 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1258 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1259 QPRINTK(qdev, RX_STATUS, DEBUG,
1260 "Header in small, %d bytes in large. Chain large to small!\n", length);
1261 /*
1262 * The data is in a single large buffer. We
1263 * chain it to the header buffer's skb and let
1264 * it rip.
1265 */
1266 lbq_desc = ql_get_curr_lbuf(rx_ring);
1267 pci_unmap_page(qdev->pdev,
1268 pci_unmap_addr(lbq_desc,
1269 mapaddr),
1270 pci_unmap_len(lbq_desc, maplen),
1271 PCI_DMA_FROMDEVICE);
1272 QPRINTK(qdev, RX_STATUS, DEBUG,
1273 "Chaining page to skb.\n");
1274 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1275 0, length);
1276 skb->len += length;
1277 skb->data_len += length;
1278 skb->truesize += length;
1279 lbq_desc->p.lbq_page = NULL;
1280 } else {
1281 /*
1282 * The headers and data are in a single large buffer. We
1283 * copy it to a new skb and let it go. This can happen with
1284 * jumbo mtu on a non-TCP/UDP frame.
1285 */
1286 lbq_desc = ql_get_curr_lbuf(rx_ring);
1287 skb = netdev_alloc_skb(qdev->ndev, length);
1288 if (skb == NULL) {
1289 QPRINTK(qdev, PROBE, DEBUG,
1290 "No skb available, drop the packet.\n");
1291 return NULL;
1292 }
1293 skb_reserve(skb, NET_IP_ALIGN);
1294 QPRINTK(qdev, RX_STATUS, DEBUG,
1295 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1296 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1297 0, length);
1298 skb->len += length;
1299 skb->data_len += length;
1300 skb->truesize += length;
1301 length -= length;
1302 lbq_desc->p.lbq_page = NULL;
1303 __pskb_pull_tail(skb,
1304 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1305 VLAN_ETH_HLEN : ETH_HLEN);
1306 }
1307 } else {
1308 /*
1309 * The data is in a chain of large buffers
1310 * pointed to by a small buffer. We loop
1311 * thru and chain them to the our small header
1312 * buffer's skb.
1313 * frags: There are 18 max frags and our small
1314 * buffer will hold 32 of them. The thing is,
1315 * we'll use 3 max for our 9000 byte jumbo
1316 * frames. If the MTU goes up we could
1317 * eventually be in trouble.
1318 */
1319 int size, offset, i = 0;
1320 struct bq_element *bq, bq_array[8];
1321 sbq_desc = ql_get_curr_sbuf(rx_ring);
1322 pci_unmap_single(qdev->pdev,
1323 pci_unmap_addr(sbq_desc, mapaddr),
1324 pci_unmap_len(sbq_desc, maplen),
1325 PCI_DMA_FROMDEVICE);
1326 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1327 /*
1328 * This is an non TCP/UDP IP frame, so
1329 * the headers aren't split into a small
1330 * buffer. We have to use the small buffer
1331 * that contains our sg list as our skb to
1332 * send upstairs. Copy the sg list here to
1333 * a local buffer and use it to find the
1334 * pages to chain.
1335 */
1336 QPRINTK(qdev, RX_STATUS, DEBUG,
1337 "%d bytes of headers & data in chain of large.\n", length);
1338 skb = sbq_desc->p.skb;
1339 bq = &bq_array[0];
1340 memcpy(bq, skb->data, sizeof(bq_array));
1341 sbq_desc->p.skb = NULL;
1342 skb_reserve(skb, NET_IP_ALIGN);
1343 } else {
1344 QPRINTK(qdev, RX_STATUS, DEBUG,
1345 "Headers in small, %d bytes of data in chain of large.\n", length);
1346 bq = (struct bq_element *)sbq_desc->p.skb->data;
1347 }
1348 while (length > 0) {
1349 lbq_desc = ql_get_curr_lbuf(rx_ring);
1350 if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
1351 QPRINTK(qdev, RX_STATUS, ERR,
1352 "Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
1353 lbq_desc->bq->addr_lo, bq->addr_lo);
1354 return NULL;
1355 }
1356 pci_unmap_page(qdev->pdev,
1357 pci_unmap_addr(lbq_desc,
1358 mapaddr),
1359 pci_unmap_len(lbq_desc,
1360 maplen),
1361 PCI_DMA_FROMDEVICE);
1362 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1363 offset = 0;
1364
1365 QPRINTK(qdev, RX_STATUS, DEBUG,
1366 "Adding page %d to skb for %d bytes.\n",
1367 i, size);
1368 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1369 offset, size);
1370 skb->len += size;
1371 skb->data_len += size;
1372 skb->truesize += size;
1373 length -= size;
1374 lbq_desc->p.lbq_page = NULL;
1375 bq++;
1376 i++;
1377 }
1378 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1379 VLAN_ETH_HLEN : ETH_HLEN);
1380 }
1381 return skb;
1382}
1383
/* Process an inbound completion from an rx ring.
 * Builds an skb from the completion's buffers, fills in checksum and
 * multicast/promiscuous status, updates the adapter rx counters, and
 * hands the frame to the stack (VLAN-accelerated path when a vlan
 * group is registered and the completion carries a VLAN tag).
 */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	/* Assemble the frame; a NULL return means it was unrecoverable
	 * (no memory or a bad buffer chain) and must be dropped.
	 */
	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	/* Debug-only classification of how a multicast frame matched. */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}
	/* Checksum status: a hardware-reported IP/TCP error forces the
	 * stack to re-verify; otherwise, if rx checksumming is enabled
	 * and the frame is TCP (or UDP with a valid checksum present),
	 * tell the stack the checksum is already verified.
	 */
	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
		QPRINTK(qdev, RX_STATUS, ERR,
			"Bad checksum for this %s packet.\n",
			((ib_mac_rsp->
			  flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
		skb->ip_summed = CHECKSUM_NONE;
	} else if (qdev->rx_csum &&
		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a VLAN packet upstream.\n");
		vlan_hwaccel_rx(skb, qdev->vlgrp,
				le16_to_cpu(ib_mac_rsp->vlan_id));
	} else {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a normal packet upstream.\n");
		netif_rx(skb);
	}
	ndev->last_rx = jiffies;
}
1443
1444/* Process an outbound completion from an rx ring. */
1445static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1446 struct ob_mac_iocb_rsp *mac_rsp)
1447{
1448 struct tx_ring *tx_ring;
1449 struct tx_ring_desc *tx_ring_desc;
1450
1451 QL_DUMP_OB_MAC_RSP(mac_rsp);
1452 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1453 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1454 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1455 qdev->stats.tx_bytes += tx_ring_desc->map_cnt;
1456 qdev->stats.tx_packets++;
1457 dev_kfree_skb(tx_ring_desc->skb);
1458 tx_ring_desc->skb = NULL;
1459
1460 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1461 OB_MAC_IOCB_RSP_S |
1462 OB_MAC_IOCB_RSP_L |
1463 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1464 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1465 QPRINTK(qdev, TX_DONE, WARNING,
1466 "Total descriptor length did not match transfer length.\n");
1467 }
1468 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1469 QPRINTK(qdev, TX_DONE, WARNING,
1470 "Frame too short to be legal, not sent.\n");
1471 }
1472 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1473 QPRINTK(qdev, TX_DONE, WARNING,
1474 "Frame too long, but sent anyway.\n");
1475 }
1476 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1477 QPRINTK(qdev, TX_DONE, WARNING,
1478 "PCI backplane error. Frame not sent.\n");
1479 }
1480 }
1481 atomic_inc(&tx_ring->tx_count);
1482}
1483
1484/* Fire up a handler to reset the MPI processor. */
1485void ql_queue_fw_error(struct ql_adapter *qdev)
1486{
1487 netif_stop_queue(qdev->ndev);
1488 netif_carrier_off(qdev->ndev);
1489 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1490}
1491
1492void ql_queue_asic_error(struct ql_adapter *qdev)
1493{
1494 netif_stop_queue(qdev->ndev);
1495 netif_carrier_off(qdev->ndev);
1496 ql_disable_interrupts(qdev);
1497 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1498}
1499
1500static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1501 struct ib_ae_iocb_rsp *ib_ae_rsp)
1502{
1503 switch (ib_ae_rsp->event) {
1504 case MGMT_ERR_EVENT:
1505 QPRINTK(qdev, RX_ERR, ERR,
1506 "Management Processor Fatal Error.\n");
1507 ql_queue_fw_error(qdev);
1508 return;
1509
1510 case CAM_LOOKUP_ERR_EVENT:
1511 QPRINTK(qdev, LINK, ERR,
1512 "Multiple CAM hits lookup occurred.\n");
1513 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1514 ql_queue_asic_error(qdev);
1515 return;
1516
1517 case SOFT_ECC_ERROR_EVENT:
1518 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1519 ql_queue_asic_error(qdev);
1520 break;
1521
1522 case PCI_ERR_ANON_BUF_RD:
1523 QPRINTK(qdev, RX_ERR, ERR,
1524 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1525 ib_ae_rsp->q_id);
1526 ql_queue_asic_error(qdev);
1527 break;
1528
1529 default:
1530 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1531 ib_ae_rsp->event);
1532 ql_queue_asic_error(qdev);
1533 break;
1534 }
1535}
1536
/* Drain a completion (rx) ring that is dedicated to outbound (TX)
 * completions.  Returns the number of completions consumed.  Also
 * re-wakes the tx queue if it had been stopped for lack of descriptors
 * and the ring has drained to at least 25% free.
 */
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	/* Producer index lives in a DMA "shadow" area the chip updates. */
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		/* Make sure we read the entry only after seeing the
		 * updated producer index.
		 */
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		/* Re-sample; the chip may have produced more entries. */
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	/* Tell the chip how far we've consumed. */
	ql_write_cq_idx(rx_ring);
	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
		struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_queue(qdev->ndev);
	}

	return count;
}
1582
/* Drain up to @budget completions from an inbound (RX) completion
 * ring, dispatching MAC frames and async events, then replenish the
 * buffer queues and publish the new consumer index.  Returns the
 * number of completions processed (the NAPI work count).
 */
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	/* Producer index lives in a DMA "shadow" area the chip updates. */
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		/* Read the entry only after observing the new producer. */
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			{
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"Hit default case, not handled! dropping the packet, opcode = %x.\n",
					net_rsp->opcode);
			}
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		/* Respect the NAPI budget. */
		if (count == budget)
			break;
	}
	/* Give consumed small/large buffers back to the chip. */
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}
1627
1628static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1629{
1630 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1631 struct ql_adapter *qdev = rx_ring->qdev;
1632 int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1633
1634 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1635 rx_ring->cq_id);
1636
1637 if (work_done < budget) {
1638 __netif_rx_complete(qdev->ndev, napi);
1639 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1640 }
1641 return work_done;
1642}
1643
1644static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1645{
1646 struct ql_adapter *qdev = netdev_priv(ndev);
1647
1648 qdev->vlgrp = grp;
1649 if (grp) {
1650 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1651 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1652 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1653 } else {
1654 QPRINTK(qdev, IFUP, DEBUG,
1655 "Turning off VLAN in NIC_RCV_CFG.\n");
1656 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1657 }
1658}
1659
1660static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1661{
1662 struct ql_adapter *qdev = netdev_priv(ndev);
1663 u32 enable_bit = MAC_ADDR_E;
1664
1665 spin_lock(&qdev->hw_lock);
1666 if (ql_set_mac_addr_reg
1667 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1668 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1669 }
1670 spin_unlock(&qdev->hw_lock);
1671}
1672
1673static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1674{
1675 struct ql_adapter *qdev = netdev_priv(ndev);
1676 u32 enable_bit = 0;
1677
1678 spin_lock(&qdev->hw_lock);
1679 if (ql_set_mac_addr_reg
1680 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1681 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1682 }
1683 spin_unlock(&qdev->hw_lock);
1684
1685}
1686
1687/* Worker thread to process a given rx_ring that is dedicated
1688 * to outbound completions.
1689 */
1690static void ql_tx_clean(struct work_struct *work)
1691{
1692 struct rx_ring *rx_ring =
1693 container_of(work, struct rx_ring, rx_work.work);
1694 ql_clean_outbound_rx_ring(rx_ring);
1695 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1696
1697}
1698
1699/* Worker thread to process a given rx_ring that is dedicated
1700 * to inbound completions.
1701 */
1702static void ql_rx_clean(struct work_struct *work)
1703{
1704 struct rx_ring *rx_ring =
1705 container_of(work, struct rx_ring, rx_work.work);
1706 ql_clean_inbound_rx_ring(rx_ring, 64);
1707 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1708}
1709
1710/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1711static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1712{
1713 struct rx_ring *rx_ring = dev_id;
1714 queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1715 &rx_ring->rx_work, 0);
1716 return IRQ_HANDLED;
1717}
1718
1719/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1720static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1721{
1722 struct rx_ring *rx_ring = dev_id;
1723 struct ql_adapter *qdev = rx_ring->qdev;
1724 netif_rx_schedule(qdev->ndev, &rx_ring->napi);
1725 return IRQ_HANDLED;
1726}
1727
1728/* We check here to see if we're already handling a legacy
1729 * interrupt. If we are, then it must belong to another
1730 * chip with which we're sharing the interrupt line.
1731 */
1732int ql_legacy_check(struct ql_adapter *qdev)
1733{
1734 int err;
1735 spin_lock(&qdev->legacy_lock);
1736 err = atomic_read(&qdev->intr_context[0].irq_cnt);
1737 spin_unlock(&qdev->legacy_lock);
1738 return err;
1739}
1740
/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also process the rest of
 * the rx_rings.
 * Returns IRQ_HANDLED when any work was queued, IRQ_NONE otherwise
 * (so a shared line can be passed on to the other device).
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int i;
	int work_done = 0;

	/* On a shared legacy line, another chip's handler may already
	 * be running; if so this interrupt is not ours.
	 */
	if (qdev->legacy_check && qdev->legacy_check(qdev)) {
		QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n");
		return IRQ_NONE;	/* Not our interrupt */
	}

	var = ql_read32(qdev, STS);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		QPRINTK(qdev, INTR, ERR,
			"Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if (var & STS_PI) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
				      &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Check the default queue and wake handler if active.
	 */
	rx_ring = &qdev->rx_ring[0];
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
				      &rx_ring->rx_work, 0);
		work_done++;
	}

	/* In MSI or legacy mode this one vector services every ring,
	 * so scan the remaining rings for pending completions.
	 */
	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		/*
		 * Start the DPC for each active queue.
		 */
		for (i = 1; i < qdev->rx_ring_count; i++) {
			rx_ring = &qdev->rx_ring[i];
			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
			    rx_ring->cnsmr_idx) {
				QPRINTK(qdev, INTR, INFO,
					"Waking handler for rx_ring[%d].\n", i);
				ql_disable_completion_interrupt(qdev,
								intr_context->
								intr);
				/* Rings below the first RSS ring are tx
				 * completion rings serviced by workers;
				 * RSS rings are serviced by NAPI.
				 */
				if (i < qdev->rss_ring_first_cq_id)
					queue_delayed_work_on(rx_ring->cpu,
							      qdev->q_workqueue,
							      &rx_ring->rx_work,
							      0);
				else
					netif_rx_schedule(qdev->ndev,
							  &rx_ring->napi);
				work_done++;
			}
		}
	}
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
1828
/* Set up the transmit IOCB for TSO (large send offload).
 * Returns 1 if TSO fields were filled in, 0 if the skb is not GSO,
 * or a negative errno if un-cloning the header failed.
 */
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		/* The TCP checksum seed below writes into the header,
		 * so make sure we own it.
		 */
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		/* Network and transport header offsets are packed into
		 * one little-endian field.
		 */
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			/* Seed the TCP checksum with the pseudo-header
			 * (length 0); presumably the hardware fills in
			 * the rest per segment.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
1870
/* Set up the transmit IOCB for hardware TCP/UDP checksum offload on a
 * CHECKSUM_PARTIAL skb, seeding the checksum field with the
 * pseudo-header sum.
 *
 * NOTE(review): this assumes an IPv4 frame -- it uses ip_hdr() and
 * sets OB_MAC_TSO_IOCB_IP4 unconditionally.  Confirm callers never
 * reach here with an IPv6 CHECKSUM_PARTIAL skb.
 */
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	u16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	/* Network and transport header offsets share one LE field. */
	mac_iocb_ptr->net_trans_offset =
	    cpu_to_le16(skb_network_offset(skb) |
			skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	/* L4 payload length = IP total length minus IP header length. */
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		/* Anything else is treated as UDP here. */
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	/* Seed with the pseudo-header checksum for the hardware. */
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
1901
1902static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1903{
1904 struct tx_ring_desc *tx_ring_desc;
1905 struct ob_mac_iocb_req *mac_iocb_ptr;
1906 struct ql_adapter *qdev = netdev_priv(ndev);
1907 int tso;
1908 struct tx_ring *tx_ring;
1909 u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
1910
1911 tx_ring = &qdev->tx_ring[tx_ring_idx];
1912
1913 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1914 QPRINTK(qdev, TX_QUEUED, INFO,
1915 "%s: shutting down tx queue %d du to lack of resources.\n",
1916 __func__, tx_ring_idx);
1917 netif_stop_queue(ndev);
1918 atomic_inc(&tx_ring->queue_stopped);
1919 return NETDEV_TX_BUSY;
1920 }
1921 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
1922 mac_iocb_ptr = tx_ring_desc->queue_entry;
1923 memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
1924 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
1925 QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
1926 return NETDEV_TX_BUSY;
1927 }
1928
1929 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
1930 mac_iocb_ptr->tid = tx_ring_desc->index;
1931 /* We use the upper 32-bits to store the tx queue for this IO.
1932 * When we get the completion we can use it to establish the context.
1933 */
1934 mac_iocb_ptr->txq_idx = tx_ring_idx;
1935 tx_ring_desc->skb = skb;
1936
1937 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
1938
1939 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
1940 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
1941 vlan_tx_tag_get(skb));
1942 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
1943 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
1944 }
1945 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1946 if (tso < 0) {
1947 dev_kfree_skb_any(skb);
1948 return NETDEV_TX_OK;
1949 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
1950 ql_hw_csum_setup(skb,
1951 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1952 }
1953 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
1954 tx_ring->prod_idx++;
1955 if (tx_ring->prod_idx == tx_ring->wq_len)
1956 tx_ring->prod_idx = 0;
1957 wmb();
1958
1959 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
1960 ndev->trans_start = jiffies;
1961 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
1962 tx_ring->prod_idx, skb->len);
1963
1964 atomic_dec(&tx_ring->tx_count);
1965 return NETDEV_TX_OK;
1966}
1967
1968static void ql_free_shadow_space(struct ql_adapter *qdev)
1969{
1970 if (qdev->rx_ring_shadow_reg_area) {
1971 pci_free_consistent(qdev->pdev,
1972 PAGE_SIZE,
1973 qdev->rx_ring_shadow_reg_area,
1974 qdev->rx_ring_shadow_reg_dma);
1975 qdev->rx_ring_shadow_reg_area = NULL;
1976 }
1977 if (qdev->tx_ring_shadow_reg_area) {
1978 pci_free_consistent(qdev->pdev,
1979 PAGE_SIZE,
1980 qdev->tx_ring_shadow_reg_area,
1981 qdev->tx_ring_shadow_reg_dma);
1982 qdev->tx_ring_shadow_reg_area = NULL;
1983 }
1984}
1985
1986static int ql_alloc_shadow_space(struct ql_adapter *qdev)
1987{
1988 qdev->rx_ring_shadow_reg_area =
1989 pci_alloc_consistent(qdev->pdev,
1990 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
1991 if (qdev->rx_ring_shadow_reg_area == NULL) {
1992 QPRINTK(qdev, IFUP, ERR,
1993 "Allocation of RX shadow space failed.\n");
1994 return -ENOMEM;
1995 }
1996 qdev->tx_ring_shadow_reg_area =
1997 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
1998 &qdev->tx_ring_shadow_reg_dma);
1999 if (qdev->tx_ring_shadow_reg_area == NULL) {
2000 QPRINTK(qdev, IFUP, ERR,
2001 "Allocation of TX shadow space failed.\n");
2002 goto err_wqp_sh_area;
2003 }
2004 return 0;
2005
2006err_wqp_sh_area:
2007 pci_free_consistent(qdev->pdev,
2008 PAGE_SIZE,
2009 qdev->rx_ring_shadow_reg_area,
2010 qdev->rx_ring_shadow_reg_dma);
2011 return -ENOMEM;
2012}
2013
2014static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2015{
2016 struct tx_ring_desc *tx_ring_desc;
2017 int i;
2018 struct ob_mac_iocb_req *mac_iocb_ptr;
2019
2020 mac_iocb_ptr = tx_ring->wq_base;
2021 tx_ring_desc = tx_ring->q;
2022 for (i = 0; i < tx_ring->wq_len; i++) {
2023 tx_ring_desc->index = i;
2024 tx_ring_desc->skb = NULL;
2025 tx_ring_desc->queue_entry = mac_iocb_ptr;
2026 mac_iocb_ptr++;
2027 tx_ring_desc++;
2028 }
2029 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2030 atomic_set(&tx_ring->queue_stopped, 0);
2031}
2032
2033static void ql_free_tx_resources(struct ql_adapter *qdev,
2034 struct tx_ring *tx_ring)
2035{
2036 if (tx_ring->wq_base) {
2037 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2038 tx_ring->wq_base, tx_ring->wq_base_dma);
2039 tx_ring->wq_base = NULL;
2040 }
2041 kfree(tx_ring->q);
2042 tx_ring->q = NULL;
2043}
2044
2045static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2046 struct tx_ring *tx_ring)
2047{
2048 tx_ring->wq_base =
2049 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2050 &tx_ring->wq_base_dma);
2051
2052 if ((tx_ring->wq_base == NULL)
2053 || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
2054 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2055 return -ENOMEM;
2056 }
2057 tx_ring->q =
2058 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2059 if (tx_ring->q == NULL)
2060 goto err;
2061
2062 return 0;
2063err:
2064 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2065 tx_ring->wq_base, tx_ring->wq_base_dma);
2066 return -ENOMEM;
2067}
2068
2069void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2070{
2071 int i;
2072 struct bq_desc *lbq_desc;
2073
2074 for (i = 0; i < rx_ring->lbq_len; i++) {
2075 lbq_desc = &rx_ring->lbq[i];
2076 if (lbq_desc->p.lbq_page) {
2077 pci_unmap_page(qdev->pdev,
2078 pci_unmap_addr(lbq_desc, mapaddr),
2079 pci_unmap_len(lbq_desc, maplen),
2080 PCI_DMA_FROMDEVICE);
2081
2082 put_page(lbq_desc->p.lbq_page);
2083 lbq_desc->p.lbq_page = NULL;
2084 }
2085 lbq_desc->bq->addr_lo = 0;
2086 lbq_desc->bq->addr_hi = 0;
2087 }
2088}
2089
2090/*
2091 * Allocate and map a page for each element of the lbq.
2092 */
2093static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2094 struct rx_ring *rx_ring)
2095{
2096 int i;
2097 struct bq_desc *lbq_desc;
2098 u64 map;
2099 struct bq_element *bq = rx_ring->lbq_base;
2100
2101 for (i = 0; i < rx_ring->lbq_len; i++) {
2102 lbq_desc = &rx_ring->lbq[i];
2103 memset(lbq_desc, 0, sizeof(lbq_desc));
2104 lbq_desc->bq = bq;
2105 lbq_desc->index = i;
2106 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2107 if (unlikely(!lbq_desc->p.lbq_page)) {
2108 QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2109 goto mem_error;
2110 } else {
2111 map = pci_map_page(qdev->pdev,
2112 lbq_desc->p.lbq_page,
2113 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2114 if (pci_dma_mapping_error(qdev->pdev, map)) {
2115 QPRINTK(qdev, IFUP, ERR,
2116 "PCI mapping failed.\n");
2117 goto mem_error;
2118 }
2119 pci_unmap_addr_set(lbq_desc, mapaddr, map);
2120 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2121 bq->addr_lo = cpu_to_le32(map);
2122 bq->addr_hi = cpu_to_le32(map >> 32);
2123 }
2124 bq++;
2125 }
2126 return 0;
2127mem_error:
2128 ql_free_lbq_buffers(qdev, rx_ring);
2129 return -ENOMEM;
2130}
2131
2132void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2133{
2134 int i;
2135 struct bq_desc *sbq_desc;
2136
2137 for (i = 0; i < rx_ring->sbq_len; i++) {
2138 sbq_desc = &rx_ring->sbq[i];
2139 if (sbq_desc == NULL) {
2140 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2141 return;
2142 }
2143 if (sbq_desc->p.skb) {
2144 pci_unmap_single(qdev->pdev,
2145 pci_unmap_addr(sbq_desc, mapaddr),
2146 pci_unmap_len(sbq_desc, maplen),
2147 PCI_DMA_FROMDEVICE);
2148 dev_kfree_skb(sbq_desc->p.skb);
2149 sbq_desc->p.skb = NULL;
2150 }
2151 if (sbq_desc->bq == NULL) {
2152 QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
2153 i);
2154 return;
2155 }
2156 sbq_desc->bq->addr_lo = 0;
2157 sbq_desc->bq->addr_hi = 0;
2158 }
2159}
2160
2161/* Allocate and map an skb for each element of the sbq. */
2162static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
2163 struct rx_ring *rx_ring)
2164{
2165 int i;
2166 struct bq_desc *sbq_desc;
2167 struct sk_buff *skb;
2168 u64 map;
2169 struct bq_element *bq = rx_ring->sbq_base;
2170
2171 for (i = 0; i < rx_ring->sbq_len; i++) {
2172 sbq_desc = &rx_ring->sbq[i];
2173 memset(sbq_desc, 0, sizeof(sbq_desc));
2174 sbq_desc->index = i;
2175 sbq_desc->bq = bq;
2176 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2177 if (unlikely(!skb)) {
2178 /* Better luck next round */
2179 QPRINTK(qdev, IFUP, ERR,
2180 "small buff alloc failed for %d bytes at index %d.\n",
2181 rx_ring->sbq_buf_size, i);
2182 goto mem_err;
2183 }
2184 skb_reserve(skb, QLGE_SB_PAD);
2185 sbq_desc->p.skb = skb;
2186 /*
2187 * Map only half the buffer. Because the
2188 * other half may get some data copied to it
2189 * when the completion arrives.
2190 */
2191 map = pci_map_single(qdev->pdev,
2192 skb->data,
2193 rx_ring->sbq_buf_size / 2,
2194 PCI_DMA_FROMDEVICE);
2195 if (pci_dma_mapping_error(qdev->pdev, map)) {
2196 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2197 goto mem_err;
2198 }
2199 pci_unmap_addr_set(sbq_desc, mapaddr, map);
2200 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
2201 bq->addr_lo = /*sbq_desc->addr_lo = */
2202 cpu_to_le32(map);
2203 bq->addr_hi = /*sbq_desc->addr_hi = */
2204 cpu_to_le32(map >> 32);
2205 bq++;
2206 }
2207 return 0;
2208mem_err:
2209 ql_free_sbq_buffers(qdev, rx_ring);
2210 return -ENOMEM;
2211}
2212
2213static void ql_free_rx_resources(struct ql_adapter *qdev,
2214 struct rx_ring *rx_ring)
2215{
2216 if (rx_ring->sbq_len)
2217 ql_free_sbq_buffers(qdev, rx_ring);
2218 if (rx_ring->lbq_len)
2219 ql_free_lbq_buffers(qdev, rx_ring);
2220
2221 /* Free the small buffer queue. */
2222 if (rx_ring->sbq_base) {
2223 pci_free_consistent(qdev->pdev,
2224 rx_ring->sbq_size,
2225 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2226 rx_ring->sbq_base = NULL;
2227 }
2228
2229 /* Free the small buffer queue control blocks. */
2230 kfree(rx_ring->sbq);
2231 rx_ring->sbq = NULL;
2232
2233 /* Free the large buffer queue. */
2234 if (rx_ring->lbq_base) {
2235 pci_free_consistent(qdev->pdev,
2236 rx_ring->lbq_size,
2237 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2238 rx_ring->lbq_base = NULL;
2239 }
2240
2241 /* Free the large buffer queue control blocks. */
2242 kfree(rx_ring->lbq);
2243 rx_ring->lbq = NULL;
2244
2245 /* Free the rx queue. */
2246 if (rx_ring->cq_base) {
2247 pci_free_consistent(qdev->pdev,
2248 rx_ring->cq_size,
2249 rx_ring->cq_base, rx_ring->cq_base_dma);
2250 rx_ring->cq_base = NULL;
2251 }
2252}
2253
/* Allocate queues and buffers for this completions queue based
 * on the values in the parameter structure.
 * Returns 0 on success or -ENOMEM; on any failure everything
 * allocated so far is unwound via ql_free_rx_resources().
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{

	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	/* A zero sbq_len/lbq_len means this ring carries no small/large
	 * receive buffer queue (e.g. completion-only rings).
	 */
	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		/* Populate the queue with mapped skbs. */
		if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer allocation failed.\n");
			goto err_mem;
		}
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate the buffers (one mapped page per element).
		 */
		if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer allocation failed.\n");
			goto err_mem;
		}
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
2346
2347static void ql_tx_ring_clean(struct ql_adapter *qdev)
2348{
2349 struct tx_ring *tx_ring;
2350 struct tx_ring_desc *tx_ring_desc;
2351 int i, j;
2352
2353 /*
2354 * Loop through all queues and free
2355 * any resources.
2356 */
2357 for (j = 0; j < qdev->tx_ring_count; j++) {
2358 tx_ring = &qdev->tx_ring[j];
2359 for (i = 0; i < tx_ring->wq_len; i++) {
2360 tx_ring_desc = &tx_ring->q[i];
2361 if (tx_ring_desc && tx_ring_desc->skb) {
2362 QPRINTK(qdev, IFDOWN, ERR,
2363 "Freeing lost SKB %p, from queue %d, index %d.\n",
2364 tx_ring_desc->skb, j,
2365 tx_ring_desc->index);
2366 ql_unmap_send(qdev, tx_ring_desc,
2367 tx_ring_desc->map_cnt);
2368 dev_kfree_skb(tx_ring_desc->skb);
2369 tx_ring_desc->skb = NULL;
2370 }
2371 }
2372 }
2373}
2374
2375static void ql_free_ring_cb(struct ql_adapter *qdev)
2376{
2377 kfree(qdev->ring_mem);
2378}
2379
2380static int ql_alloc_ring_cb(struct ql_adapter *qdev)
2381{
2382 /* Allocate space for tx/rx ring control blocks. */
2383 qdev->ring_mem_size =
2384 (qdev->tx_ring_count * sizeof(struct tx_ring)) +
2385 (qdev->rx_ring_count * sizeof(struct rx_ring));
2386 qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
2387 if (qdev->ring_mem == NULL) {
2388 return -ENOMEM;
2389 } else {
2390 qdev->rx_ring = qdev->ring_mem;
2391 qdev->tx_ring = qdev->ring_mem +
2392 (qdev->rx_ring_count * sizeof(struct rx_ring));
2393 }
2394 return 0;
2395}
2396
2397static void ql_free_mem_resources(struct ql_adapter *qdev)
2398{
2399 int i;
2400
2401 for (i = 0; i < qdev->tx_ring_count; i++)
2402 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2403 for (i = 0; i < qdev->rx_ring_count; i++)
2404 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2405 ql_free_shadow_space(qdev);
2406}
2407
2408static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2409{
2410 int i;
2411
2412 /* Allocate space for our shadow registers and such. */
2413 if (ql_alloc_shadow_space(qdev))
2414 return -ENOMEM;
2415
2416 for (i = 0; i < qdev->rx_ring_count; i++) {
2417 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2418 QPRINTK(qdev, IFUP, ERR,
2419 "RX resource allocation failed.\n");
2420 goto err_mem;
2421 }
2422 }
2423 /* Allocate tx queue resources */
2424 for (i = 0; i < qdev->tx_ring_count; i++) {
2425 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2426 QPRINTK(qdev, IFUP, ERR,
2427 "TX resource allocation failed.\n");
2428 goto err_mem;
2429 }
2430 }
2431 return 0;
2432
2433err_mem:
2434 ql_free_mem_resources(qdev);
2435 return -ENOMEM;
2436}
2437
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 * Returns 0 on success or the error from ql_write_cfg().
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	/* Each CQ owns four u64 slots in the shadow area: producer
	 * index, lbq base indirect, sbq base indirect; the fourth is
	 * unused here.
	 */
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	/* CQ doorbell pages begin at DB page 128. */
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c);

	/* Build the control block that will be downloaded to the chip. */
	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
	cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);

	cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
	cqicb->prod_idx_addr_hi =
	    cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |		/* Load MSI-X vector */
	    FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		/* The chip reads the lbq base address indirectly through
		 * this shadow slot.
		 */
		*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
		cqicb->lbq_addr_lo =
		    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
		cqicb->lbq_addr_hi =
		    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
		cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size);
		bq_len = (u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		/* Start the producer 16 entries back so the free count
		 * below matches what has not yet been handed over.
		 */
		rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
		rx_ring->lbq_free_cnt = 16;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
		cqicb->sbq_addr_lo =
		    cpu_to_le32(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_addr_hi =
		    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
		/* Only half the sbq buffer is DMA-mapped (see
		 * ql_alloc_sbq_buffers()); round to an 8-byte multiple.
		 */
		cqicb->sbq_buf_size =
		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
		bq_len = (u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
		rx_ring->sbq_free_cnt = 16;
	}
	switch (rx_ring->type) {
	case TX_Q:
		/* If there's only one interrupt, then we use
		 * worker threads to process the outbound
		 * completion handling rx_rings. We do this so
		 * they can be run on multiple CPUs. There is
		 * room to play with this more where we would only
		 * run in a worker if there are more than x number
		 * of outbound completions on the queue and more
		 * than one queue active. Some threshold that
		 * would indicate a benefit in spite of the cost
		 * of a context switch.
		 * If there's more than one interrupt, then the
		 * outbound completions are processed in the ISR.
		 */
		if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		else {
			/* With all debug warnings on we see a WARN_ON message
			 * when we free the skb in the interrupt context.
			 */
			/* NOTE(review): both branches currently register the
			 * same ql_tx_clean work — the if/else is redundant
			 * as written; left intact pending confirmation.
			 */
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		}
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case DEFAULT_Q:
		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
		cqicb->irq_delay = 0;
		cqicb->pkt_delay = 0;
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
			rx_ring->type);
	}
	QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
	/* Download the control block to the chip. */
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
	/*
	 * Advance the producer index for the buffer queues.
	 */
	wmb();
	if (rx_ring->lbq_len)
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	if (rx_ring->sbq_len)
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	return err;
}
2593
2594static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2595{
2596 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2597 void __iomem *doorbell_area =
2598 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2599 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2600 (tx_ring->wq_id * sizeof(u64));
2601 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2602 (tx_ring->wq_id * sizeof(u64));
2603 int err = 0;
2604
2605 /*
2606 * Assign doorbell registers for this tx_ring.
2607 */
2608 /* TX PCI doorbell mem area for tx producer index */
2609 tx_ring->prod_idx_db_reg = (u32 *) doorbell_area;
2610 tx_ring->prod_idx = 0;
2611 /* TX PCI doorbell mem area + 0x04 */
2612 tx_ring->valid_db_reg = doorbell_area + 0x04;
2613
2614 /*
2615 * Assign shadow registers for this tx_ring.
2616 */
2617 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2618 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2619
2620 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2621 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2622 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2623 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2624 wqicb->rid = 0;
2625 wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
2626 wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
2627
2628 wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
2629 wqicb->cnsmr_idx_addr_hi =
2630 cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
2631
2632 ql_init_tx_ring(qdev, tx_ring);
2633
2634 err = ql_write_cfg(qdev, wqicb, sizeof(wqicb), CFG_LRQ,
2635 (u16) tx_ring->wq_id);
2636 if (err) {
2637 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2638 return err;
2639 }
2640 QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
2641 return err;
2642}
2643
2644static void ql_disable_msix(struct ql_adapter *qdev)
2645{
2646 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2647 pci_disable_msix(qdev->pdev);
2648 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2649 kfree(qdev->msi_x_entry);
2650 qdev->msi_x_entry = NULL;
2651 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2652 pci_disable_msi(qdev->pdev);
2653 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2654 }
2655}
2656
/* Pick an interrupt mode for the adapter: try MSI-X (one vector per
 * rx ring), fall back to MSI, then to legacy INTx.  Sets
 * qdev->intr_count and the QL_MSIX_ENABLED / QL_MSI_ENABLED flag bits.
 * NOTE(review): on fallback this rewrites the file-scope irq_type,
 * which would affect every adapter driven by this module — confirm
 * that is intended.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i;

	/* Default: a single vector (MSI or legacy). */
	qdev->intr_count = 1;
	/* Get the MSIX vectors. */
	if (irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* All-or-nothing request: one vector per rx ring. */
		if (!pci_enable_msix
		    (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			qdev->intr_count = qdev->rx_ring_count;
			QPRINTK(qdev, IFUP, INFO,
				"MSI-X Enabled, got %d vectors.\n",
				qdev->intr_count);
			return;
		} else {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			QPRINTK(qdev, IFUP, WARNING,
				"MSI-X Enable failed, trying MSI.\n");
			irq_type = MSI_IRQ;
		}
	}
msi:
	if (irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			QPRINTK(qdev, IFUP, INFO,
				"Running with MSI interrupts.\n");
			return;
		}
	}
	/* Last resort: legacy INTx, which needs the spurious-interrupt
	 * check hook and its lock.
	 */
	irq_type = LEG_IRQ;
	spin_lock_init(&qdev->legacy_lock);
	qdev->legacy_check = ql_legacy_check;
	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
}
2708
2709/*
2710 * Here we build the intr_context structures based on
2711 * our rx_ring count and intr vector count.
2712 * The intr_context structure is used to hook each vector
2713 * to possibly different handlers.
2714 */
2715static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2716{
2717 int i = 0;
2718 struct intr_context *intr_context = &qdev->intr_context[0];
2719
2720 ql_enable_msix(qdev);
2721
2722 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2723 /* Each rx_ring has it's
2724 * own intr_context since we have separate
2725 * vectors for each queue.
2726 * This only true when MSI-X is enabled.
2727 */
2728 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2729 qdev->rx_ring[i].irq = i;
2730 intr_context->intr = i;
2731 intr_context->qdev = qdev;
2732 /*
2733 * We set up each vectors enable/disable/read bits so
2734 * there's no bit/mask calculations in the critical path.
2735 */
2736 intr_context->intr_en_mask =
2737 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2738 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2739 | i;
2740 intr_context->intr_dis_mask =
2741 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2742 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2743 INTR_EN_IHD | i;
2744 intr_context->intr_read_mask =
2745 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2746 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2747 i;
2748
2749 if (i == 0) {
2750 /*
2751 * Default queue handles bcast/mcast plus
2752 * async events. Needs buffers.
2753 */
2754 intr_context->handler = qlge_isr;
2755 sprintf(intr_context->name, "%s-default-queue",
2756 qdev->ndev->name);
2757 } else if (i < qdev->rss_ring_first_cq_id) {
2758 /*
2759 * Outbound queue is for outbound completions only.
2760 */
2761 intr_context->handler = qlge_msix_tx_isr;
2762 sprintf(intr_context->name, "%s-txq-%d",
2763 qdev->ndev->name, i);
2764 } else {
2765 /*
2766 * Inbound queues handle unicast frames only.
2767 */
2768 intr_context->handler = qlge_msix_rx_isr;
2769 sprintf(intr_context->name, "%s-rxq-%d",
2770 qdev->ndev->name, i);
2771 }
2772 }
2773 } else {
2774 /*
2775 * All rx_rings use the same intr_context since
2776 * there is only one vector.
2777 */
2778 intr_context->intr = 0;
2779 intr_context->qdev = qdev;
2780 /*
2781 * We set up each vectors enable/disable/read bits so
2782 * there's no bit/mask calculations in the critical path.
2783 */
2784 intr_context->intr_en_mask =
2785 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2786 intr_context->intr_dis_mask =
2787 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2788 INTR_EN_TYPE_DISABLE;
2789 intr_context->intr_read_mask =
2790 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2791 /*
2792 * Single interrupt means one handler for all rings.
2793 */
2794 intr_context->handler = qlge_isr;
2795 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2796 for (i = 0; i < qdev->rx_ring_count; i++)
2797 qdev->rx_ring[i].irq = 0;
2798 }
2799}
2800
2801static void ql_free_irq(struct ql_adapter *qdev)
2802{
2803 int i;
2804 struct intr_context *intr_context = &qdev->intr_context[0];
2805
2806 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2807 if (intr_context->hooked) {
2808 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2809 free_irq(qdev->msi_x_entry[i].vector,
2810 &qdev->rx_ring[i]);
2811 QPRINTK(qdev, IFDOWN, ERR,
2812 "freeing msix interrupt %d.\n", i);
2813 } else {
2814 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2815 QPRINTK(qdev, IFDOWN, ERR,
2816 "freeing msi interrupt %d.\n", i);
2817 }
2818 }
2819 }
2820 ql_disable_msix(qdev);
2821}
2822
2823static int ql_request_irq(struct ql_adapter *qdev)
2824{
2825 int i;
2826 int status = 0;
2827 struct pci_dev *pdev = qdev->pdev;
2828 struct intr_context *intr_context = &qdev->intr_context[0];
2829
2830 ql_resolve_queues_to_irqs(qdev);
2831
2832 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2833 atomic_set(&intr_context->irq_cnt, 0);
2834 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2835 status = request_irq(qdev->msi_x_entry[i].vector,
2836 intr_context->handler,
2837 0,
2838 intr_context->name,
2839 &qdev->rx_ring[i]);
2840 if (status) {
2841 QPRINTK(qdev, IFUP, ERR,
2842 "Failed request for MSIX interrupt %d.\n",
2843 i);
2844 goto err_irq;
2845 } else {
2846 QPRINTK(qdev, IFUP, INFO,
2847 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2848 i,
2849 qdev->rx_ring[i].type ==
2850 DEFAULT_Q ? "DEFAULT_Q" : "",
2851 qdev->rx_ring[i].type ==
2852 TX_Q ? "TX_Q" : "",
2853 qdev->rx_ring[i].type ==
2854 RX_Q ? "RX_Q" : "", intr_context->name);
2855 }
2856 } else {
2857 QPRINTK(qdev, IFUP, DEBUG,
2858 "trying msi or legacy interrupts.\n");
2859 QPRINTK(qdev, IFUP, DEBUG,
2860 "%s: irq = %d.\n", __func__, pdev->irq);
2861 QPRINTK(qdev, IFUP, DEBUG,
2862 "%s: context->name = %s.\n", __func__,
2863 intr_context->name);
2864 QPRINTK(qdev, IFUP, DEBUG,
2865 "%s: dev_id = 0x%p.\n", __func__,
2866 &qdev->rx_ring[0]);
2867 status =
2868 request_irq(pdev->irq, qlge_isr,
2869 test_bit(QL_MSI_ENABLED,
2870 &qdev->
2871 flags) ? 0 : IRQF_SHARED,
2872 intr_context->name, &qdev->rx_ring[0]);
2873 if (status)
2874 goto err_irq;
2875
2876 QPRINTK(qdev, IFUP, ERR,
2877 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2878 i,
2879 qdev->rx_ring[0].type ==
2880 DEFAULT_Q ? "DEFAULT_Q" : "",
2881 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
2882 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
2883 intr_context->name);
2884 }
2885 intr_context->hooked = 1;
2886 }
2887 return status;
2888err_irq:
2889 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
2890 ql_free_irq(qdev);
2891 return status;
2892}
2893
2894static int ql_start_rss(struct ql_adapter *qdev)
2895{
2896 struct ricb *ricb = &qdev->ricb;
2897 int status = 0;
2898 int i;
2899 u8 *hash_id = (u8 *) ricb->hash_cq_id;
2900
2901 memset((void *)ricb, 0, sizeof(ricb));
2902
2903 ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
2904 ricb->flags =
2905 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
2906 RSS_RT6);
2907 ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
2908
2909 /*
2910 * Fill out the Indirection Table.
2911 */
2912 for (i = 0; i < 32; i++)
2913 hash_id[i] = i & 1;
2914
2915 /*
2916 * Random values for the IPv6 and IPv4 Hash Keys.
2917 */
2918 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
2919 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
2920
2921 QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
2922
2923 status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0);
2924 if (status) {
2925 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
2926 return status;
2927 }
2928 QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
2929 return status;
2930}
2931
/* Initialize the frame-to-queue routing.
 * Clears all 16 routing slots, then programs: error frames, broadcast
 * frames, RSS match (only with multiple inbound queues), and CAM hits.
 * Returns 0 on success or the first ql_set_routing_reg() error.
 */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	int i;

	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for CAM packets.\n");
			return status;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for error packets.\n");
		return status;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for broadcast packets.\n");
		return status;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for MATCH RSS packets.\n");
			return status;
		}
	}

	/* Unicast frames that hit the CAM go to their matched queue. */
	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for CAM packets.\n");
		return status;
	}
	return status;
}
2982
/* Program the chip for operation: global registers, every rx and tx
 * ring, RSS (when multiple inbound queues), port, MAC address and
 * routing — then enable NAPI on the RSS rings.  Returns 0 on success
 * or the first failing step's status.
 */
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	/* Registers here take a set-mask in the high 16 bits. */
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_SH;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
		   min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	status = ql_port_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
		return status;
	}

	/* Program the permanent MAC address into the CAM. */
	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
		QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
			i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}
3075
/* Issue soft reset to chip.
 * Polls RST_FO once a second for up to 3 seconds; if the reset bit is
 * still set, retries the whole sequence up to MAX_RESET_CNT times.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int max_wait_time;
	int status = 0;
	int resetCnt = 0;

/* With MAX_RESET_CNT == 1, exactly one extra attempt is made. */
#define MAX_RESET_CNT 1
issueReset:
	resetCnt++;
	QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
	/* Write function reset with its set-mask in the high 16 bits. */
	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
	/* Wait for reset to complete. */
	max_wait_time = 3;
	QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
		max_wait_time);
	do {
		value = ql_read32(qdev, RST_FO);
		/* Hardware clears RST_FO_FR when the reset finishes. */
		if ((value & RST_FO_FR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));
	if (value & RST_FO_FR) {
		QPRINTK(qdev, IFDOWN, ERR,
			"Stuck in SoftReset: FSC_SR:0x%08x\n", value);
		if (resetCnt < MAX_RESET_CNT)
			goto issueReset;
	}
	/* max_wait_time only reaches 0 when every poll of the last
	 * attempt still showed the reset bit set.
	 */
	if (max_wait_time == 0) {
		status = -ETIMEDOUT;
		QPRINTK(qdev, IFDOWN, ERR,
			"ETIMEOUT!!! errored out of resetting the chip!\n");
	}

	return status;
}
3114
3115static void ql_display_dev_info(struct net_device *ndev)
3116{
3117 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3118
3119 QPRINTK(qdev, PROBE, INFO,
3120 "Function #%d, NIC Roll %d, NIC Rev = %d, "
3121 "XG Roll = %d, XG Rev = %d.\n",
3122 qdev->func,
3123 qdev->chip_rev_id & 0x0000000f,
3124 qdev->chip_rev_id >> 4 & 0x0000000f,
3125 qdev->chip_rev_id >> 8 & 0x0000000f,
3126 qdev->chip_rev_id >> 12 & 0x0000000f);
3127 QPRINTK(qdev, PROBE, INFO,
3128 "MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
3129 ndev->dev_addr[0], ndev->dev_addr[1],
3130 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
3131 ndev->dev_addr[5]);
3132}
3133
3134static int ql_adapter_down(struct ql_adapter *qdev)
3135{
3136 struct net_device *ndev = qdev->ndev;
3137 int i, status = 0;
3138 struct rx_ring *rx_ring;
3139
3140 netif_stop_queue(ndev);
3141 netif_carrier_off(ndev);
3142
3143 cancel_delayed_work_sync(&qdev->asic_reset_work);
3144 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3145 cancel_delayed_work_sync(&qdev->mpi_work);
3146
3147 /* The default queue at index 0 is always processed in
3148 * a workqueue.
3149 */
3150 cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3151
3152 /* The rest of the rx_rings are processed in
3153 * a workqueue only if it's a single interrupt
3154 * environment (MSI/Legacy).
3155 */
3156 for (i = 1; i > qdev->rx_ring_count; i++) {
3157 rx_ring = &qdev->rx_ring[i];
3158 /* Only the RSS rings use NAPI on multi irq
3159 * environment. Outbound completion processing
3160 * is done in interrupt context.
3161 */
3162 if (i >= qdev->rss_ring_first_cq_id) {
3163 napi_disable(&rx_ring->napi);
3164 } else {
3165 cancel_delayed_work_sync(&rx_ring->rx_work);
3166 }
3167 }
3168
3169 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3170
3171 ql_disable_interrupts(qdev);
3172
3173 ql_tx_ring_clean(qdev);
3174
3175 spin_lock(&qdev->hw_lock);
3176 status = ql_adapter_reset(qdev);
3177 if (status)
3178 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3179 qdev->func);
3180 spin_unlock(&qdev->hw_lock);
3181 return status;
3182}
3183
3184static int ql_adapter_up(struct ql_adapter *qdev)
3185{
3186 int err = 0;
3187
3188 spin_lock(&qdev->hw_lock);
3189 err = ql_adapter_initialize(qdev);
3190 if (err) {
3191 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3192 spin_unlock(&qdev->hw_lock);
3193 goto err_init;
3194 }
3195 spin_unlock(&qdev->hw_lock);
3196 set_bit(QL_ADAPTER_UP, &qdev->flags);
3197 ql_enable_interrupts(qdev);
3198 ql_enable_all_completion_interrupts(qdev);
3199 if ((ql_read32(qdev, STS) & qdev->port_init)) {
3200 netif_carrier_on(qdev->ndev);
3201 netif_start_queue(qdev->ndev);
3202 }
3203
3204 return 0;
3205err_init:
3206 ql_adapter_reset(qdev);
3207 return err;
3208}
3209
3210static int ql_cycle_adapter(struct ql_adapter *qdev)
3211{
3212 int status;
3213
3214 status = ql_adapter_down(qdev);
3215 if (status)
3216 goto error;
3217
3218 status = ql_adapter_up(qdev);
3219 if (status)
3220 goto error;
3221
3222 return status;
3223error:
3224 QPRINTK(qdev, IFUP, ALERT,
3225 "Driver up/down cycle failed, closing device\n");
3226 rtnl_lock();
3227 dev_close(qdev->ndev);
3228 rtnl_unlock();
3229 return status;
3230}
3231
/* Undo ql_get_adapter_resources(): free the DMA memory resources first,
 * then release the IRQs.
 */
static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}
3237
3238static int ql_get_adapter_resources(struct ql_adapter *qdev)
3239{
3240 int status = 0;
3241
3242 if (ql_alloc_mem_resources(qdev)) {
3243 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3244 return -ENOMEM;
3245 }
3246 status = ql_request_irq(qdev);
3247 if (status)
3248 goto err_irq;
3249 return status;
3250err_irq:
3251 ql_free_mem_resources(qdev);
3252 return status;
3253}
3254
3255static int qlge_close(struct net_device *ndev)
3256{
3257 struct ql_adapter *qdev = netdev_priv(ndev);
3258
3259 /*
3260 * Wait for device to recover from a reset.
3261 * (Rarely happens, but possible.)
3262 */
3263 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3264 msleep(1);
3265 ql_adapter_down(qdev);
3266 ql_release_adapter_resources(qdev);
3267 ql_free_ring_cb(qdev);
3268 return 0;
3269}
3270
3271static int ql_configure_rings(struct ql_adapter *qdev)
3272{
3273 int i;
3274 struct rx_ring *rx_ring;
3275 struct tx_ring *tx_ring;
3276 int cpu_cnt = num_online_cpus();
3277
3278 /*
3279 * For each processor present we allocate one
3280 * rx_ring for outbound completions, and one
3281 * rx_ring for inbound completions. Plus there is
3282 * always the one default queue. For the CPU
3283 * counts we end up with the following rx_rings:
3284 * rx_ring count =
3285 * one default queue +
3286 * (CPU count * outbound completion rx_ring) +
3287 * (CPU count * inbound (RSS) completion rx_ring)
3288 * To keep it simple we limit the total number of
3289 * queues to < 32, so we truncate CPU to 8.
3290 * This limitation can be removed when requested.
3291 */
3292
3293 if (cpu_cnt > 8)
3294 cpu_cnt = 8;
3295
3296 /*
3297 * rx_ring[0] is always the default queue.
3298 */
3299 /* Allocate outbound completion ring for each CPU. */
3300 qdev->tx_ring_count = cpu_cnt;
3301 /* Allocate inbound completion (RSS) ring for each CPU. */
3302 qdev->rss_ring_count = cpu_cnt;
3303 /* cq_id for the first inbound ring handler. */
3304 qdev->rss_ring_first_cq_id = cpu_cnt + 1;
3305 /*
3306 * qdev->rx_ring_count:
3307 * Total number of rx_rings. This includes the one
3308 * default queue, a number of outbound completion
3309 * handler rx_rings, and the number of inbound
3310 * completion handler rx_rings.
3311 */
3312 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3313
3314 if (ql_alloc_ring_cb(qdev))
3315 return -ENOMEM;
3316
3317 for (i = 0; i < qdev->tx_ring_count; i++) {
3318 tx_ring = &qdev->tx_ring[i];
3319 memset((void *)tx_ring, 0, sizeof(tx_ring));
3320 tx_ring->qdev = qdev;
3321 tx_ring->wq_id = i;
3322 tx_ring->wq_len = qdev->tx_ring_size;
3323 tx_ring->wq_size =
3324 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3325
3326 /*
3327 * The completion queue ID for the tx rings start
3328 * immediately after the default Q ID, which is zero.
3329 */
3330 tx_ring->cq_id = i + 1;
3331 }
3332
3333 for (i = 0; i < qdev->rx_ring_count; i++) {
3334 rx_ring = &qdev->rx_ring[i];
3335 memset((void *)rx_ring, 0, sizeof(rx_ring));
3336 rx_ring->qdev = qdev;
3337 rx_ring->cq_id = i;
3338 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3339 if (i == 0) { /* Default queue at index 0. */
3340 /*
3341 * Default queue handles bcast/mcast plus
3342 * async events. Needs buffers.
3343 */
3344 rx_ring->cq_len = qdev->rx_ring_size;
3345 rx_ring->cq_size =
3346 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3347 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3348 rx_ring->lbq_size =
3349 rx_ring->lbq_len * sizeof(struct bq_element);
3350 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3351 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3352 rx_ring->sbq_size =
3353 rx_ring->sbq_len * sizeof(struct bq_element);
3354 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3355 rx_ring->type = DEFAULT_Q;
3356 } else if (i < qdev->rss_ring_first_cq_id) {
3357 /*
3358 * Outbound queue handles outbound completions only.
3359 */
3360 /* outbound cq is same size as tx_ring it services. */
3361 rx_ring->cq_len = qdev->tx_ring_size;
3362 rx_ring->cq_size =
3363 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3364 rx_ring->lbq_len = 0;
3365 rx_ring->lbq_size = 0;
3366 rx_ring->lbq_buf_size = 0;
3367 rx_ring->sbq_len = 0;
3368 rx_ring->sbq_size = 0;
3369 rx_ring->sbq_buf_size = 0;
3370 rx_ring->type = TX_Q;
3371 } else { /* Inbound completions (RSS) queues */
3372 /*
3373 * Inbound queues handle unicast frames only.
3374 */
3375 rx_ring->cq_len = qdev->rx_ring_size;
3376 rx_ring->cq_size =
3377 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3378 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3379 rx_ring->lbq_size =
3380 rx_ring->lbq_len * sizeof(struct bq_element);
3381 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3382 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3383 rx_ring->sbq_size =
3384 rx_ring->sbq_len * sizeof(struct bq_element);
3385 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3386 rx_ring->type = RX_Q;
3387 }
3388 }
3389 return 0;
3390}
3391
/* net_device open hook: size the rings, grab memory/IRQ resources and
 * bring the adapter up.  All resources are released on any failure.
 */
static int qlge_open(struct net_device *ndev)
{
	struct ql_adapter *adapter = netdev_priv(ndev);
	int status;

	status = ql_configure_rings(adapter);
	if (status)
		return status;

	status = ql_get_adapter_resources(adapter);
	if (!status)
		status = ql_adapter_up(adapter);
	if (!status)
		return 0;

	/* Unwind everything acquired above. */
	ql_release_adapter_resources(adapter);
	ql_free_ring_cb(adapter);
	return status;
}
3416
3417static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3418{
3419 struct ql_adapter *qdev = netdev_priv(ndev);
3420
3421 if (ndev->mtu == 1500 && new_mtu == 9000) {
3422 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3423 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3424 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3425 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3426 (ndev->mtu == 9000 && new_mtu == 9000)) {
3427 return 0;
3428 } else
3429 return -EINVAL;
3430 ndev->mtu = new_mtu;
3431 return 0;
3432}
3433
3434static struct net_device_stats *qlge_get_stats(struct net_device
3435 *ndev)
3436{
3437 struct ql_adapter *qdev = netdev_priv(ndev);
3438 return &qdev->stats;
3439}
3440
/*
 * net_device set_multicast_list hook.
 *
 * Under hw_lock, reconciles the hardware routing registers with the
 * netdev flags: toggles promiscuous mode on an IFF_PROMISC transition,
 * toggles all-multi mode on an IFF_ALLMULTI transition (or when the
 * multicast list exceeds MAX_MULTICAST_ENTRIES), and finally loads the
 * individual multicast addresses into the CAM and enables multicast
 * matching.  The QL_PROMISCUOUS/QL_ALLMULTI flag bits track what the
 * hardware was last told, so registers are only touched on transitions.
 */
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct dev_mc_list *mc_ptr;
	int i;

	spin_lock(&qdev->hw_lock);
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set promiscous mode.\n");
			} else {
				/* Remember what the hardware now does. */
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear promiscous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	/* Load each list entry into the MAC address CAM, then enable
	 * multicast-match routing.
	 */
	if (ndev->mc_count) {
		for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
		     i++, mc_ptr = mc_ptr->next)
			/* Bail out on the first CAM write failure. */
			if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to loadmulticast address.\n");
				goto exit;
			}
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			QPRINTK(qdev, HW, ERR,
				"Failed to set multicast match mode.\n");
		} else {
			/* NOTE(review): sets QL_ALLMULTI even though this is
			 * the multicast-*match* path — confirm intended. */
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	spin_unlock(&qdev->hw_lock);
}
3521
3522static int qlge_set_mac_address(struct net_device *ndev, void *p)
3523{
3524 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3525 struct sockaddr *addr = p;
3526
3527 if (netif_running(ndev))
3528 return -EBUSY;
3529
3530 if (!is_valid_ether_addr(addr->sa_data))
3531 return -EADDRNOTAVAIL;
3532 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3533
3534 spin_lock(&qdev->hw_lock);
3535 if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3536 MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
3537 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3538 return -1;
3539 }
3540 spin_unlock(&qdev->hw_lock);
3541
3542 return 0;
3543}
3544
3545static void qlge_tx_timeout(struct net_device *ndev)
3546{
3547 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3548 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
3549}
3550
3551static void ql_asic_reset_work(struct work_struct *work)
3552{
3553 struct ql_adapter *qdev =
3554 container_of(work, struct ql_adapter, asic_reset_work.work);
3555 ql_cycle_adapter(qdev);
3556}
3557
3558static void ql_get_board_info(struct ql_adapter *qdev)
3559{
3560 qdev->func =
3561 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3562 if (qdev->func) {
3563 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3564 qdev->port_link_up = STS_PL1;
3565 qdev->port_init = STS_PI1;
3566 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3567 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3568 } else {
3569 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3570 qdev->port_link_up = STS_PL0;
3571 qdev->port_init = STS_PI0;
3572 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3573 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3574 }
3575 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3576}
3577
/* Release everything ql_init_device() acquired: workqueues, register
 * and doorbell mappings, PCI regions and the drvdata pointer.  Each
 * teardown is guarded so this is safe to call from partial-init error
 * paths — provided drvdata has already been set (it is read below).
 */
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}
	if (qdev->q_workqueue) {
		destroy_workqueue(qdev->q_workqueue);
		qdev->q_workqueue = NULL;
	}
	if (qdev->reg_base)
		iounmap((void *)qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
3598
3599static int __devinit ql_init_device(struct pci_dev *pdev,
3600 struct net_device *ndev, int cards_found)
3601{
3602 struct ql_adapter *qdev = netdev_priv(ndev);
3603 int pos, err = 0;
3604 u16 val16;
3605
3606 memset((void *)qdev, 0, sizeof(qdev));
3607 err = pci_enable_device(pdev);
3608 if (err) {
3609 dev_err(&pdev->dev, "PCI device enable failed.\n");
3610 return err;
3611 }
3612
3613 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3614 if (pos <= 0) {
3615 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3616 "aborting.\n");
3617 goto err_out;
3618 } else {
3619 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3620 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3621 val16 |= (PCI_EXP_DEVCTL_CERE |
3622 PCI_EXP_DEVCTL_NFERE |
3623 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3624 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3625 }
3626
3627 err = pci_request_regions(pdev, DRV_NAME);
3628 if (err) {
3629 dev_err(&pdev->dev, "PCI region request failed.\n");
3630 goto err_out;
3631 }
3632
3633 pci_set_master(pdev);
3634 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3635 set_bit(QL_DMA64, &qdev->flags);
3636 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3637 } else {
3638 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3639 if (!err)
3640 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3641 }
3642
3643 if (err) {
3644 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3645 goto err_out;
3646 }
3647
3648 pci_set_drvdata(pdev, ndev);
3649 qdev->reg_base =
3650 ioremap_nocache(pci_resource_start(pdev, 1),
3651 pci_resource_len(pdev, 1));
3652 if (!qdev->reg_base) {
3653 dev_err(&pdev->dev, "Register mapping failed.\n");
3654 err = -ENOMEM;
3655 goto err_out;
3656 }
3657
3658 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3659 qdev->doorbell_area =
3660 ioremap_nocache(pci_resource_start(pdev, 3),
3661 pci_resource_len(pdev, 3));
3662 if (!qdev->doorbell_area) {
3663 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3664 err = -ENOMEM;
3665 goto err_out;
3666 }
3667
3668 ql_get_board_info(qdev);
3669 qdev->ndev = ndev;
3670 qdev->pdev = pdev;
3671 qdev->msg_enable = netif_msg_init(debug, default_msg);
3672 spin_lock_init(&qdev->hw_lock);
3673 spin_lock_init(&qdev->stats_lock);
3674
3675 /* make sure the EEPROM is good */
3676 err = ql_get_flash_params(qdev);
3677 if (err) {
3678 dev_err(&pdev->dev, "Invalid FLASH.\n");
3679 goto err_out;
3680 }
3681
3682 if (!is_valid_ether_addr(qdev->flash.mac_addr))
3683 goto err_out;
3684
3685 memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
3686 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3687
3688 /* Set up the default ring sizes. */
3689 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3690 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3691
3692 /* Set up the coalescing parameters. */
3693 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3694 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3695 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3696 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3697
3698 /*
3699 * Set up the operating parameters.
3700 */
3701 qdev->rx_csum = 1;
3702
3703 qdev->q_workqueue = create_workqueue(ndev->name);
3704 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3705 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3706 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3707 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3708
3709 if (!cards_found) {
3710 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3711 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3712 DRV_NAME, DRV_VERSION);
3713 }
3714 return 0;
3715err_out:
3716 ql_release_all(pdev);
3717 pci_disable_device(pdev);
3718 return err;
3719}
3720
/*
 * PCI probe entry point: allocate the netdev, run one-time device init,
 * fill in the net_device ops/feature bits and register it.  The order
 * matters: ql_init_device() must succeed before qdev fields (ring size,
 * DMA flag) are consumed, and the netdev is registered last so no hook
 * can fire on a half-initialized device.
 */
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	/* Counts probed devices so the banner prints only once. */
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev(sizeof(struct ql_adapter));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);

	/* 64-bit DMA capability was detected by ql_init_device(). */
	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;
	ndev->open = qlge_open;
	ndev->stop = qlge_close;
	ndev->hard_start_xmit = qlge_send;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->change_mtu = qlge_change_mtu;
	ndev->get_stats = qlge_get_stats;
	ndev->set_multicast_list = qlge_set_multicast_list;
	ndev->set_mac_address = qlge_set_mac_address;
	ndev->tx_timeout = qlge_tx_timeout;
	ndev->watchdog_timeo = 10 * HZ;
	ndev->vlan_rx_register = ql_vlan_rx_register;
	ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid;
	ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid;
	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Link state is unknown until the MPI firmware reports it. */
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_display_dev_info(ndev);
	cards_found++;
	return 0;
}
3784
3785static void __devexit qlge_remove(struct pci_dev *pdev)
3786{
3787 struct net_device *ndev = pci_get_drvdata(pdev);
3788 unregister_netdev(ndev);
3789 ql_release_all(pdev);
3790 pci_disable_device(pdev);
3791 free_netdev(ndev);
3792}
3793
3794/*
3795 * This callback is called by the PCI subsystem whenever
3796 * a PCI bus error is detected.
3797 */
3798static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
3799 enum pci_channel_state state)
3800{
3801 struct net_device *ndev = pci_get_drvdata(pdev);
3802 struct ql_adapter *qdev = netdev_priv(ndev);
3803
3804 if (netif_running(ndev))
3805 ql_adapter_down(qdev);
3806
3807 pci_disable_device(pdev);
3808
3809 /* Request a slot reset. */
3810 return PCI_ERS_RESULT_NEED_RESET;
3811}
3812
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the probe routine.
 */
/*
 * AER slot-reset callback: re-enable the device, restore bus mastering,
 * quiesce the queues and reset the chip.  Returns RECOVERED so the core
 * proceeds to qlge_io_resume(), or DISCONNECT if the device cannot be
 * re-enabled or the MAC address is no longer valid.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_adapter_reset(qdev);

	/* Make sure the EEPROM is good */
	/* NOTE(review): this only copies dev_addr into perm_addr; it does
	 * not re-read the flash, so the check below can only fail if
	 * dev_addr itself was invalid — confirm that is the intent. */
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	if (!is_valid_ether_addr(ndev->perm_addr)) {
		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
3846
3847static void qlge_io_resume(struct pci_dev *pdev)
3848{
3849 struct net_device *ndev = pci_get_drvdata(pdev);
3850 struct ql_adapter *qdev = netdev_priv(ndev);
3851
3852 pci_set_master(pdev);
3853
3854 if (netif_running(ndev)) {
3855 if (ql_adapter_up(qdev)) {
3856 QPRINTK(qdev, IFUP, ERR,
3857 "Device initialization failed after reset.\n");
3858 return;
3859 }
3860 }
3861
3862 netif_device_attach(ndev);
3863}
3864
/* PCI AER hooks: quiesce on error, re-init after slot reset, resume. */
static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
3870
3871static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
3872{
3873 struct net_device *ndev = pci_get_drvdata(pdev);
3874 struct ql_adapter *qdev = netdev_priv(ndev);
3875 int err;
3876
3877 netif_device_detach(ndev);
3878
3879 if (netif_running(ndev)) {
3880 err = ql_adapter_down(qdev);
3881 if (!err)
3882 return err;
3883 }
3884
3885 err = pci_save_state(pdev);
3886 if (err)
3887 return err;
3888
3889 pci_disable_device(pdev);
3890
3891 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3892
3893 return 0;
3894}
3895
3896#ifdef CONFIG_PM
3897static int qlge_resume(struct pci_dev *pdev)
3898{
3899 struct net_device *ndev = pci_get_drvdata(pdev);
3900 struct ql_adapter *qdev = netdev_priv(ndev);
3901 int err;
3902
3903 pci_set_power_state(pdev, PCI_D0);
3904 pci_restore_state(pdev);
3905 err = pci_enable_device(pdev);
3906 if (err) {
3907 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
3908 return err;
3909 }
3910 pci_set_master(pdev);
3911
3912 pci_enable_wake(pdev, PCI_D3hot, 0);
3913 pci_enable_wake(pdev, PCI_D3cold, 0);
3914
3915 if (netif_running(ndev)) {
3916 err = ql_adapter_up(qdev);
3917 if (err)
3918 return err;
3919 }
3920
3921 netif_device_attach(ndev);
3922
3923 return 0;
3924}
3925#endif /* CONFIG_PM */
3926
/* Shutdown hook: reuse the suspend path to quiesce and power down. */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
3931
/* PCI driver descriptor; PM callbacks are compiled in only with CONFIG_PM. */
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
3944
/* Module load: register the PCI driver. */
static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}
3949
/* Module unload: unregister the PCI driver. */
static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}
3954
/* Module entry/exit points. */
module_init(qlge_init_module);
module_exit(qlge_exit);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
new file mode 100644
index 000000000000..24fe344bcf1f
--- /dev/null
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -0,0 +1,150 @@
1#include "qlge.h"
2
3static int ql_read_mbox_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
4{
5 int status;
6 /* wait for reg to come ready */
7 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
8 if (status)
9 goto exit;
10 /* set up for reg read */
11 ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
12 /* wait for reg to come ready */
13 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
14 if (status)
15 goto exit;
16 /* get the data */
17 *data = ql_read32(qdev, PROC_DATA);
18exit:
19 return status;
20}
21
22int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
23{
24 int i, status;
25
26 status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
27 if (status)
28 return -EBUSY;
29 for (i = 0; i < mbcp->out_count; i++) {
30 status =
31 ql_read_mbox_reg(qdev, qdev->mailbox_out + i,
32 &mbcp->mbox_out[i]);
33 if (status) {
34 QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
35 break;
36 }
37 }
38 ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
39 return status;
40}
41
42static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
43{
44 mbcp->out_count = 2;
45
46 if (ql_get_mb_sts(qdev, mbcp))
47 goto exit;
48
49 qdev->link_status = mbcp->mbox_out[1];
50 QPRINTK(qdev, DRV, ERR, "Link Up.\n");
51 QPRINTK(qdev, DRV, INFO, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
52 if (!netif_carrier_ok(qdev->ndev)) {
53 QPRINTK(qdev, LINK, INFO, "Link is Up.\n");
54 netif_carrier_on(qdev->ndev);
55 netif_wake_queue(qdev->ndev);
56 }
57exit:
58 /* Clear the MPI firmware status. */
59 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
60}
61
62static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
63{
64 mbcp->out_count = 3;
65
66 if (ql_get_mb_sts(qdev, mbcp)) {
67 QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
68 goto exit;
69 }
70
71 if (netif_carrier_ok(qdev->ndev)) {
72 QPRINTK(qdev, LINK, INFO, "Link is Down.\n");
73 netif_carrier_off(qdev->ndev);
74 netif_stop_queue(qdev->ndev);
75 }
76 QPRINTK(qdev, DRV, ERR, "Link Down.\n");
77 QPRINTK(qdev, DRV, ERR, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
78exit:
79 /* Clear the MPI firmware status. */
80 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
81}
82
83static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
84{
85 mbcp->out_count = 2;
86
87 if (ql_get_mb_sts(qdev, mbcp)) {
88 QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
89 goto exit;
90 }
91 QPRINTK(qdev, DRV, ERR, "Firmware initialized!\n");
92 QPRINTK(qdev, DRV, ERR, "Firmware status = 0x%.08x.\n",
93 mbcp->mbox_out[0]);
94 QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
95 mbcp->mbox_out[1]);
96exit:
97 /* Clear the MPI firmware status. */
98 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
99}
100
/*
 * Deferred handler for MPI (management processor) events.  Drains
 * asynchronous event notifications from the firmware mailbox while the
 * STS register reports one pending, then re-enables the completion
 * interrupt for vector 0.
 */
void ql_mpi_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, mpi_work.work);
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	mbcp->out_count = 1;

	/* STS_PI stays set while the firmware has an event queued. */
	while (ql_read32(qdev, STS) & STS_PI) {
		if (ql_get_mb_sts(qdev, mbcp)) {
			QPRINTK(qdev, DRV, ERR,
				"Could not read MPI, resetting ASIC!\n");
			ql_queue_asic_error(qdev);
			/* NOTE(review): mbox_out[0] may be stale here yet is
			 * still dispatched below — confirm intended. */
		}

		switch (mbcp->mbox_out[0]) {
		case AEN_LINK_UP:
			ql_link_up(qdev, mbcp);
			break;
		case AEN_LINK_DOWN:
			ql_link_down(qdev, mbcp);
			break;
		case AEN_FW_INIT_DONE:
			ql_init_fw_done(qdev, mbcp);
			break;
		case MB_CMD_STS_GOOD:
			break;
		case AEN_FW_INIT_FAIL:
		case AEN_SYS_ERR:
		case MB_CMD_STS_ERR:
			ql_queue_fw_error(qdev);
			/* fallthrough — error events still need the ack
			 * (status clear) performed by the default case. */
		default:
			/* Clear the MPI firmware status. */
			ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
			break;
		}
	}
	ql_enable_completion_interrupt(qdev, 0);
}
140
141void ql_mpi_reset_work(struct work_struct *work)
142{
143 struct ql_adapter *qdev =
144 container_of(work, struct ql_adapter, mpi_reset_work.work);
145 QPRINTK(qdev, DRV, ERR,
146 "Enter, qdev = %p..\n", qdev);
147 ql_write32(qdev, CSR, CSR_CMD_SET_RST);
148 msleep(50);
149 ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
150}
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 5d86281d9363..025f526558bc 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -370,7 +370,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
370 /* Reset internal state machine */ 370 /* Reset internal state machine */
371 iowrite16(2, ioaddr + MAC_SM); 371 iowrite16(2, ioaddr + MAC_SM);
372 iowrite16(0, ioaddr + MAC_SM); 372 iowrite16(0, ioaddr + MAC_SM);
373 udelay(5000); 373 mdelay(5);
374 374
375 /* MAC Bus Control Register */ 375 /* MAC Bus Control Register */
376 iowrite16(MBCR_DEFAULT, ioaddr + MBCR); 376 iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
@@ -806,7 +806,7 @@ static void r6040_mac_address(struct net_device *dev)
806 iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */ 806 iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */
807 iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */ 807 iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
808 iowrite16(0, ioaddr + MAC_SM); 808 iowrite16(0, ioaddr + MAC_SM);
809 udelay(5000); 809 mdelay(5);
810 810
811 /* Restore MAC Address */ 811 /* Restore MAC Address */
812 adrp = (u16 *) dev->dev_addr; 812 adrp = (u16 *) dev->dev_addr;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0f6f9747d255..fb899c675f47 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -36,7 +36,7 @@
36#define assert(expr) \ 36#define assert(expr) \
37 if (!(expr)) { \ 37 if (!(expr)) { \
38 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 38 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
39 #expr,__FILE__,__FUNCTION__,__LINE__); \ 39 #expr,__FILE__,__func__,__LINE__); \
40 } 40 }
41#define dprintk(fmt, args...) \ 41#define dprintk(fmt, args...) \
42 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) 42 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
@@ -61,6 +61,7 @@ static const int multicast_filter_limit = 32;
61/* MAC address length */ 61/* MAC address length */
62#define MAC_ADDR_LEN 6 62#define MAC_ADDR_LEN 6
63 63
64#define MAX_READ_REQUEST_SHIFT 12
64#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */ 65#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
65#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 66#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
66#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 67#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
@@ -95,6 +96,10 @@ enum mac_version {
95 RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB 96 RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
96 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd 97 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
97 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe 98 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
99 RTL_GIGA_MAC_VER_07 = 0x07, // 8102e
100 RTL_GIGA_MAC_VER_08 = 0x08, // 8102e
101 RTL_GIGA_MAC_VER_09 = 0x09, // 8102e
102 RTL_GIGA_MAC_VER_10 = 0x0a, // 8101e
98 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb 103 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
99 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be 104 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
100 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb 105 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
@@ -121,6 +126,10 @@ static const struct {
121 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB 126 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
122 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd 127 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
123 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe 128 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
129 _R("RTL8102e", RTL_GIGA_MAC_VER_07, 0xff7e1880), // PCI-E
130 _R("RTL8102e", RTL_GIGA_MAC_VER_08, 0xff7e1880), // PCI-E
131 _R("RTL8102e", RTL_GIGA_MAC_VER_09, 0xff7e1880), // PCI-E
132 _R("RTL8101e", RTL_GIGA_MAC_VER_10, 0xff7e1880), // PCI-E
124 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E 133 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
125 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E 134 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
126 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139 135 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
@@ -196,9 +205,6 @@ enum rtl_registers {
196 Config5 = 0x56, 205 Config5 = 0x56,
197 MultiIntr = 0x5c, 206 MultiIntr = 0x5c,
198 PHYAR = 0x60, 207 PHYAR = 0x60,
199 TBICSR = 0x64,
200 TBI_ANAR = 0x68,
201 TBI_LPAR = 0x6a,
202 PHYstatus = 0x6c, 208 PHYstatus = 0x6c,
203 RxMaxSize = 0xda, 209 RxMaxSize = 0xda,
204 CPlusCmd = 0xe0, 210 CPlusCmd = 0xe0,
@@ -212,6 +218,32 @@ enum rtl_registers {
212 FuncForceEvent = 0xfc, 218 FuncForceEvent = 0xfc,
213}; 219};
214 220
221enum rtl8110_registers {
222 TBICSR = 0x64,
223 TBI_ANAR = 0x68,
224 TBI_LPAR = 0x6a,
225};
226
227enum rtl8168_8101_registers {
228 CSIDR = 0x64,
229 CSIAR = 0x68,
230#define CSIAR_FLAG 0x80000000
231#define CSIAR_WRITE_CMD 0x80000000
232#define CSIAR_BYTE_ENABLE 0x0f
233#define CSIAR_BYTE_ENABLE_SHIFT 12
234#define CSIAR_ADDR_MASK 0x0fff
235
236 EPHYAR = 0x80,
237#define EPHYAR_FLAG 0x80000000
238#define EPHYAR_WRITE_CMD 0x80000000
239#define EPHYAR_REG_MASK 0x1f
240#define EPHYAR_REG_SHIFT 16
241#define EPHYAR_DATA_MASK 0xffff
242 DBG_REG = 0xd1,
243#define FIX_NAK_1 (1 << 4)
244#define FIX_NAK_2 (1 << 3)
245};
246
215enum rtl_register_content { 247enum rtl_register_content {
216 /* InterruptStatusBits */ 248 /* InterruptStatusBits */
217 SYSErr = 0x8000, 249 SYSErr = 0x8000,
@@ -265,7 +297,13 @@ enum rtl_register_content {
265 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ 297 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
266 298
267 /* Config1 register p.24 */ 299 /* Config1 register p.24 */
300 LEDS1 = (1 << 7),
301 LEDS0 = (1 << 6),
268 MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */ 302 MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
303 Speed_down = (1 << 4),
304 MEMMAP = (1 << 3),
305 IOMAP = (1 << 2),
306 VPD = (1 << 1),
269 PMEnable = (1 << 0), /* Power Management Enable */ 307 PMEnable = (1 << 0), /* Power Management Enable */
270 308
271 /* Config2 register p. 25 */ 309 /* Config2 register p. 25 */
@@ -275,6 +313,7 @@ enum rtl_register_content {
275 /* Config3 register p.25 */ 313 /* Config3 register p.25 */
276 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ 314 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
277 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ 315 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
316 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
278 317
279 /* Config5 register p.27 */ 318 /* Config5 register p.27 */
280 BWF = (1 << 6), /* Accept Broadcast wakeup frame */ 319 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
@@ -292,7 +331,16 @@ enum rtl_register_content {
292 TBINwComplete = 0x01000000, 331 TBINwComplete = 0x01000000,
293 332
294 /* CPlusCmd p.31 */ 333 /* CPlusCmd p.31 */
295 PktCntrDisable = (1 << 7), // 8168 334 EnableBist = (1 << 15), // 8168 8101
335 Mac_dbgo_oe = (1 << 14), // 8168 8101
336 Normal_mode = (1 << 13), // unused
337 Force_half_dup = (1 << 12), // 8168 8101
338 Force_rxflow_en = (1 << 11), // 8168 8101
339 Force_txflow_en = (1 << 10), // 8168 8101
340 Cxpl_dbg_sel = (1 << 9), // 8168 8101
341 ASF = (1 << 8), // 8168 8101
342 PktCntrDisable = (1 << 7), // 8168 8101
343 Mac_dbgo_sel = 0x001c, // 8168
296 RxVlan = (1 << 6), 344 RxVlan = (1 << 6),
297 RxChkSum = (1 << 5), 345 RxChkSum = (1 << 5),
298 PCIDAC = (1 << 4), 346 PCIDAC = (1 << 4),
@@ -370,8 +418,9 @@ struct ring_info {
370}; 418};
371 419
372enum features { 420enum features {
373 RTL_FEATURE_WOL = (1 << 0), 421 RTL_FEATURE_WOL = (1 << 0),
374 RTL_FEATURE_MSI = (1 << 1), 422 RTL_FEATURE_MSI = (1 << 1),
423 RTL_FEATURE_GMII = (1 << 2),
375}; 424};
376 425
377struct rtl8169_private { 426struct rtl8169_private {
@@ -406,13 +455,16 @@ struct rtl8169_private {
406 struct vlan_group *vlgrp; 455 struct vlan_group *vlgrp;
407#endif 456#endif
408 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); 457 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
409 void (*get_settings)(struct net_device *, struct ethtool_cmd *); 458 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
410 void (*phy_reset_enable)(void __iomem *); 459 void (*phy_reset_enable)(void __iomem *);
411 void (*hw_start)(struct net_device *); 460 void (*hw_start)(struct net_device *);
412 unsigned int (*phy_reset_pending)(void __iomem *); 461 unsigned int (*phy_reset_pending)(void __iomem *);
413 unsigned int (*link_ok)(void __iomem *); 462 unsigned int (*link_ok)(void __iomem *);
463 int pcie_cap;
414 struct delayed_work task; 464 struct delayed_work task;
415 unsigned features; 465 unsigned features;
466
467 struct mii_if_info mii;
416}; 468};
417 469
418MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 470MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -482,6 +534,94 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
482 return value; 534 return value;
483} 535}
484 536
537static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
538{
539 mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
540}
541
542static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
543 int val)
544{
545 struct rtl8169_private *tp = netdev_priv(dev);
546 void __iomem *ioaddr = tp->mmio_addr;
547
548 mdio_write(ioaddr, location, val);
549}
550
551static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
552{
553 struct rtl8169_private *tp = netdev_priv(dev);
554 void __iomem *ioaddr = tp->mmio_addr;
555
556 return mdio_read(ioaddr, location);
557}
558
559static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
560{
561 unsigned int i;
562
563 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
564 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
565
566 for (i = 0; i < 100; i++) {
567 if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
568 break;
569 udelay(10);
570 }
571}
572
573static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
574{
575 u16 value = 0xffff;
576 unsigned int i;
577
578 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
579
580 for (i = 0; i < 100; i++) {
581 if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
582 value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
583 break;
584 }
585 udelay(10);
586 }
587
588 return value;
589}
590
591static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
592{
593 unsigned int i;
594
595 RTL_W32(CSIDR, value);
596 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
597 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
598
599 for (i = 0; i < 100; i++) {
600 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
601 break;
602 udelay(10);
603 }
604}
605
606static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
607{
608 u32 value = ~0x00;
609 unsigned int i;
610
611 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
612 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
613
614 for (i = 0; i < 100; i++) {
615 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
616 value = RTL_R32(CSIDR);
617 break;
618 }
619 udelay(10);
620 }
621
622 return value;
623}
624
485static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) 625static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
486{ 626{
487 RTL_W16(IntrMask, 0x0000); 627 RTL_W16(IntrMask, 0x0000);
@@ -705,8 +845,12 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
705 } 845 }
706 } 846 }
707 847
708 /* The 8100e/8101e do Fast Ethernet only. */ 848 /* The 8100e/8101e/8102e do Fast Ethernet only. */
709 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || 849 if ((tp->mac_version == RTL_GIGA_MAC_VER_07) ||
850 (tp->mac_version == RTL_GIGA_MAC_VER_08) ||
851 (tp->mac_version == RTL_GIGA_MAC_VER_09) ||
852 (tp->mac_version == RTL_GIGA_MAC_VER_10) ||
853 (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
710 (tp->mac_version == RTL_GIGA_MAC_VER_14) || 854 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
711 (tp->mac_version == RTL_GIGA_MAC_VER_15) || 855 (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
712 (tp->mac_version == RTL_GIGA_MAC_VER_16)) { 856 (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
@@ -850,7 +994,7 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
850 994
851#endif 995#endif
852 996
853static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) 997static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
854{ 998{
855 struct rtl8169_private *tp = netdev_priv(dev); 999 struct rtl8169_private *tp = netdev_priv(dev);
856 void __iomem *ioaddr = tp->mmio_addr; 1000 void __iomem *ioaddr = tp->mmio_addr;
@@ -867,65 +1011,29 @@ static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
867 1011
868 cmd->speed = SPEED_1000; 1012 cmd->speed = SPEED_1000;
869 cmd->duplex = DUPLEX_FULL; /* Always set */ 1013 cmd->duplex = DUPLEX_FULL; /* Always set */
1014
1015 return 0;
870} 1016}
871 1017
872static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) 1018static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
873{ 1019{
874 struct rtl8169_private *tp = netdev_priv(dev); 1020 struct rtl8169_private *tp = netdev_priv(dev);
875 void __iomem *ioaddr = tp->mmio_addr; 1021
876 u8 status; 1022 return mii_ethtool_gset(&tp->mii, cmd);
877
878 cmd->supported = SUPPORTED_10baseT_Half |
879 SUPPORTED_10baseT_Full |
880 SUPPORTED_100baseT_Half |
881 SUPPORTED_100baseT_Full |
882 SUPPORTED_1000baseT_Full |
883 SUPPORTED_Autoneg |
884 SUPPORTED_TP;
885
886 cmd->autoneg = 1;
887 cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
888
889 if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
890 cmd->advertising |= ADVERTISED_10baseT_Half;
891 if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
892 cmd->advertising |= ADVERTISED_10baseT_Full;
893 if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
894 cmd->advertising |= ADVERTISED_100baseT_Half;
895 if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
896 cmd->advertising |= ADVERTISED_100baseT_Full;
897 if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
898 cmd->advertising |= ADVERTISED_1000baseT_Full;
899
900 status = RTL_R8(PHYstatus);
901
902 if (status & _1000bpsF)
903 cmd->speed = SPEED_1000;
904 else if (status & _100bps)
905 cmd->speed = SPEED_100;
906 else if (status & _10bps)
907 cmd->speed = SPEED_10;
908
909 if (status & TxFlowCtrl)
910 cmd->advertising |= ADVERTISED_Asym_Pause;
911 if (status & RxFlowCtrl)
912 cmd->advertising |= ADVERTISED_Pause;
913
914 cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
915 DUPLEX_FULL : DUPLEX_HALF;
916} 1023}
917 1024
918static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1025static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
919{ 1026{
920 struct rtl8169_private *tp = netdev_priv(dev); 1027 struct rtl8169_private *tp = netdev_priv(dev);
921 unsigned long flags; 1028 unsigned long flags;
1029 int rc;
922 1030
923 spin_lock_irqsave(&tp->lock, flags); 1031 spin_lock_irqsave(&tp->lock, flags);
924 1032
925 tp->get_settings(dev, cmd); 1033 rc = tp->get_settings(dev, cmd);
926 1034
927 spin_unlock_irqrestore(&tp->lock, flags); 1035 spin_unlock_irqrestore(&tp->lock, flags);
928 return 0; 1036 return rc;
929} 1037}
930 1038
931static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, 1039static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -1116,8 +1224,17 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1116 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 1224 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1117 1225
1118 /* 8101 family. */ 1226 /* 8101 family. */
1227 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
1228 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
1229 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
1230 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
1231 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
1232 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
1119 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, 1233 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
1234 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
1120 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, 1235 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
1236 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
1237 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
1121 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, 1238 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
1122 /* FIXME: where did these entries come from ? -- FR */ 1239 /* FIXME: where did these entries come from ? -- FR */
1123 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, 1240 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
@@ -1279,6 +1396,22 @@ static void rtl8168cx_hw_phy_config(void __iomem *ioaddr)
1279 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 1396 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1280} 1397}
1281 1398
1399static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
1400{
1401 struct phy_reg phy_reg_init[] = {
1402 { 0x1f, 0x0003 },
1403 { 0x08, 0x441d },
1404 { 0x01, 0x9100 },
1405 { 0x1f, 0x0000 }
1406 };
1407
1408 mdio_write(ioaddr, 0x1f, 0x0000);
1409 mdio_patch(ioaddr, 0x11, 1 << 12);
1410 mdio_patch(ioaddr, 0x19, 1 << 13);
1411
1412 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1413}
1414
1282static void rtl_hw_phy_config(struct net_device *dev) 1415static void rtl_hw_phy_config(struct net_device *dev)
1283{ 1416{
1284 struct rtl8169_private *tp = netdev_priv(dev); 1417 struct rtl8169_private *tp = netdev_priv(dev);
@@ -1296,6 +1429,11 @@ static void rtl_hw_phy_config(struct net_device *dev)
1296 case RTL_GIGA_MAC_VER_04: 1429 case RTL_GIGA_MAC_VER_04:
1297 rtl8169sb_hw_phy_config(ioaddr); 1430 rtl8169sb_hw_phy_config(ioaddr);
1298 break; 1431 break;
1432 case RTL_GIGA_MAC_VER_07:
1433 case RTL_GIGA_MAC_VER_08:
1434 case RTL_GIGA_MAC_VER_09:
1435 rtl8102e_hw_phy_config(ioaddr);
1436 break;
1299 case RTL_GIGA_MAC_VER_18: 1437 case RTL_GIGA_MAC_VER_18:
1300 rtl8168cp_hw_phy_config(ioaddr); 1438 rtl8168cp_hw_phy_config(ioaddr);
1301 break; 1439 break;
@@ -1513,7 +1651,7 @@ static const struct rtl_cfg_info {
1513 unsigned int align; 1651 unsigned int align;
1514 u16 intr_event; 1652 u16 intr_event;
1515 u16 napi_event; 1653 u16 napi_event;
1516 unsigned msi; 1654 unsigned features;
1517} rtl_cfg_infos [] = { 1655} rtl_cfg_infos [] = {
1518 [RTL_CFG_0] = { 1656 [RTL_CFG_0] = {
1519 .hw_start = rtl_hw_start_8169, 1657 .hw_start = rtl_hw_start_8169,
@@ -1522,7 +1660,7 @@ static const struct rtl_cfg_info {
1522 .intr_event = SYSErr | LinkChg | RxOverflow | 1660 .intr_event = SYSErr | LinkChg | RxOverflow |
1523 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 1661 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
1524 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, 1662 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
1525 .msi = 0 1663 .features = RTL_FEATURE_GMII
1526 }, 1664 },
1527 [RTL_CFG_1] = { 1665 [RTL_CFG_1] = {
1528 .hw_start = rtl_hw_start_8168, 1666 .hw_start = rtl_hw_start_8168,
@@ -1531,7 +1669,7 @@ static const struct rtl_cfg_info {
1531 .intr_event = SYSErr | LinkChg | RxOverflow | 1669 .intr_event = SYSErr | LinkChg | RxOverflow |
1532 TxErr | TxOK | RxOK | RxErr, 1670 TxErr | TxOK | RxOK | RxErr,
1533 .napi_event = TxErr | TxOK | RxOK | RxOverflow, 1671 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
1534 .msi = RTL_FEATURE_MSI 1672 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI
1535 }, 1673 },
1536 [RTL_CFG_2] = { 1674 [RTL_CFG_2] = {
1537 .hw_start = rtl_hw_start_8101, 1675 .hw_start = rtl_hw_start_8101,
@@ -1540,7 +1678,7 @@ static const struct rtl_cfg_info {
1540 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | 1678 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
1541 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 1679 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
1542 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, 1680 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
1543 .msi = RTL_FEATURE_MSI 1681 .features = RTL_FEATURE_MSI
1544 } 1682 }
1545}; 1683};
1546 1684
@@ -1552,7 +1690,7 @@ static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
1552 u8 cfg2; 1690 u8 cfg2;
1553 1691
1554 cfg2 = RTL_R8(Config2) & ~MSIEnable; 1692 cfg2 = RTL_R8(Config2) & ~MSIEnable;
1555 if (cfg->msi) { 1693 if (cfg->features & RTL_FEATURE_MSI) {
1556 if (pci_enable_msi(pdev)) { 1694 if (pci_enable_msi(pdev)) {
1557 dev_info(&pdev->dev, "no MSI. Back to INTx.\n"); 1695 dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
1558 } else { 1696 } else {
@@ -1578,6 +1716,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1578 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; 1716 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
1579 const unsigned int region = cfg->region; 1717 const unsigned int region = cfg->region;
1580 struct rtl8169_private *tp; 1718 struct rtl8169_private *tp;
1719 struct mii_if_info *mii;
1581 struct net_device *dev; 1720 struct net_device *dev;
1582 void __iomem *ioaddr; 1721 void __iomem *ioaddr;
1583 unsigned int i; 1722 unsigned int i;
@@ -1602,6 +1741,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1602 tp->pci_dev = pdev; 1741 tp->pci_dev = pdev;
1603 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); 1742 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1604 1743
1744 mii = &tp->mii;
1745 mii->dev = dev;
1746 mii->mdio_read = rtl_mdio_read;
1747 mii->mdio_write = rtl_mdio_write;
1748 mii->phy_id_mask = 0x1f;
1749 mii->reg_num_mask = 0x1f;
1750 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
1751
1605 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 1752 /* enable device (incl. PCI PM wakeup and hotplug setup) */
1606 rc = pci_enable_device(pdev); 1753 rc = pci_enable_device(pdev);
1607 if (rc < 0) { 1754 if (rc < 0) {
@@ -1670,6 +1817,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1670 goto err_out_free_res_4; 1817 goto err_out_free_res_4;
1671 } 1818 }
1672 1819
1820 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1821 if (!tp->pcie_cap && netif_msg_probe(tp))
1822 dev_info(&pdev->dev, "no PCI Express capability\n");
1823
1673 /* Unneeded ? Don't mess with Mrs. Murphy. */ 1824 /* Unneeded ? Don't mess with Mrs. Murphy. */
1674 rtl8169_irq_mask_and_ack(ioaddr); 1825 rtl8169_irq_mask_and_ack(ioaddr);
1675 1826
@@ -2061,12 +2212,51 @@ static void rtl_hw_start_8169(struct net_device *dev)
2061 RTL_W16(IntrMask, tp->intr_event); 2212 RTL_W16(IntrMask, tp->intr_event);
2062} 2213}
2063 2214
2215static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
2216{
2217 struct net_device *dev = pci_get_drvdata(pdev);
2218 struct rtl8169_private *tp = netdev_priv(dev);
2219 int cap = tp->pcie_cap;
2220
2221 if (cap) {
2222 u16 ctl;
2223
2224 pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
2225 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
2226 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
2227 }
2228}
2229
2230static void rtl_csi_access_enable(void __iomem *ioaddr)
2231{
2232 u32 csi;
2233
2234 csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
2235 rtl_csi_write(ioaddr, 0x070c, csi | 0x27000000);
2236}
2237
2238struct ephy_info {
2239 unsigned int offset;
2240 u16 mask;
2241 u16 bits;
2242};
2243
2244static void rtl_ephy_init(void __iomem *ioaddr, struct ephy_info *e, int len)
2245{
2246 u16 w;
2247
2248 while (len-- > 0) {
2249 w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
2250 rtl_ephy_write(ioaddr, e->offset, w);
2251 e++;
2252 }
2253}
2254
2064static void rtl_hw_start_8168(struct net_device *dev) 2255static void rtl_hw_start_8168(struct net_device *dev)
2065{ 2256{
2066 struct rtl8169_private *tp = netdev_priv(dev); 2257 struct rtl8169_private *tp = netdev_priv(dev);
2067 void __iomem *ioaddr = tp->mmio_addr; 2258 void __iomem *ioaddr = tp->mmio_addr;
2068 struct pci_dev *pdev = tp->pci_dev; 2259 struct pci_dev *pdev = tp->pci_dev;
2069 u8 ctl;
2070 2260
2071 RTL_W8(Cfg9346, Cfg9346_Unlock); 2261 RTL_W8(Cfg9346, Cfg9346_Unlock);
2072 2262
@@ -2080,10 +2270,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
2080 2270
2081 RTL_W16(CPlusCmd, tp->cp_cmd); 2271 RTL_W16(CPlusCmd, tp->cp_cmd);
2082 2272
2083 /* Tx performance tweak. */ 2273 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
2084 pci_read_config_byte(pdev, 0x69, &ctl);
2085 ctl = (ctl & ~0x70) | 0x50;
2086 pci_write_config_byte(pdev, 0x69, ctl);
2087 2274
2088 RTL_W16(IntrMitigate, 0x5151); 2275 RTL_W16(IntrMitigate, 0x5151);
2089 2276
@@ -2099,8 +2286,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
2099 2286
2100 RTL_R8(IntrMask); 2287 RTL_R8(IntrMask);
2101 2288
2102 RTL_W32(RxMissed, 0);
2103
2104 rtl_set_rx_mode(dev); 2289 rtl_set_rx_mode(dev);
2105 2290
2106 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 2291 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -2110,6 +2295,70 @@ static void rtl_hw_start_8168(struct net_device *dev)
2110 RTL_W16(IntrMask, tp->intr_event); 2295 RTL_W16(IntrMask, tp->intr_event);
2111} 2296}
2112 2297
2298#define R810X_CPCMD_QUIRK_MASK (\
2299 EnableBist | \
2300 Mac_dbgo_oe | \
2301 Force_half_dup | \
2302 Force_half_dup | \
2303 Force_txflow_en | \
2304 Cxpl_dbg_sel | \
2305 ASF | \
2306 PktCntrDisable | \
2307 PCIDAC | \
2308 PCIMulRW)
2309
2310static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
2311{
2312 static struct ephy_info e_info_8102e_1[] = {
2313 { 0x01, 0, 0x6e65 },
2314 { 0x02, 0, 0x091f },
2315 { 0x03, 0, 0xc2f9 },
2316 { 0x06, 0, 0xafb5 },
2317 { 0x07, 0, 0x0e00 },
2318 { 0x19, 0, 0xec80 },
2319 { 0x01, 0, 0x2e65 },
2320 { 0x01, 0, 0x6e65 }
2321 };
2322 u8 cfg1;
2323
2324 rtl_csi_access_enable(ioaddr);
2325
2326 RTL_W8(DBG_REG, FIX_NAK_1);
2327
2328 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
2329
2330 RTL_W8(Config1,
2331 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
2332 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
2333
2334 cfg1 = RTL_R8(Config1);
2335 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
2336 RTL_W8(Config1, cfg1 & ~LEDS0);
2337
2338 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
2339
2340 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
2341}
2342
2343static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
2344{
2345 rtl_csi_access_enable(ioaddr);
2346
2347 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
2348
2349 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
2350 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
2351
2352 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
2353}
2354
2355static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
2356{
2357 rtl_hw_start_8102e_2(ioaddr, pdev);
2358
2359 rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
2360}
2361
2113static void rtl_hw_start_8101(struct net_device *dev) 2362static void rtl_hw_start_8101(struct net_device *dev)
2114{ 2363{
2115 struct rtl8169_private *tp = netdev_priv(dev); 2364 struct rtl8169_private *tp = netdev_priv(dev);
@@ -2118,8 +2367,26 @@ static void rtl_hw_start_8101(struct net_device *dev)
2118 2367
2119 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || 2368 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
2120 (tp->mac_version == RTL_GIGA_MAC_VER_16)) { 2369 (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
2121 pci_write_config_word(pdev, 0x68, 0x00); 2370 int cap = tp->pcie_cap;
2122 pci_write_config_word(pdev, 0x69, 0x08); 2371
2372 if (cap) {
2373 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
2374 PCI_EXP_DEVCTL_NOSNOOP_EN);
2375 }
2376 }
2377
2378 switch (tp->mac_version) {
2379 case RTL_GIGA_MAC_VER_07:
2380 rtl_hw_start_8102e_1(ioaddr, pdev);
2381 break;
2382
2383 case RTL_GIGA_MAC_VER_08:
2384 rtl_hw_start_8102e_3(ioaddr, pdev);
2385 break;
2386
2387 case RTL_GIGA_MAC_VER_09:
2388 rtl_hw_start_8102e_2(ioaddr, pdev);
2389 break;
2123 } 2390 }
2124 2391
2125 RTL_W8(Cfg9346, Cfg9346_Unlock); 2392 RTL_W8(Cfg9346, Cfg9346_Unlock);
@@ -2143,8 +2410,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
2143 2410
2144 RTL_R8(IntrMask); 2411 RTL_R8(IntrMask);
2145 2412
2146 RTL_W32(RxMissed, 0);
2147
2148 rtl_set_rx_mode(dev); 2413 rtl_set_rx_mode(dev);
2149 2414
2150 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 2415 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -2922,6 +3187,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
2922 return work_done; 3187 return work_done;
2923} 3188}
2924 3189
3190static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
3191{
3192 struct rtl8169_private *tp = netdev_priv(dev);
3193
3194 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
3195 return;
3196
3197 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
3198 RTL_W32(RxMissed, 0);
3199}
3200
2925static void rtl8169_down(struct net_device *dev) 3201static void rtl8169_down(struct net_device *dev)
2926{ 3202{
2927 struct rtl8169_private *tp = netdev_priv(dev); 3203 struct rtl8169_private *tp = netdev_priv(dev);
@@ -2939,9 +3215,7 @@ core_down:
2939 3215
2940 rtl8169_asic_down(ioaddr); 3216 rtl8169_asic_down(ioaddr);
2941 3217
2942 /* Update the error counts. */ 3218 rtl8169_rx_missed(dev, ioaddr);
2943 dev->stats.rx_missed_errors += RTL_R32(RxMissed);
2944 RTL_W32(RxMissed, 0);
2945 3219
2946 spin_unlock_irq(&tp->lock); 3220 spin_unlock_irq(&tp->lock);
2947 3221
@@ -3063,8 +3337,7 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
3063 3337
3064 if (netif_running(dev)) { 3338 if (netif_running(dev)) {
3065 spin_lock_irqsave(&tp->lock, flags); 3339 spin_lock_irqsave(&tp->lock, flags);
3066 dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3340 rtl8169_rx_missed(dev, ioaddr);
3067 RTL_W32(RxMissed, 0);
3068 spin_unlock_irqrestore(&tp->lock, flags); 3341 spin_unlock_irqrestore(&tp->lock, flags);
3069 } 3342 }
3070 3343
@@ -3089,8 +3362,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
3089 3362
3090 rtl8169_asic_down(ioaddr); 3363 rtl8169_asic_down(ioaddr);
3091 3364
3092 dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3365 rtl8169_rx_missed(dev, ioaddr);
3093 RTL_W32(RxMissed, 0);
3094 3366
3095 spin_unlock_irq(&tp->lock); 3367 spin_unlock_irq(&tp->lock);
3096 3368
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index a2b073097e5c..6a1375f9cbb8 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -371,9 +371,6 @@ static void s2io_vlan_rx_register(struct net_device *dev,
371 flags[i]); 371 flags[i]);
372} 372}
373 373
374/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
375static int vlan_strip_flag;
376
377/* Unregister the vlan */ 374/* Unregister the vlan */
378static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) 375static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
379{ 376{
@@ -2303,7 +2300,7 @@ static int start_nic(struct s2io_nic *nic)
2303 val64 = readq(&bar0->rx_pa_cfg); 2300 val64 = readq(&bar0->rx_pa_cfg);
2304 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; 2301 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2305 writeq(val64, &bar0->rx_pa_cfg); 2302 writeq(val64, &bar0->rx_pa_cfg);
2306 vlan_strip_flag = 0; 2303 nic->vlan_strip_flag = 0;
2307 } 2304 }
2308 2305
2309 /* 2306 /*
@@ -3136,7 +3133,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
3136 if (skb == NULL) { 3133 if (skb == NULL) {
3137 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); 3134 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3138 DBG_PRINT(ERR_DBG, "%s: Null skb ", 3135 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3139 __FUNCTION__); 3136 __func__);
3140 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n"); 3137 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3141 return; 3138 return;
3142 } 3139 }
@@ -3496,7 +3493,7 @@ static void s2io_reset(struct s2io_nic * sp)
3496 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt; 3493 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3497 3494
3498 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n", 3495 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3499 __FUNCTION__, sp->dev->name); 3496 __func__, sp->dev->name);
3500 3497
3501 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ 3498 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3502 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); 3499 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
@@ -3518,7 +3515,7 @@ static void s2io_reset(struct s2io_nic * sp)
3518 } 3515 }
3519 3516
3520 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) { 3517 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3521 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__); 3518 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__);
3522 } 3519 }
3523 3520
3524 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd); 3521 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
@@ -3768,7 +3765,7 @@ static void restore_xmsi_data(struct s2io_nic *nic)
3768 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); 3765 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3769 writeq(val64, &bar0->xmsi_access); 3766 writeq(val64, &bar0->xmsi_access);
3770 if (wait_for_msix_trans(nic, msix_index)) { 3767 if (wait_for_msix_trans(nic, msix_index)) {
3771 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3768 DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3772 continue; 3769 continue;
3773 } 3770 }
3774 } 3771 }
@@ -3789,7 +3786,7 @@ static void store_xmsi_data(struct s2io_nic *nic)
3789 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); 3786 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3790 writeq(val64, &bar0->xmsi_access); 3787 writeq(val64, &bar0->xmsi_access);
3791 if (wait_for_msix_trans(nic, msix_index)) { 3788 if (wait_for_msix_trans(nic, msix_index)) {
3792 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3789 DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3793 continue; 3790 continue;
3794 } 3791 }
3795 addr = readq(&bar0->xmsi_address); 3792 addr = readq(&bar0->xmsi_address);
@@ -3812,7 +3809,7 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3812 GFP_KERNEL); 3809 GFP_KERNEL);
3813 if (!nic->entries) { 3810 if (!nic->entries) {
3814 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ 3811 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3815 __FUNCTION__); 3812 __func__);
3816 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3813 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3817 return -ENOMEM; 3814 return -ENOMEM;
3818 } 3815 }
@@ -3826,7 +3823,7 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3826 GFP_KERNEL); 3823 GFP_KERNEL);
3827 if (!nic->s2io_entries) { 3824 if (!nic->s2io_entries) {
3828 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 3825 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3829 __FUNCTION__); 3826 __func__);
3830 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3827 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3831 kfree(nic->entries); 3828 kfree(nic->entries);
3832 nic->mac_control.stats_info->sw_stat.mem_freed 3829 nic->mac_control.stats_info->sw_stat.mem_freed
@@ -5010,7 +5007,7 @@ static void s2io_set_multicast(struct net_device *dev)
5010 val64 = readq(&bar0->rx_pa_cfg); 5007 val64 = readq(&bar0->rx_pa_cfg);
5011 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; 5008 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5012 writeq(val64, &bar0->rx_pa_cfg); 5009 writeq(val64, &bar0->rx_pa_cfg);
5013 vlan_strip_flag = 0; 5010 sp->vlan_strip_flag = 0;
5014 } 5011 }
5015 5012
5016 val64 = readq(&bar0->mac_cfg); 5013 val64 = readq(&bar0->mac_cfg);
@@ -5032,7 +5029,7 @@ static void s2io_set_multicast(struct net_device *dev)
5032 val64 = readq(&bar0->rx_pa_cfg); 5029 val64 = readq(&bar0->rx_pa_cfg);
5033 val64 |= RX_PA_CFG_STRIP_VLAN_TAG; 5030 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5034 writeq(val64, &bar0->rx_pa_cfg); 5031 writeq(val64, &bar0->rx_pa_cfg);
5035 vlan_strip_flag = 1; 5032 sp->vlan_strip_flag = 1;
5036 } 5033 }
5037 5034
5038 val64 = readq(&bar0->mac_cfg); 5035 val64 = readq(&bar0->mac_cfg);
@@ -6746,7 +6743,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6746 ret = s2io_card_up(sp); 6743 ret = s2io_card_up(sp);
6747 if (ret) { 6744 if (ret) {
6748 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", 6745 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6749 __FUNCTION__); 6746 __func__);
6750 return ret; 6747 return ret;
6751 } 6748 }
6752 s2io_wake_all_tx_queue(sp); 6749 s2io_wake_all_tx_queue(sp);
@@ -7530,7 +7527,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7530 default: 7527 default:
7531 DBG_PRINT(ERR_DBG, 7528 DBG_PRINT(ERR_DBG,
7532 "%s: Samadhana!!\n", 7529 "%s: Samadhana!!\n",
7533 __FUNCTION__); 7530 __func__);
7534 BUG(); 7531 BUG();
7535 } 7532 }
7536 } 7533 }
@@ -7781,7 +7778,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7781 return -ENOMEM; 7778 return -ENOMEM;
7782 } 7779 }
7783 if ((ret = pci_request_regions(pdev, s2io_driver_name))) { 7780 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7784 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret); 7781 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret);
7785 pci_disable_device(pdev); 7782 pci_disable_device(pdev);
7786 return -ENODEV; 7783 return -ENODEV;
7787 } 7784 }
@@ -7998,7 +7995,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7998 if (sp->device_type & XFRAME_II_DEVICE) { 7995 if (sp->device_type & XFRAME_II_DEVICE) {
7999 mode = s2io_verify_pci_mode(sp); 7996 mode = s2io_verify_pci_mode(sp);
8000 if (mode < 0) { 7997 if (mode < 0) {
8001 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__); 7998 DBG_PRINT(ERR_DBG, "%s: ", __func__);
8002 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n"); 7999 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8003 ret = -EBADSLT; 8000 ret = -EBADSLT;
8004 goto set_swap_failed; 8001 goto set_swap_failed;
@@ -8175,8 +8172,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8175 break; 8172 break;
8176 } 8173 }
8177 if (sp->config.multiq) { 8174 if (sp->config.multiq) {
8178 for (i = 0; i < sp->config.tx_fifo_num; i++) 8175 for (i = 0; i < sp->config.tx_fifo_num; i++)
8179 mac_control->fifos[i].multiq = config->multiq; 8176 mac_control->fifos[i].multiq = config->multiq;
8180 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n", 8177 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8181 dev->name); 8178 dev->name);
8182 } else 8179 } else
@@ -8206,6 +8203,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8206 /* Initialize device name */ 8203 /* Initialize device name */
8207 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); 8204 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8208 8205
8206 if (vlan_tag_strip)
8207 sp->vlan_strip_flag = 1;
8208 else
8209 sp->vlan_strip_flag = 0;
8210
8209 /* 8211 /*
8210 * Make Link state as off at this point, when the Link change 8212 * Make Link state as off at this point, when the Link change
8211 * interrupt comes the state will be automatically changed to 8213 * interrupt comes the state will be automatically changed to
@@ -8299,7 +8301,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8299 8301
8300 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { 8302 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8301 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n", 8303 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8302 __FUNCTION__); 8304 __func__);
8303 return -1; 8305 return -1;
8304 } 8306 }
8305 8307
@@ -8311,7 +8313,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8311 * If vlan stripping is disabled and the frame is VLAN tagged, 8313 * If vlan stripping is disabled and the frame is VLAN tagged,
8312 * shift the offset by the VLAN header size bytes. 8314 * shift the offset by the VLAN header size bytes.
8313 */ 8315 */
8314 if ((!vlan_strip_flag) && 8316 if ((!sp->vlan_strip_flag) &&
8315 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG)) 8317 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8316 ip_off += HEADER_VLAN_SIZE; 8318 ip_off += HEADER_VLAN_SIZE;
8317 } else { 8319 } else {
@@ -8330,7 +8332,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8330static int check_for_socket_match(struct lro *lro, struct iphdr *ip, 8332static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8331 struct tcphdr *tcp) 8333 struct tcphdr *tcp)
8332{ 8334{
8333 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8335 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8334 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) || 8336 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8335 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest)) 8337 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8336 return -1; 8338 return -1;
@@ -8345,7 +8347,7 @@ static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8345static void initiate_new_session(struct lro *lro, u8 *l2h, 8347static void initiate_new_session(struct lro *lro, u8 *l2h,
8346 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag) 8348 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8347{ 8349{
8348 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8350 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8349 lro->l2h = l2h; 8351 lro->l2h = l2h;
8350 lro->iph = ip; 8352 lro->iph = ip;
8351 lro->tcph = tcp; 8353 lro->tcph = tcp;
@@ -8375,7 +8377,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8375 struct tcphdr *tcp = lro->tcph; 8377 struct tcphdr *tcp = lro->tcph;
8376 __sum16 nchk; 8378 __sum16 nchk;
8377 struct stat_block *statinfo = sp->mac_control.stats_info; 8379 struct stat_block *statinfo = sp->mac_control.stats_info;
8378 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8380 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8379 8381
8380 /* Update L3 header */ 8382 /* Update L3 header */
8381 ip->tot_len = htons(lro->total_len); 8383 ip->tot_len = htons(lro->total_len);
@@ -8403,7 +8405,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8403static void aggregate_new_rx(struct lro *lro, struct iphdr *ip, 8405static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8404 struct tcphdr *tcp, u32 l4_pyld) 8406 struct tcphdr *tcp, u32 l4_pyld)
8405{ 8407{
8406 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8408 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8407 lro->total_len += l4_pyld; 8409 lro->total_len += l4_pyld;
8408 lro->frags_len += l4_pyld; 8410 lro->frags_len += l4_pyld;
8409 lro->tcp_next_seq += l4_pyld; 8411 lro->tcp_next_seq += l4_pyld;
@@ -8427,7 +8429,7 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8427{ 8429{
8428 u8 *ptr; 8430 u8 *ptr;
8429 8431
8430 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8432 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8431 8433
8432 if (!tcp_pyld_len) { 8434 if (!tcp_pyld_len) {
8433 /* Runt frame or a pure ack */ 8435 /* Runt frame or a pure ack */
@@ -8509,7 +8511,7 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8509 8511
8510 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) { 8512 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8511 DBG_PRINT(INFO_DBG, "%s:Out of order. expected " 8513 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8512 "0x%x, actual 0x%x\n", __FUNCTION__, 8514 "0x%x, actual 0x%x\n", __func__,
8513 (*lro)->tcp_next_seq, 8515 (*lro)->tcp_next_seq,
8514 ntohl(tcph->seq)); 8516 ntohl(tcph->seq));
8515 8517
@@ -8549,7 +8551,7 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8549 8551
8550 if (ret == 0) { /* sessions exceeded */ 8552 if (ret == 0) { /* sessions exceeded */
8551 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n", 8553 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8552 __FUNCTION__); 8554 __func__);
8553 *lro = NULL; 8555 *lro = NULL;
8554 return ret; 8556 return ret;
8555 } 8557 }
@@ -8571,7 +8573,7 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8571 break; 8573 break;
8572 default: 8574 default:
8573 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n", 8575 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8574 __FUNCTION__); 8576 __func__);
8575 break; 8577 break;
8576 } 8578 }
8577 8579
@@ -8592,7 +8594,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8592 8594
8593 skb->protocol = eth_type_trans(skb, dev); 8595 skb->protocol = eth_type_trans(skb, dev);
8594 if (sp->vlgrp && vlan_tag 8596 if (sp->vlgrp && vlan_tag
8595 && (vlan_strip_flag)) { 8597 && (sp->vlan_strip_flag)) {
8596 /* Queueing the vlan frame to the upper layer */ 8598 /* Queueing the vlan frame to the upper layer */
8597 if (sp->config.napi) 8599 if (sp->config.napi)
8598 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag); 8600 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 6722a2f7d091..55cb943f23f8 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -962,6 +962,7 @@ struct s2io_nic {
962 int task_flag; 962 int task_flag;
963 unsigned long long start_time; 963 unsigned long long start_time;
964 struct vlan_group *vlgrp; 964 struct vlan_group *vlgrp;
965 int vlan_strip_flag;
965#define MSIX_FLG 0xA5 966#define MSIX_FLG 0xA5
966 int num_entries; 967 int num_entries;
967 struct msix_entry *entries; 968 struct msix_entry *entries;
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index 2c79d27404e0..d95c21828014 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -52,9 +52,9 @@
52 * 52 *
53 * The maximum width mask that can be generated is 64 bits. 53 * The maximum width mask that can be generated is 64 bits.
54 */ 54 */
55#define EFX_MASK64(field) \ 55#define EFX_MASK64(width) \
56 (EFX_WIDTH(field) == 64 ? ~((u64) 0) : \ 56 ((width) == 64 ? ~((u64) 0) : \
57 (((((u64) 1) << EFX_WIDTH(field))) - 1)) 57 (((((u64) 1) << (width))) - 1))
58 58
59/* Mask equal in width to the specified field. 59/* Mask equal in width to the specified field.
60 * 60 *
@@ -63,9 +63,9 @@
63 * The maximum width mask that can be generated is 32 bits. Use 63 * The maximum width mask that can be generated is 32 bits. Use
64 * EFX_MASK64 for higher width fields. 64 * EFX_MASK64 for higher width fields.
65 */ 65 */
66#define EFX_MASK32(field) \ 66#define EFX_MASK32(width) \
67 (EFX_WIDTH(field) == 32 ? ~((u32) 0) : \ 67 ((width) == 32 ? ~((u32) 0) : \
68 (((((u32) 1) << EFX_WIDTH(field))) - 1)) 68 (((((u32) 1) << (width))) - 1))
69 69
70/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */ 70/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
71typedef union efx_dword { 71typedef union efx_dword {
@@ -138,44 +138,49 @@ typedef union efx_oword {
138 EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high) 138 EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
139 139
140#define EFX_EXTRACT_OWORD64(oword, low, high) \ 140#define EFX_EXTRACT_OWORD64(oword, low, high) \
141 (EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ 141 ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
142 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) 142 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
143 EFX_MASK64(high + 1 - low))
143 144
144#define EFX_EXTRACT_QWORD64(qword, low, high) \ 145#define EFX_EXTRACT_QWORD64(qword, low, high) \
145 EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) 146 (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
147 EFX_MASK64(high + 1 - low))
146 148
147#define EFX_EXTRACT_OWORD32(oword, low, high) \ 149#define EFX_EXTRACT_OWORD32(oword, low, high) \
148 (EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ 150 ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
149 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ 151 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
150 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ 152 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
151 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) 153 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
154 EFX_MASK32(high + 1 - low))
152 155
153#define EFX_EXTRACT_QWORD32(qword, low, high) \ 156#define EFX_EXTRACT_QWORD32(qword, low, high) \
154 (EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ 157 ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
155 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) 158 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
159 EFX_MASK32(high + 1 - low))
156 160
157#define EFX_EXTRACT_DWORD(dword, low, high) \ 161#define EFX_EXTRACT_DWORD(dword, low, high) \
158 EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) 162 (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
163 EFX_MASK32(high + 1 - low))
159 164
160#define EFX_OWORD_FIELD64(oword, field) \ 165#define EFX_OWORD_FIELD64(oword, field) \
161 (EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ 166 EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
162 & EFX_MASK64(field)) 167 EFX_HIGH_BIT(field))
163 168
164#define EFX_QWORD_FIELD64(qword, field) \ 169#define EFX_QWORD_FIELD64(qword, field) \
165 (EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ 170 EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \
166 & EFX_MASK64(field)) 171 EFX_HIGH_BIT(field))
167 172
168#define EFX_OWORD_FIELD32(oword, field) \ 173#define EFX_OWORD_FIELD32(oword, field) \
169 (EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ 174 EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \
170 & EFX_MASK32(field)) 175 EFX_HIGH_BIT(field))
171 176
172#define EFX_QWORD_FIELD32(qword, field) \ 177#define EFX_QWORD_FIELD32(qword, field) \
173 (EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ 178 EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \
174 & EFX_MASK32(field)) 179 EFX_HIGH_BIT(field))
175 180
176#define EFX_DWORD_FIELD(dword, field) \ 181#define EFX_DWORD_FIELD(dword, field) \
177 (EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ 182 EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \
178 & EFX_MASK32(field)) 183 EFX_HIGH_BIT(field))
179 184
180#define EFX_OWORD_IS_ZERO64(oword) \ 185#define EFX_OWORD_IS_ZERO64(oword) \
181 (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0) 186 (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
@@ -411,69 +416,102 @@ typedef union efx_oword {
411 * for read-modify-write operations. 416 * for read-modify-write operations.
412 * 417 *
413 */ 418 */
414
415#define EFX_INVERT_OWORD(oword) do { \ 419#define EFX_INVERT_OWORD(oword) do { \
416 (oword).u64[0] = ~((oword).u64[0]); \ 420 (oword).u64[0] = ~((oword).u64[0]); \
417 (oword).u64[1] = ~((oword).u64[1]); \ 421 (oword).u64[1] = ~((oword).u64[1]); \
418 } while (0) 422 } while (0)
419 423
420#define EFX_INSERT_FIELD64(...) \ 424#define EFX_AND_OWORD(oword, from, mask) \
421 cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__)) 425 do { \
426 (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
427 (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
428 } while (0)
429
430#define EFX_OR_OWORD(oword, from, mask) \
431 do { \
432 (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
433 (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
434 } while (0)
422 435
423#define EFX_INSERT_FIELD32(...) \ 436#define EFX_INSERT64(min, max, low, high, value) \
424 cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__)) 437 cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
425 438
426#define EFX_INPLACE_MASK64(min, max, field) \ 439#define EFX_INSERT32(min, max, low, high, value) \
427 EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field)) 440 cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
428 441
429#define EFX_INPLACE_MASK32(min, max, field) \ 442#define EFX_INPLACE_MASK64(min, max, low, high) \
430 EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field)) 443 EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
431 444
432#define EFX_SET_OWORD_FIELD64(oword, field, value) do { \ 445#define EFX_INPLACE_MASK32(min, max, low, high) \
446 EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
447
448#define EFX_SET_OWORD64(oword, low, high, value) do { \
433 (oword).u64[0] = (((oword).u64[0] \ 449 (oword).u64[0] = (((oword).u64[0] \
434 & ~EFX_INPLACE_MASK64(0, 63, field)) \ 450 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
435 | EFX_INSERT_FIELD64(0, 63, field, value)); \ 451 | EFX_INSERT64(0, 63, low, high, value)); \
436 (oword).u64[1] = (((oword).u64[1] \ 452 (oword).u64[1] = (((oword).u64[1] \
437 & ~EFX_INPLACE_MASK64(64, 127, field)) \ 453 & ~EFX_INPLACE_MASK64(64, 127, low, high)) \
438 | EFX_INSERT_FIELD64(64, 127, field, value)); \ 454 | EFX_INSERT64(64, 127, low, high, value)); \
439 } while (0) 455 } while (0)
440 456
441#define EFX_SET_QWORD_FIELD64(qword, field, value) do { \ 457#define EFX_SET_QWORD64(qword, low, high, value) do { \
442 (qword).u64[0] = (((qword).u64[0] \ 458 (qword).u64[0] = (((qword).u64[0] \
443 & ~EFX_INPLACE_MASK64(0, 63, field)) \ 459 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
444 | EFX_INSERT_FIELD64(0, 63, field, value)); \ 460 | EFX_INSERT64(0, 63, low, high, value)); \
445 } while (0) 461 } while (0)
446 462
447#define EFX_SET_OWORD_FIELD32(oword, field, value) do { \ 463#define EFX_SET_OWORD32(oword, low, high, value) do { \
448 (oword).u32[0] = (((oword).u32[0] \ 464 (oword).u32[0] = (((oword).u32[0] \
449 & ~EFX_INPLACE_MASK32(0, 31, field)) \ 465 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
450 | EFX_INSERT_FIELD32(0, 31, field, value)); \ 466 | EFX_INSERT32(0, 31, low, high, value)); \
451 (oword).u32[1] = (((oword).u32[1] \ 467 (oword).u32[1] = (((oword).u32[1] \
452 & ~EFX_INPLACE_MASK32(32, 63, field)) \ 468 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
453 | EFX_INSERT_FIELD32(32, 63, field, value)); \ 469 | EFX_INSERT32(32, 63, low, high, value)); \
454 (oword).u32[2] = (((oword).u32[2] \ 470 (oword).u32[2] = (((oword).u32[2] \
455 & ~EFX_INPLACE_MASK32(64, 95, field)) \ 471 & ~EFX_INPLACE_MASK32(64, 95, low, high)) \
456 | EFX_INSERT_FIELD32(64, 95, field, value)); \ 472 | EFX_INSERT32(64, 95, low, high, value)); \
457 (oword).u32[3] = (((oword).u32[3] \ 473 (oword).u32[3] = (((oword).u32[3] \
458 & ~EFX_INPLACE_MASK32(96, 127, field)) \ 474 & ~EFX_INPLACE_MASK32(96, 127, low, high)) \
459 | EFX_INSERT_FIELD32(96, 127, field, value)); \ 475 | EFX_INSERT32(96, 127, low, high, value)); \
460 } while (0) 476 } while (0)
461 477
462#define EFX_SET_QWORD_FIELD32(qword, field, value) do { \ 478#define EFX_SET_QWORD32(qword, low, high, value) do { \
463 (qword).u32[0] = (((qword).u32[0] \ 479 (qword).u32[0] = (((qword).u32[0] \
464 & ~EFX_INPLACE_MASK32(0, 31, field)) \ 480 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
465 | EFX_INSERT_FIELD32(0, 31, field, value)); \ 481 | EFX_INSERT32(0, 31, low, high, value)); \
466 (qword).u32[1] = (((qword).u32[1] \ 482 (qword).u32[1] = (((qword).u32[1] \
467 & ~EFX_INPLACE_MASK32(32, 63, field)) \ 483 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
468 | EFX_INSERT_FIELD32(32, 63, field, value)); \ 484 | EFX_INSERT32(32, 63, low, high, value)); \
469 } while (0) 485 } while (0)
470 486
471#define EFX_SET_DWORD_FIELD(dword, field, value) do { \ 487#define EFX_SET_DWORD32(dword, low, high, value) do { \
472 (dword).u32[0] = (((dword).u32[0] \ 488 (dword).u32[0] = (((dword).u32[0] \
473 & ~EFX_INPLACE_MASK32(0, 31, field)) \ 489 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
474 | EFX_INSERT_FIELD32(0, 31, field, value)); \ 490 | EFX_INSERT32(0, 31, low, high, value)); \
475 } while (0) 491 } while (0)
476 492
493#define EFX_SET_OWORD_FIELD64(oword, field, value) \
494 EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \
495 EFX_HIGH_BIT(field), value)
496
497#define EFX_SET_QWORD_FIELD64(qword, field, value) \
498 EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \
499 EFX_HIGH_BIT(field), value)
500
501#define EFX_SET_OWORD_FIELD32(oword, field, value) \
502 EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \
503 EFX_HIGH_BIT(field), value)
504
505#define EFX_SET_QWORD_FIELD32(qword, field, value) \
506 EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \
507 EFX_HIGH_BIT(field), value)
508
509#define EFX_SET_DWORD_FIELD(dword, field, value) \
510 EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \
511 EFX_HIGH_BIT(field), value)
512
513
514
477#if BITS_PER_LONG == 64 515#if BITS_PER_LONG == 64
478#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 516#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
479#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 517#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
@@ -502,4 +540,10 @@ typedef union efx_oword {
502#define EFX_DMA_TYPE_WIDTH(width) \ 540#define EFX_DMA_TYPE_WIDTH(width) \
503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) 541 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
504 542
543
544/* Static initialiser */
545#define EFX_OWORD32(a, b, c, d) \
546 { .u32 = { __constant_cpu_to_le32(a), __constant_cpu_to_le32(b), \
547 __constant_cpu_to_le32(c), __constant_cpu_to_le32(d) } }
548
505#endif /* EFX_BITFIELD_H */ 549#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index d3d3dd0a1170..99e602373269 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -31,23 +31,23 @@ static void blink_led_timer(unsigned long context)
31 mod_timer(&bl->timer, jiffies + BLINK_INTERVAL); 31 mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
32} 32}
33 33
34static void board_blink(struct efx_nic *efx, int blink) 34static void board_blink(struct efx_nic *efx, bool blink)
35{ 35{
36 struct efx_blinker *blinker = &efx->board_info.blinker; 36 struct efx_blinker *blinker = &efx->board_info.blinker;
37 37
38 /* The rtnl mutex serialises all ethtool ioctls, so 38 /* The rtnl mutex serialises all ethtool ioctls, so
39 * nothing special needs doing here. */ 39 * nothing special needs doing here. */
40 if (blink) { 40 if (blink) {
41 blinker->resubmit = 1; 41 blinker->resubmit = true;
42 blinker->state = 0; 42 blinker->state = false;
43 setup_timer(&blinker->timer, blink_led_timer, 43 setup_timer(&blinker->timer, blink_led_timer,
44 (unsigned long)efx); 44 (unsigned long)efx);
45 mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL); 45 mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
46 } else { 46 } else {
47 blinker->resubmit = 0; 47 blinker->resubmit = false;
48 if (blinker->timer.function) 48 if (blinker->timer.function)
49 del_timer_sync(&blinker->timer); 49 del_timer_sync(&blinker->timer);
50 efx->board_info.set_fault_led(efx, 0); 50 efx->board_info.set_fault_led(efx, false);
51 } 51 }
52} 52}
53 53
@@ -78,7 +78,7 @@ static int sfe4002_init_leds(struct efx_nic *efx)
78 return 0; 78 return 0;
79} 79}
80 80
81static void sfe4002_fault_led(struct efx_nic *efx, int state) 81static void sfe4002_fault_led(struct efx_nic *efx, bool state)
82{ 82{
83 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON : 83 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
84 QUAKE_LED_OFF); 84 QUAKE_LED_OFF);
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index e5e844359ce7..c6e01b64bfb4 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -21,7 +21,5 @@ enum efx_board_type {
21 21
22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); 22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
23extern int sfe4001_init(struct efx_nic *efx); 23extern int sfe4001_init(struct efx_nic *efx);
24/* Are we putting the PHY into flash config mode */
25extern unsigned int sfe4001_phy_flash_cfg;
26 24
27#endif 25#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 45c72eebb3a7..06ea71c7e34e 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -28,7 +28,6 @@
28#include "efx.h" 28#include "efx.h"
29#include "mdio_10g.h" 29#include "mdio_10g.h"
30#include "falcon.h" 30#include "falcon.h"
31#include "workarounds.h"
32#include "mac.h" 31#include "mac.h"
33 32
34#define EFX_MAX_MTU (9 * 1024) 33#define EFX_MAX_MTU (9 * 1024)
@@ -52,7 +51,7 @@ static struct workqueue_struct *refill_workqueue;
52 * This sets the default for new devices. It can be controlled later 51 * This sets the default for new devices. It can be controlled later
53 * using ethtool. 52 * using ethtool.
54 */ 53 */
55static int lro = 1; 54static int lro = true;
56module_param(lro, int, 0644); 55module_param(lro, int, 0644);
57MODULE_PARM_DESC(lro, "Large receive offload acceleration"); 56MODULE_PARM_DESC(lro, "Large receive offload acceleration");
58 57
@@ -65,7 +64,7 @@ MODULE_PARM_DESC(lro, "Large receive offload acceleration");
65 * This is forced to 0 for MSI interrupt mode as the interrupt vector 64 * This is forced to 0 for MSI interrupt mode as the interrupt vector
66 * is not written 65 * is not written
67 */ 66 */
68static unsigned int separate_tx_and_rx_channels = 1; 67static unsigned int separate_tx_and_rx_channels = true;
69 68
70/* This is the weight assigned to each of the (per-channel) virtual 69/* This is the weight assigned to each of the (per-channel) virtual
71 * NAPI devices. 70 * NAPI devices.
@@ -81,7 +80,7 @@ unsigned int efx_monitor_interval = 1 * HZ;
81/* This controls whether or not the hardware monitor will trigger a 80/* This controls whether or not the hardware monitor will trigger a
82 * reset when it detects an error condition. 81 * reset when it detects an error condition.
83 */ 82 */
84static unsigned int monitor_reset = 1; 83static unsigned int monitor_reset = true;
85 84
86/* This controls whether or not the driver will initialise devices 85/* This controls whether or not the driver will initialise devices
87 * with invalid MAC addresses stored in the EEPROM or flash. If true, 86 * with invalid MAC addresses stored in the EEPROM or flash. If true,
@@ -141,8 +140,7 @@ static void efx_fini_channels(struct efx_nic *efx);
141 140
142#define EFX_ASSERT_RESET_SERIALISED(efx) \ 141#define EFX_ASSERT_RESET_SERIALISED(efx) \
143 do { \ 142 do { \
144 if ((efx->state == STATE_RUNNING) || \ 143 if (efx->state == STATE_RUNNING) \
145 (efx->state == STATE_RESETTING)) \
146 ASSERT_RTNL(); \ 144 ASSERT_RTNL(); \
147 } while (0) 145 } while (0)
148 146
@@ -159,16 +157,18 @@ static void efx_fini_channels(struct efx_nic *efx);
159 * never be concurrently called more than once on the same channel, 157 * never be concurrently called more than once on the same channel,
160 * though different channels may be being processed concurrently. 158 * though different channels may be being processed concurrently.
161 */ 159 */
162static inline int efx_process_channel(struct efx_channel *channel, int rx_quota) 160static int efx_process_channel(struct efx_channel *channel, int rx_quota)
163{ 161{
164 int rxdmaqs; 162 struct efx_nic *efx = channel->efx;
165 struct efx_rx_queue *rx_queue; 163 int rx_packets;
166 164
167 if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE || 165 if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
168 !channel->enabled)) 166 !channel->enabled))
169 return rx_quota; 167 return 0;
170 168
171 rxdmaqs = falcon_process_eventq(channel, &rx_quota); 169 rx_packets = falcon_process_eventq(channel, rx_quota);
170 if (rx_packets == 0)
171 return 0;
172 172
173 /* Deliver last RX packet. */ 173 /* Deliver last RX packet. */
174 if (channel->rx_pkt) { 174 if (channel->rx_pkt) {
@@ -180,16 +180,9 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
180 efx_flush_lro(channel); 180 efx_flush_lro(channel);
181 efx_rx_strategy(channel); 181 efx_rx_strategy(channel);
182 182
183 /* Refill descriptor rings as necessary */ 183 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
184 rx_queue = &channel->efx->rx_queue[0];
185 while (rxdmaqs) {
186 if (rxdmaqs & 0x01)
187 efx_fast_push_rx_descriptors(rx_queue);
188 rx_queue++;
189 rxdmaqs >>= 1;
190 }
191 184
192 return rx_quota; 185 return rx_packets;
193} 186}
194 187
195/* Mark channel as finished processing 188/* Mark channel as finished processing
@@ -203,7 +196,7 @@ static inline void efx_channel_processed(struct efx_channel *channel)
203 /* The interrupt handler for this channel may set work_pending 196 /* The interrupt handler for this channel may set work_pending
204 * as soon as we acknowledge the events we've seen. Make sure 197 * as soon as we acknowledge the events we've seen. Make sure
205 * it's cleared before then. */ 198 * it's cleared before then. */
206 channel->work_pending = 0; 199 channel->work_pending = false;
207 smp_wmb(); 200 smp_wmb();
208 201
209 falcon_eventq_read_ack(channel); 202 falcon_eventq_read_ack(channel);
@@ -219,14 +212,12 @@ static int efx_poll(struct napi_struct *napi, int budget)
219 struct efx_channel *channel = 212 struct efx_channel *channel =
220 container_of(napi, struct efx_channel, napi_str); 213 container_of(napi, struct efx_channel, napi_str);
221 struct net_device *napi_dev = channel->napi_dev; 214 struct net_device *napi_dev = channel->napi_dev;
222 int unused;
223 int rx_packets; 215 int rx_packets;
224 216
225 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n", 217 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
226 channel->channel, raw_smp_processor_id()); 218 channel->channel, raw_smp_processor_id());
227 219
228 unused = efx_process_channel(channel, budget); 220 rx_packets = efx_process_channel(channel, budget);
229 rx_packets = (budget - unused);
230 221
231 if (rx_packets < budget) { 222 if (rx_packets < budget) {
232 /* There is no race here; although napi_disable() will 223 /* There is no race here; although napi_disable() will
@@ -260,7 +251,7 @@ void efx_process_channel_now(struct efx_channel *channel)
260 falcon_disable_interrupts(efx); 251 falcon_disable_interrupts(efx);
261 if (efx->legacy_irq) 252 if (efx->legacy_irq)
262 synchronize_irq(efx->legacy_irq); 253 synchronize_irq(efx->legacy_irq);
263 if (channel->has_interrupt && channel->irq) 254 if (channel->irq)
264 synchronize_irq(channel->irq); 255 synchronize_irq(channel->irq);
265 256
266 /* Wait for any NAPI processing to complete */ 257 /* Wait for any NAPI processing to complete */
@@ -290,13 +281,13 @@ static int efx_probe_eventq(struct efx_channel *channel)
290} 281}
291 282
292/* Prepare channel's event queue */ 283/* Prepare channel's event queue */
293static int efx_init_eventq(struct efx_channel *channel) 284static void efx_init_eventq(struct efx_channel *channel)
294{ 285{
295 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel); 286 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
296 287
297 channel->eventq_read_ptr = 0; 288 channel->eventq_read_ptr = 0;
298 289
299 return falcon_init_eventq(channel); 290 falcon_init_eventq(channel);
300} 291}
301 292
302static void efx_fini_eventq(struct efx_channel *channel) 293static void efx_fini_eventq(struct efx_channel *channel)
@@ -362,12 +353,11 @@ static int efx_probe_channel(struct efx_channel *channel)
362 * to propagate configuration changes (mtu, checksum offload), or 353 * to propagate configuration changes (mtu, checksum offload), or
363 * to clear hardware error conditions 354 * to clear hardware error conditions
364 */ 355 */
365static int efx_init_channels(struct efx_nic *efx) 356static void efx_init_channels(struct efx_nic *efx)
366{ 357{
367 struct efx_tx_queue *tx_queue; 358 struct efx_tx_queue *tx_queue;
368 struct efx_rx_queue *rx_queue; 359 struct efx_rx_queue *rx_queue;
369 struct efx_channel *channel; 360 struct efx_channel *channel;
370 int rc = 0;
371 361
372 /* Calculate the rx buffer allocation parameters required to 362 /* Calculate the rx buffer allocation parameters required to
373 * support the current MTU, including padding for header 363 * support the current MTU, including padding for header
@@ -382,36 +372,20 @@ static int efx_init_channels(struct efx_nic *efx)
382 efx_for_each_channel(channel, efx) { 372 efx_for_each_channel(channel, efx) {
383 EFX_LOG(channel->efx, "init chan %d\n", channel->channel); 373 EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
384 374
385 rc = efx_init_eventq(channel); 375 efx_init_eventq(channel);
386 if (rc)
387 goto err;
388 376
389 efx_for_each_channel_tx_queue(tx_queue, channel) { 377 efx_for_each_channel_tx_queue(tx_queue, channel)
390 rc = efx_init_tx_queue(tx_queue); 378 efx_init_tx_queue(tx_queue);
391 if (rc)
392 goto err;
393 }
394 379
395 /* The rx buffer allocation strategy is MTU dependent */ 380 /* The rx buffer allocation strategy is MTU dependent */
396 efx_rx_strategy(channel); 381 efx_rx_strategy(channel);
397 382
398 efx_for_each_channel_rx_queue(rx_queue, channel) { 383 efx_for_each_channel_rx_queue(rx_queue, channel)
399 rc = efx_init_rx_queue(rx_queue); 384 efx_init_rx_queue(rx_queue);
400 if (rc)
401 goto err;
402 }
403 385
404 WARN_ON(channel->rx_pkt != NULL); 386 WARN_ON(channel->rx_pkt != NULL);
405 efx_rx_strategy(channel); 387 efx_rx_strategy(channel);
406 } 388 }
407
408 return 0;
409
410 err:
411 EFX_ERR(efx, "failed to initialise channel %d\n",
412 channel ? channel->channel : -1);
413 efx_fini_channels(efx);
414 return rc;
415} 389}
416 390
417/* This enables event queue processing and packet transmission. 391/* This enables event queue processing and packet transmission.
@@ -432,8 +406,8 @@ static void efx_start_channel(struct efx_channel *channel)
432 /* The interrupt handler for this channel may set work_pending 406 /* The interrupt handler for this channel may set work_pending
433 * as soon as we enable it. Make sure it's cleared before 407 * as soon as we enable it. Make sure it's cleared before
434 * then. Similarly, make sure it sees the enabled flag set. */ 408 * then. Similarly, make sure it sees the enabled flag set. */
435 channel->work_pending = 0; 409 channel->work_pending = false;
436 channel->enabled = 1; 410 channel->enabled = true;
437 smp_wmb(); 411 smp_wmb();
438 412
439 napi_enable(&channel->napi_str); 413 napi_enable(&channel->napi_str);
@@ -456,7 +430,7 @@ static void efx_stop_channel(struct efx_channel *channel)
456 430
457 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel); 431 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
458 432
459 channel->enabled = 0; 433 channel->enabled = false;
460 napi_disable(&channel->napi_str); 434 napi_disable(&channel->napi_str);
461 435
462 /* Ensure that any worker threads have exited or will be no-ops */ 436 /* Ensure that any worker threads have exited or will be no-ops */
@@ -471,10 +445,17 @@ static void efx_fini_channels(struct efx_nic *efx)
471 struct efx_channel *channel; 445 struct efx_channel *channel;
472 struct efx_tx_queue *tx_queue; 446 struct efx_tx_queue *tx_queue;
473 struct efx_rx_queue *rx_queue; 447 struct efx_rx_queue *rx_queue;
448 int rc;
474 449
475 EFX_ASSERT_RESET_SERIALISED(efx); 450 EFX_ASSERT_RESET_SERIALISED(efx);
476 BUG_ON(efx->port_enabled); 451 BUG_ON(efx->port_enabled);
477 452
453 rc = falcon_flush_queues(efx);
454 if (rc)
455 EFX_ERR(efx, "failed to flush queues\n");
456 else
457 EFX_LOG(efx, "successfully flushed all queues\n");
458
478 efx_for_each_channel(channel, efx) { 459 efx_for_each_channel(channel, efx) {
479 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); 460 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
480 461
@@ -482,13 +463,6 @@ static void efx_fini_channels(struct efx_nic *efx)
482 efx_fini_rx_queue(rx_queue); 463 efx_fini_rx_queue(rx_queue);
483 efx_for_each_channel_tx_queue(tx_queue, channel) 464 efx_for_each_channel_tx_queue(tx_queue, channel)
484 efx_fini_tx_queue(tx_queue); 465 efx_fini_tx_queue(tx_queue);
485 }
486
487 /* Do the event queues last so that we can handle flush events
488 * for all DMA queues. */
489 efx_for_each_channel(channel, efx) {
490 EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
491
492 efx_fini_eventq(channel); 466 efx_fini_eventq(channel);
493 } 467 }
494} 468}
@@ -526,8 +500,6 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
526 */ 500 */
527static void efx_link_status_changed(struct efx_nic *efx) 501static void efx_link_status_changed(struct efx_nic *efx)
528{ 502{
529 int carrier_ok;
530
531 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure 503 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
532 * that no events are triggered between unregister_netdev() and the 504 * that no events are triggered between unregister_netdev() and the
533 * driver unloading. A more general condition is that NETDEV_CHANGE 505 * driver unloading. A more general condition is that NETDEV_CHANGE
@@ -535,8 +507,12 @@ static void efx_link_status_changed(struct efx_nic *efx)
535 if (!netif_running(efx->net_dev)) 507 if (!netif_running(efx->net_dev))
536 return; 508 return;
537 509
538 carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0; 510 if (efx->port_inhibited) {
539 if (efx->link_up != carrier_ok) { 511 netif_carrier_off(efx->net_dev);
512 return;
513 }
514
515 if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
540 efx->n_link_state_changes++; 516 efx->n_link_state_changes++;
541 517
542 if (efx->link_up) 518 if (efx->link_up)
@@ -577,13 +553,19 @@ static void efx_link_status_changed(struct efx_nic *efx)
577 553
578/* This call reinitialises the MAC to pick up new PHY settings. The 554/* This call reinitialises the MAC to pick up new PHY settings. The
579 * caller must hold the mac_lock */ 555 * caller must hold the mac_lock */
580static void __efx_reconfigure_port(struct efx_nic *efx) 556void __efx_reconfigure_port(struct efx_nic *efx)
581{ 557{
582 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 558 WARN_ON(!mutex_is_locked(&efx->mac_lock));
583 559
584 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n", 560 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
585 raw_smp_processor_id()); 561 raw_smp_processor_id());
586 562
563 /* Serialise the promiscuous flag with efx_set_multicast_list. */
564 if (efx_dev_registered(efx)) {
565 netif_addr_lock_bh(efx->net_dev);
566 netif_addr_unlock_bh(efx->net_dev);
567 }
568
587 falcon_reconfigure_xmac(efx); 569 falcon_reconfigure_xmac(efx);
588 570
589 /* Inform kernel of loss/gain of carrier */ 571 /* Inform kernel of loss/gain of carrier */
@@ -661,7 +643,8 @@ static int efx_init_port(struct efx_nic *efx)
661 if (rc) 643 if (rc)
662 return rc; 644 return rc;
663 645
664 efx->port_initialized = 1; 646 efx->port_initialized = true;
647 efx->stats_enabled = true;
665 648
666 /* Reconfigure port to program MAC registers */ 649 /* Reconfigure port to program MAC registers */
667 falcon_reconfigure_xmac(efx); 650 falcon_reconfigure_xmac(efx);
@@ -678,7 +661,7 @@ static void efx_start_port(struct efx_nic *efx)
678 BUG_ON(efx->port_enabled); 661 BUG_ON(efx->port_enabled);
679 662
680 mutex_lock(&efx->mac_lock); 663 mutex_lock(&efx->mac_lock);
681 efx->port_enabled = 1; 664 efx->port_enabled = true;
682 __efx_reconfigure_port(efx); 665 __efx_reconfigure_port(efx);
683 mutex_unlock(&efx->mac_lock); 666 mutex_unlock(&efx->mac_lock);
684} 667}
@@ -692,7 +675,7 @@ static void efx_stop_port(struct efx_nic *efx)
692 EFX_LOG(efx, "stop port\n"); 675 EFX_LOG(efx, "stop port\n");
693 676
694 mutex_lock(&efx->mac_lock); 677 mutex_lock(&efx->mac_lock);
695 efx->port_enabled = 0; 678 efx->port_enabled = false;
696 mutex_unlock(&efx->mac_lock); 679 mutex_unlock(&efx->mac_lock);
697 680
698 /* Serialise against efx_set_multicast_list() */ 681 /* Serialise against efx_set_multicast_list() */
@@ -710,9 +693,9 @@ static void efx_fini_port(struct efx_nic *efx)
710 return; 693 return;
711 694
712 falcon_fini_xmac(efx); 695 falcon_fini_xmac(efx);
713 efx->port_initialized = 0; 696 efx->port_initialized = false;
714 697
715 efx->link_up = 0; 698 efx->link_up = false;
716 efx_link_status_changed(efx); 699 efx_link_status_changed(efx);
717} 700}
718 701
@@ -797,7 +780,7 @@ static int efx_init_io(struct efx_nic *efx)
797 return 0; 780 return 0;
798 781
799 fail4: 782 fail4:
800 release_mem_region(efx->membase_phys, efx->type->mem_map_size); 783 pci_release_region(efx->pci_dev, efx->type->mem_bar);
801 fail3: 784 fail3:
802 efx->membase_phys = 0; 785 efx->membase_phys = 0;
803 fail2: 786 fail2:
@@ -823,53 +806,61 @@ static void efx_fini_io(struct efx_nic *efx)
823 pci_disable_device(efx->pci_dev); 806 pci_disable_device(efx->pci_dev);
824} 807}
825 808
826/* Probe the number and type of interrupts we are able to obtain. */ 809/* Get number of RX queues wanted. Return number of online CPU
810 * packages in the expectation that an IRQ balancer will spread
811 * interrupts across them. */
812static int efx_wanted_rx_queues(void)
813{
814 cpumask_t core_mask;
815 int count;
816 int cpu;
817
818 cpus_clear(core_mask);
819 count = 0;
820 for_each_online_cpu(cpu) {
821 if (!cpu_isset(cpu, core_mask)) {
822 ++count;
823 cpus_or(core_mask, core_mask,
824 topology_core_siblings(cpu));
825 }
826 }
827
828 return count;
829}
830
831/* Probe the number and type of interrupts we are able to obtain, and
832 * the resulting numbers of channels and RX queues.
833 */
827static void efx_probe_interrupts(struct efx_nic *efx) 834static void efx_probe_interrupts(struct efx_nic *efx)
828{ 835{
829 int max_channel = efx->type->phys_addr_channels - 1; 836 int max_channels =
830 struct msix_entry xentries[EFX_MAX_CHANNELS]; 837 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
831 int rc, i; 838 int rc, i;
832 839
833 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { 840 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
834 BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX)); 841 struct msix_entry xentries[EFX_MAX_CHANNELS];
835 842 int wanted_ints;
836 if (rss_cpus == 0) {
837 cpumask_t core_mask;
838 int cpu;
839
840 cpus_clear(core_mask);
841 efx->rss_queues = 0;
842 for_each_online_cpu(cpu) {
843 if (!cpu_isset(cpu, core_mask)) {
844 ++efx->rss_queues;
845 cpus_or(core_mask, core_mask,
846 topology_core_siblings(cpu));
847 }
848 }
849 } else {
850 efx->rss_queues = rss_cpus;
851 }
852 843
853 efx->rss_queues = min(efx->rss_queues, max_channel + 1); 844 /* We want one RX queue and interrupt per CPU package
854 efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS); 845 * (or as specified by the rss_cpus module parameter).
846 * We will need one channel per interrupt.
847 */
848 wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
849 efx->n_rx_queues = min(wanted_ints, max_channels);
855 850
856 /* Request maximum number of MSI interrupts, and fill out 851 for (i = 0; i < efx->n_rx_queues; i++)
857 * the channel interrupt information the allowed allocation */
858 for (i = 0; i < efx->rss_queues; i++)
859 xentries[i].entry = i; 852 xentries[i].entry = i;
860 rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues); 853 rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
861 if (rc > 0) { 854 if (rc > 0) {
862 EFX_BUG_ON_PARANOID(rc >= efx->rss_queues); 855 EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
863 efx->rss_queues = rc; 856 efx->n_rx_queues = rc;
864 rc = pci_enable_msix(efx->pci_dev, xentries, 857 rc = pci_enable_msix(efx->pci_dev, xentries,
865 efx->rss_queues); 858 efx->n_rx_queues);
866 } 859 }
867 860
868 if (rc == 0) { 861 if (rc == 0) {
869 for (i = 0; i < efx->rss_queues; i++) { 862 for (i = 0; i < efx->n_rx_queues; i++)
870 efx->channel[i].has_interrupt = 1;
871 efx->channel[i].irq = xentries[i].vector; 863 efx->channel[i].irq = xentries[i].vector;
872 }
873 } else { 864 } else {
874 /* Fall back to single channel MSI */ 865 /* Fall back to single channel MSI */
875 efx->interrupt_mode = EFX_INT_MODE_MSI; 866 efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -879,11 +870,10 @@ static void efx_probe_interrupts(struct efx_nic *efx)
879 870
880 /* Try single interrupt MSI */ 871 /* Try single interrupt MSI */
881 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { 872 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
882 efx->rss_queues = 1; 873 efx->n_rx_queues = 1;
883 rc = pci_enable_msi(efx->pci_dev); 874 rc = pci_enable_msi(efx->pci_dev);
884 if (rc == 0) { 875 if (rc == 0) {
885 efx->channel[0].irq = efx->pci_dev->irq; 876 efx->channel[0].irq = efx->pci_dev->irq;
886 efx->channel[0].has_interrupt = 1;
887 } else { 877 } else {
888 EFX_ERR(efx, "could not enable MSI\n"); 878 EFX_ERR(efx, "could not enable MSI\n");
889 efx->interrupt_mode = EFX_INT_MODE_LEGACY; 879 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
@@ -892,10 +882,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
892 882
893 /* Assume legacy interrupts */ 883 /* Assume legacy interrupts */
894 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { 884 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
895 efx->rss_queues = 1; 885 efx->n_rx_queues = 1;
896 /* Every channel is interruptible */
897 for (i = 0; i < EFX_MAX_CHANNELS; i++)
898 efx->channel[i].has_interrupt = 1;
899 efx->legacy_irq = efx->pci_dev->irq; 886 efx->legacy_irq = efx->pci_dev->irq;
900 } 887 }
901} 888}
@@ -905,7 +892,7 @@ static void efx_remove_interrupts(struct efx_nic *efx)
905 struct efx_channel *channel; 892 struct efx_channel *channel;
906 893
907 /* Remove MSI/MSI-X interrupts */ 894 /* Remove MSI/MSI-X interrupts */
908 efx_for_each_channel_with_interrupt(channel, efx) 895 efx_for_each_channel(channel, efx)
909 channel->irq = 0; 896 channel->irq = 0;
910 pci_disable_msi(efx->pci_dev); 897 pci_disable_msi(efx->pci_dev);
911 pci_disable_msix(efx->pci_dev); 898 pci_disable_msix(efx->pci_dev);
@@ -914,45 +901,22 @@ static void efx_remove_interrupts(struct efx_nic *efx)
914 efx->legacy_irq = 0; 901 efx->legacy_irq = 0;
915} 902}
916 903
917/* Select number of used resources 904static void efx_set_channels(struct efx_nic *efx)
918 * Should be called after probe_interrupts()
919 */
920static void efx_select_used(struct efx_nic *efx)
921{ 905{
922 struct efx_tx_queue *tx_queue; 906 struct efx_tx_queue *tx_queue;
923 struct efx_rx_queue *rx_queue; 907 struct efx_rx_queue *rx_queue;
924 int i;
925 908
926 /* TX queues. One per port per channel with TX capability 909 efx_for_each_tx_queue(tx_queue, efx) {
927 * (more than one per port won't work on Linux, due to out 910 if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
928 * of order issues... but will be fine on Solaris) 911 tx_queue->channel = &efx->channel[1];
929 */ 912 else
930 tx_queue = &efx->tx_queue[0]; 913 tx_queue->channel = &efx->channel[0];
931 914 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
932 /* Perform this for each channel with TX capabilities. 915 }
933 * At the moment, we only support a single TX queue
934 */
935 tx_queue->used = 1;
936 if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
937 tx_queue->channel = &efx->channel[1];
938 else
939 tx_queue->channel = &efx->channel[0];
940 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
941 tx_queue++;
942
943 /* RX queues. Each has a dedicated channel. */
944 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
945 rx_queue = &efx->rx_queue[i];
946 916
947 if (i < efx->rss_queues) { 917 efx_for_each_rx_queue(rx_queue, efx) {
948 rx_queue->used = 1; 918 rx_queue->channel = &efx->channel[rx_queue->queue];
949 /* If we allow multiple RX queues per channel 919 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
950 * we need to decide that here
951 */
952 rx_queue->channel = &efx->channel[rx_queue->queue];
953 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
954 rx_queue++;
955 }
956 } 920 }
957} 921}
958 922
@@ -971,8 +935,7 @@ static int efx_probe_nic(struct efx_nic *efx)
971 * in MSI-X interrupts. */ 935 * in MSI-X interrupts. */
972 efx_probe_interrupts(efx); 936 efx_probe_interrupts(efx);
973 937
974 /* Determine number of RX queues and TX queues */ 938 efx_set_channels(efx);
975 efx_select_used(efx);
976 939
977 /* Initialise the interrupt moderation settings */ 940 /* Initialise the interrupt moderation settings */
978 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec); 941 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
@@ -1058,7 +1021,8 @@ static void efx_start_all(struct efx_nic *efx)
1058 /* Mark the port as enabled so port reconfigurations can start, then 1021 /* Mark the port as enabled so port reconfigurations can start, then
1059 * restart the transmit interface early so the watchdog timer stops */ 1022 * restart the transmit interface early so the watchdog timer stops */
1060 efx_start_port(efx); 1023 efx_start_port(efx);
1061 efx_wake_queue(efx); 1024 if (efx_dev_registered(efx))
1025 efx_wake_queue(efx);
1062 1026
1063 efx_for_each_channel(channel, efx) 1027 efx_for_each_channel(channel, efx)
1064 efx_start_channel(channel); 1028 efx_start_channel(channel);
@@ -1109,7 +1073,7 @@ static void efx_stop_all(struct efx_nic *efx)
1109 falcon_disable_interrupts(efx); 1073 falcon_disable_interrupts(efx);
1110 if (efx->legacy_irq) 1074 if (efx->legacy_irq)
1111 synchronize_irq(efx->legacy_irq); 1075 synchronize_irq(efx->legacy_irq);
1112 efx_for_each_channel_with_interrupt(channel, efx) { 1076 efx_for_each_channel(channel, efx) {
1113 if (channel->irq) 1077 if (channel->irq)
1114 synchronize_irq(channel->irq); 1078 synchronize_irq(channel->irq);
1115 } 1079 }
@@ -1128,13 +1092,12 @@ static void efx_stop_all(struct efx_nic *efx)
1128 1092
1129 /* Isolate the MAC from the TX and RX engines, so that queue 1093 /* Isolate the MAC from the TX and RX engines, so that queue
1130 * flushes will complete in a timely fashion. */ 1094 * flushes will complete in a timely fashion. */
1131 falcon_deconfigure_mac_wrapper(efx);
1132 falcon_drain_tx_fifo(efx); 1095 falcon_drain_tx_fifo(efx);
1133 1096
1134 /* Stop the kernel transmit interface late, so the watchdog 1097 /* Stop the kernel transmit interface late, so the watchdog
1135 * timer isn't ticking over the flush */ 1098 * timer isn't ticking over the flush */
1136 efx_stop_queue(efx);
1137 if (efx_dev_registered(efx)) { 1099 if (efx_dev_registered(efx)) {
1100 efx_stop_queue(efx);
1138 netif_tx_lock_bh(efx->net_dev); 1101 netif_tx_lock_bh(efx->net_dev);
1139 netif_tx_unlock_bh(efx->net_dev); 1102 netif_tx_unlock_bh(efx->net_dev);
1140 } 1103 }
@@ -1151,24 +1114,16 @@ static void efx_remove_all(struct efx_nic *efx)
1151} 1114}
1152 1115
1153/* A convinience function to safely flush all the queues */ 1116/* A convinience function to safely flush all the queues */
1154int efx_flush_queues(struct efx_nic *efx) 1117void efx_flush_queues(struct efx_nic *efx)
1155{ 1118{
1156 int rc;
1157
1158 EFX_ASSERT_RESET_SERIALISED(efx); 1119 EFX_ASSERT_RESET_SERIALISED(efx);
1159 1120
1160 efx_stop_all(efx); 1121 efx_stop_all(efx);
1161 1122
1162 efx_fini_channels(efx); 1123 efx_fini_channels(efx);
1163 rc = efx_init_channels(efx); 1124 efx_init_channels(efx);
1164 if (rc) {
1165 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1166 return rc;
1167 }
1168 1125
1169 efx_start_all(efx); 1126 efx_start_all(efx);
1170
1171 return 0;
1172} 1127}
1173 1128
1174/************************************************************************** 1129/**************************************************************************
@@ -1249,7 +1204,7 @@ static void efx_monitor(struct work_struct *data)
1249 */ 1204 */
1250static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) 1205static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1251{ 1206{
1252 struct efx_nic *efx = net_dev->priv; 1207 struct efx_nic *efx = netdev_priv(net_dev);
1253 1208
1254 EFX_ASSERT_RESET_SERIALISED(efx); 1209 EFX_ASSERT_RESET_SERIALISED(efx);
1255 1210
@@ -1303,10 +1258,10 @@ static void efx_fini_napi(struct efx_nic *efx)
1303 */ 1258 */
1304static void efx_netpoll(struct net_device *net_dev) 1259static void efx_netpoll(struct net_device *net_dev)
1305{ 1260{
1306 struct efx_nic *efx = net_dev->priv; 1261 struct efx_nic *efx = netdev_priv(net_dev);
1307 struct efx_channel *channel; 1262 struct efx_channel *channel;
1308 1263
1309 efx_for_each_channel_with_interrupt(channel, efx) 1264 efx_for_each_channel(channel, efx)
1310 efx_schedule_channel(channel); 1265 efx_schedule_channel(channel);
1311} 1266}
1312 1267
@@ -1321,12 +1276,15 @@ static void efx_netpoll(struct net_device *net_dev)
1321/* Context: process, rtnl_lock() held. */ 1276/* Context: process, rtnl_lock() held. */
1322static int efx_net_open(struct net_device *net_dev) 1277static int efx_net_open(struct net_device *net_dev)
1323{ 1278{
1324 struct efx_nic *efx = net_dev->priv; 1279 struct efx_nic *efx = netdev_priv(net_dev);
1325 EFX_ASSERT_RESET_SERIALISED(efx); 1280 EFX_ASSERT_RESET_SERIALISED(efx);
1326 1281
1327 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name, 1282 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
1328 raw_smp_processor_id()); 1283 raw_smp_processor_id());
1329 1284
1285 if (efx->phy_mode & PHY_MODE_SPECIAL)
1286 return -EBUSY;
1287
1330 efx_start_all(efx); 1288 efx_start_all(efx);
1331 return 0; 1289 return 0;
1332} 1290}
@@ -1337,8 +1295,7 @@ static int efx_net_open(struct net_device *net_dev)
1337 */ 1295 */
1338static int efx_net_stop(struct net_device *net_dev) 1296static int efx_net_stop(struct net_device *net_dev)
1339{ 1297{
1340 struct efx_nic *efx = net_dev->priv; 1298 struct efx_nic *efx = netdev_priv(net_dev);
1341 int rc;
1342 1299
1343 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name, 1300 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
1344 raw_smp_processor_id()); 1301 raw_smp_processor_id());
@@ -1346,9 +1303,7 @@ static int efx_net_stop(struct net_device *net_dev)
1346 /* Stop the device and flush all the channels */ 1303 /* Stop the device and flush all the channels */
1347 efx_stop_all(efx); 1304 efx_stop_all(efx);
1348 efx_fini_channels(efx); 1305 efx_fini_channels(efx);
1349 rc = efx_init_channels(efx); 1306 efx_init_channels(efx);
1350 if (rc)
1351 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1352 1307
1353 return 0; 1308 return 0;
1354} 1309}
@@ -1356,7 +1311,7 @@ static int efx_net_stop(struct net_device *net_dev)
1356/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1311/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1357static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1312static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1358{ 1313{
1359 struct efx_nic *efx = net_dev->priv; 1314 struct efx_nic *efx = netdev_priv(net_dev);
1360 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1315 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1361 struct net_device_stats *stats = &net_dev->stats; 1316 struct net_device_stats *stats = &net_dev->stats;
1362 1317
@@ -1366,7 +1321,7 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1366 */ 1321 */
1367 if (!spin_trylock(&efx->stats_lock)) 1322 if (!spin_trylock(&efx->stats_lock))
1368 return stats; 1323 return stats;
1369 if (efx->state == STATE_RUNNING) { 1324 if (efx->stats_enabled) {
1370 falcon_update_stats_xmac(efx); 1325 falcon_update_stats_xmac(efx);
1371 falcon_update_nic_stats(efx); 1326 falcon_update_nic_stats(efx);
1372 } 1327 }
@@ -1403,7 +1358,7 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1403/* Context: netif_tx_lock held, BHs disabled. */ 1358/* Context: netif_tx_lock held, BHs disabled. */
1404static void efx_watchdog(struct net_device *net_dev) 1359static void efx_watchdog(struct net_device *net_dev)
1405{ 1360{
1406 struct efx_nic *efx = net_dev->priv; 1361 struct efx_nic *efx = netdev_priv(net_dev);
1407 1362
1408 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n", 1363 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
1409 atomic_read(&efx->netif_stop_count), efx->port_enabled, 1364 atomic_read(&efx->netif_stop_count), efx->port_enabled,
@@ -1417,7 +1372,7 @@ static void efx_watchdog(struct net_device *net_dev)
1417/* Context: process, rtnl_lock() held. */ 1372/* Context: process, rtnl_lock() held. */
1418static int efx_change_mtu(struct net_device *net_dev, int new_mtu) 1373static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1419{ 1374{
1420 struct efx_nic *efx = net_dev->priv; 1375 struct efx_nic *efx = netdev_priv(net_dev);
1421 int rc = 0; 1376 int rc = 0;
1422 1377
1423 EFX_ASSERT_RESET_SERIALISED(efx); 1378 EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1431,21 +1386,15 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1431 1386
1432 efx_fini_channels(efx); 1387 efx_fini_channels(efx);
1433 net_dev->mtu = new_mtu; 1388 net_dev->mtu = new_mtu;
1434 rc = efx_init_channels(efx); 1389 efx_init_channels(efx);
1435 if (rc)
1436 goto fail;
1437 1390
1438 efx_start_all(efx); 1391 efx_start_all(efx);
1439 return rc; 1392 return rc;
1440
1441 fail:
1442 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1443 return rc;
1444} 1393}
1445 1394
1446static int efx_set_mac_address(struct net_device *net_dev, void *data) 1395static int efx_set_mac_address(struct net_device *net_dev, void *data)
1447{ 1396{
1448 struct efx_nic *efx = net_dev->priv; 1397 struct efx_nic *efx = netdev_priv(net_dev);
1449 struct sockaddr *addr = data; 1398 struct sockaddr *addr = data;
1450 char *new_addr = addr->sa_data; 1399 char *new_addr = addr->sa_data;
1451 1400
@@ -1466,26 +1415,19 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1466 return 0; 1415 return 0;
1467} 1416}
1468 1417
1469/* Context: netif_tx_lock held, BHs disabled. */ 1418/* Context: netif_addr_lock held, BHs disabled. */
1470static void efx_set_multicast_list(struct net_device *net_dev) 1419static void efx_set_multicast_list(struct net_device *net_dev)
1471{ 1420{
1472 struct efx_nic *efx = net_dev->priv; 1421 struct efx_nic *efx = netdev_priv(net_dev);
1473 struct dev_mc_list *mc_list = net_dev->mc_list; 1422 struct dev_mc_list *mc_list = net_dev->mc_list;
1474 union efx_multicast_hash *mc_hash = &efx->multicast_hash; 1423 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1475 int promiscuous; 1424 bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
1425 bool changed = (efx->promiscuous != promiscuous);
1476 u32 crc; 1426 u32 crc;
1477 int bit; 1427 int bit;
1478 int i; 1428 int i;
1479 1429
1480 /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */ 1430 efx->promiscuous = promiscuous;
1481 promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
1482 if (efx->promiscuous != promiscuous) {
1483 efx->promiscuous = promiscuous;
1484 /* Close the window between efx_stop_port() and efx_flush_all()
1485 * by only queuing work when the port is enabled. */
1486 if (efx->port_enabled)
1487 queue_work(efx->workqueue, &efx->reconfigure_work);
1488 }
1489 1431
1490 /* Build multicast hash table */ 1432 /* Build multicast hash table */
1491 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) { 1433 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
@@ -1500,6 +1442,13 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1500 } 1442 }
1501 } 1443 }
1502 1444
1445 if (!efx->port_enabled)
1446 /* Delay pushing settings until efx_start_port() */
1447 return;
1448
1449 if (changed)
1450 queue_work(efx->workqueue, &efx->reconfigure_work);
1451
1503 /* Create and activate new global multicast hash table */ 1452 /* Create and activate new global multicast hash table */
1504 falcon_set_multicast_hash(efx); 1453 falcon_set_multicast_hash(efx);
1505} 1454}
@@ -1510,7 +1459,7 @@ static int efx_netdev_event(struct notifier_block *this,
1510 struct net_device *net_dev = ptr; 1459 struct net_device *net_dev = ptr;
1511 1460
1512 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { 1461 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
1513 struct efx_nic *efx = net_dev->priv; 1462 struct efx_nic *efx = netdev_priv(net_dev);
1514 1463
1515 strcpy(efx->name, net_dev->name); 1464 strcpy(efx->name, net_dev->name);
1516 } 1465 }
@@ -1568,7 +1517,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1568 if (!efx->net_dev) 1517 if (!efx->net_dev)
1569 return; 1518 return;
1570 1519
1571 BUG_ON(efx->net_dev->priv != efx); 1520 BUG_ON(netdev_priv(efx->net_dev) != efx);
1572 1521
1573 /* Free up any skbs still remaining. This has to happen before 1522 /* Free up any skbs still remaining. This has to happen before
1574 * we try to unregister the netdev as running their destructors 1523 * we try to unregister the netdev as running their destructors
@@ -1588,49 +1537,60 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1588 * 1537 *
1589 **************************************************************************/ 1538 **************************************************************************/
1590 1539
1591/* The final hardware and software finalisation before reset. */ 1540/* Tears down the entire software state and most of the hardware state
1592static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) 1541 * before reset. */
1542void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1593{ 1543{
1594 int rc; 1544 int rc;
1595 1545
1596 EFX_ASSERT_RESET_SERIALISED(efx); 1546 EFX_ASSERT_RESET_SERIALISED(efx);
1597 1547
1548 /* The net_dev->get_stats handler is quite slow, and will fail
1549 * if a fetch is pending over reset. Serialise against it. */
1550 spin_lock(&efx->stats_lock);
1551 efx->stats_enabled = false;
1552 spin_unlock(&efx->stats_lock);
1553
1554 efx_stop_all(efx);
1555 mutex_lock(&efx->mac_lock);
1556
1598 rc = falcon_xmac_get_settings(efx, ecmd); 1557 rc = falcon_xmac_get_settings(efx, ecmd);
1599 if (rc) { 1558 if (rc)
1600 EFX_ERR(efx, "could not back up PHY settings\n"); 1559 EFX_ERR(efx, "could not back up PHY settings\n");
1601 goto fail;
1602 }
1603 1560
1604 efx_fini_channels(efx); 1561 efx_fini_channels(efx);
1605 return 0;
1606
1607 fail:
1608 return rc;
1609} 1562}
1610 1563
1611/* The first part of software initialisation after a hardware reset 1564/* This function will always ensure that the locks acquired in
1612 * This function does not handle serialisation with the kernel, it 1565 * efx_reset_down() are released. A failure return code indicates
1613 * assumes the caller has done this */ 1566 * that we were unable to reinitialise the hardware, and the
1614static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd) 1567 * driver should be disabled. If ok is false, then the rx and tx
1568 * engines are not restarted, pending a RESET_DISABLE. */
1569int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
1615{ 1570{
1616 int rc; 1571 int rc;
1617 1572
1618 rc = efx_init_channels(efx); 1573 EFX_ASSERT_RESET_SERIALISED(efx);
1619 if (rc)
1620 goto fail1;
1621 1574
1622 /* Restore MAC and PHY settings. */ 1575 rc = falcon_init_nic(efx);
1623 rc = falcon_xmac_set_settings(efx, ecmd);
1624 if (rc) { 1576 if (rc) {
1625 EFX_ERR(efx, "could not restore PHY settings\n"); 1577 EFX_ERR(efx, "failed to initialise NIC\n");
1626 goto fail2; 1578 ok = false;
1627 } 1579 }
1628 1580
1629 return 0; 1581 if (ok) {
1582 efx_init_channels(efx);
1630 1583
1631 fail2: 1584 if (falcon_xmac_set_settings(efx, ecmd))
1632 efx_fini_channels(efx); 1585 EFX_ERR(efx, "could not restore PHY settings\n");
1633 fail1: 1586 }
1587
1588 mutex_unlock(&efx->mac_lock);
1589
1590 if (ok) {
1591 efx_start_all(efx);
1592 efx->stats_enabled = true;
1593 }
1634 return rc; 1594 return rc;
1635} 1595}
1636 1596
@@ -1659,25 +1619,14 @@ static int efx_reset(struct efx_nic *efx)
1659 goto unlock_rtnl; 1619 goto unlock_rtnl;
1660 } 1620 }
1661 1621
1662 efx->state = STATE_RESETTING;
1663 EFX_INFO(efx, "resetting (%d)\n", method); 1622 EFX_INFO(efx, "resetting (%d)\n", method);
1664 1623
1665 /* The net_dev->get_stats handler is quite slow, and will fail 1624 efx_reset_down(efx, &ecmd);
1666 * if a fetch is pending over reset. Serialise against it. */
1667 spin_lock(&efx->stats_lock);
1668 spin_unlock(&efx->stats_lock);
1669
1670 efx_stop_all(efx);
1671 mutex_lock(&efx->mac_lock);
1672
1673 rc = efx_reset_down(efx, &ecmd);
1674 if (rc)
1675 goto fail1;
1676 1625
1677 rc = falcon_reset_hw(efx, method); 1626 rc = falcon_reset_hw(efx, method);
1678 if (rc) { 1627 if (rc) {
1679 EFX_ERR(efx, "failed to reset hardware\n"); 1628 EFX_ERR(efx, "failed to reset hardware\n");
1680 goto fail2; 1629 goto fail;
1681 } 1630 }
1682 1631
1683 /* Allow resets to be rescheduled. */ 1632 /* Allow resets to be rescheduled. */
@@ -1689,46 +1638,27 @@ static int efx_reset(struct efx_nic *efx)
1689 * can respond to requests. */ 1638 * can respond to requests. */
1690 pci_set_master(efx->pci_dev); 1639 pci_set_master(efx->pci_dev);
1691 1640
1692 /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
1693 * case so the driver can talk to external SRAM */
1694 rc = falcon_init_nic(efx);
1695 if (rc) {
1696 EFX_ERR(efx, "failed to initialise NIC\n");
1697 goto fail3;
1698 }
1699
1700 /* Leave device stopped if necessary */ 1641 /* Leave device stopped if necessary */
1701 if (method == RESET_TYPE_DISABLE) { 1642 if (method == RESET_TYPE_DISABLE) {
1702 /* Reinitialise the device anyway so the driver unload sequence
1703 * can talk to the external SRAM */
1704 falcon_init_nic(efx);
1705 rc = -EIO; 1643 rc = -EIO;
1706 goto fail4; 1644 goto fail;
1707 } 1645 }
1708 1646
1709 rc = efx_reset_up(efx, &ecmd); 1647 rc = efx_reset_up(efx, &ecmd, true);
1710 if (rc) 1648 if (rc)
1711 goto fail5; 1649 goto disable;
1712 1650
1713 mutex_unlock(&efx->mac_lock);
1714 EFX_LOG(efx, "reset complete\n"); 1651 EFX_LOG(efx, "reset complete\n");
1715
1716 efx->state = STATE_RUNNING;
1717 efx_start_all(efx);
1718
1719 unlock_rtnl: 1652 unlock_rtnl:
1720 rtnl_unlock(); 1653 rtnl_unlock();
1721 return 0; 1654 return 0;
1722 1655
1723 fail5: 1656 fail:
1724 fail4: 1657 efx_reset_up(efx, &ecmd, false);
1725 fail3: 1658 disable:
1726 fail2:
1727 fail1:
1728 EFX_ERR(efx, "has been disabled\n"); 1659 EFX_ERR(efx, "has been disabled\n");
1729 efx->state = STATE_DISABLED; 1660 efx->state = STATE_DISABLED;
1730 1661
1731 mutex_unlock(&efx->mac_lock);
1732 rtnl_unlock(); 1662 rtnl_unlock();
1733 efx_unregister_netdev(efx); 1663 efx_unregister_netdev(efx);
1734 efx_fini_port(efx); 1664 efx_fini_port(efx);
@@ -1801,7 +1731,7 @@ static struct pci_device_id efx_pci_table[] __devinitdata = {
1801 * 1731 *
1802 * Dummy PHY/MAC/Board operations 1732 * Dummy PHY/MAC/Board operations
1803 * 1733 *
1804 * Can be used where the MAC does not implement this operation 1734 * Can be used for some unimplemented operations
1805 * Needed so all function pointers are valid and do not have to be tested 1735 * Needed so all function pointers are valid and do not have to be tested
1806 * before use 1736 * before use
1807 * 1737 *
@@ -1811,7 +1741,7 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
1811 return 0; 1741 return 0;
1812} 1742}
1813void efx_port_dummy_op_void(struct efx_nic *efx) {} 1743void efx_port_dummy_op_void(struct efx_nic *efx) {}
1814void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {} 1744void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
1815 1745
1816static struct efx_phy_operations efx_dummy_phy_operations = { 1746static struct efx_phy_operations efx_dummy_phy_operations = {
1817 .init = efx_port_dummy_op_int, 1747 .init = efx_port_dummy_op_int,
@@ -1819,20 +1749,14 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
1819 .check_hw = efx_port_dummy_op_int, 1749 .check_hw = efx_port_dummy_op_int,
1820 .fini = efx_port_dummy_op_void, 1750 .fini = efx_port_dummy_op_void,
1821 .clear_interrupt = efx_port_dummy_op_void, 1751 .clear_interrupt = efx_port_dummy_op_void,
1822 .reset_xaui = efx_port_dummy_op_void,
1823}; 1752};
1824 1753
1825/* Dummy board operations */
1826static int efx_nic_dummy_op_int(struct efx_nic *nic)
1827{
1828 return 0;
1829}
1830
1831static struct efx_board efx_dummy_board_info = { 1754static struct efx_board efx_dummy_board_info = {
1832 .init = efx_nic_dummy_op_int, 1755 .init = efx_port_dummy_op_int,
1833 .init_leds = efx_port_dummy_op_int, 1756 .init_leds = efx_port_dummy_op_int,
1834 .set_fault_led = efx_port_dummy_op_blink, 1757 .set_fault_led = efx_port_dummy_op_blink,
1835 .fini = efx_port_dummy_op_void, 1758 .blink = efx_port_dummy_op_blink,
1759 .fini = efx_port_dummy_op_void,
1836}; 1760};
1837 1761
1838/************************************************************************** 1762/**************************************************************************
@@ -1865,7 +1789,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1865 efx->board_info = efx_dummy_board_info; 1789 efx->board_info = efx_dummy_board_info;
1866 1790
1867 efx->net_dev = net_dev; 1791 efx->net_dev = net_dev;
1868 efx->rx_checksum_enabled = 1; 1792 efx->rx_checksum_enabled = true;
1869 spin_lock_init(&efx->netif_stop_lock); 1793 spin_lock_init(&efx->netif_stop_lock);
1870 spin_lock_init(&efx->stats_lock); 1794 spin_lock_init(&efx->stats_lock);
1871 mutex_init(&efx->mac_lock); 1795 mutex_init(&efx->mac_lock);
@@ -1878,10 +1802,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1878 channel = &efx->channel[i]; 1802 channel = &efx->channel[i];
1879 channel->efx = efx; 1803 channel->efx = efx;
1880 channel->channel = i; 1804 channel->channel = i;
1881 channel->evqnum = i; 1805 channel->work_pending = false;
1882 channel->work_pending = 0;
1883 } 1806 }
1884 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) { 1807 for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
1885 tx_queue = &efx->tx_queue[i]; 1808 tx_queue = &efx->tx_queue[i];
1886 tx_queue->efx = efx; 1809 tx_queue->efx = efx;
1887 tx_queue->queue = i; 1810 tx_queue->queue = i;
@@ -2056,19 +1979,16 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2056 goto fail5; 1979 goto fail5;
2057 } 1980 }
2058 1981
2059 rc = efx_init_channels(efx); 1982 efx_init_channels(efx);
2060 if (rc)
2061 goto fail6;
2062 1983
2063 rc = falcon_init_interrupt(efx); 1984 rc = falcon_init_interrupt(efx);
2064 if (rc) 1985 if (rc)
2065 goto fail7; 1986 goto fail6;
2066 1987
2067 return 0; 1988 return 0;
2068 1989
2069 fail7:
2070 efx_fini_channels(efx);
2071 fail6: 1990 fail6:
1991 efx_fini_channels(efx);
2072 efx_fini_port(efx); 1992 efx_fini_port(efx);
2073 fail5: 1993 fail5:
2074 fail4: 1994 fail4:
@@ -2105,7 +2025,10 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2105 NETIF_F_HIGHDMA | NETIF_F_TSO); 2025 NETIF_F_HIGHDMA | NETIF_F_TSO);
2106 if (lro) 2026 if (lro)
2107 net_dev->features |= NETIF_F_LRO; 2027 net_dev->features |= NETIF_F_LRO;
2108 efx = net_dev->priv; 2028 /* Mask for features that also apply to VLAN devices */
2029 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2030 NETIF_F_HIGHDMA | NETIF_F_TSO);
2031 efx = netdev_priv(net_dev);
2109 pci_set_drvdata(pci_dev, efx); 2032 pci_set_drvdata(pci_dev, efx);
2110 rc = efx_init_struct(efx, type, pci_dev, net_dev); 2033 rc = efx_init_struct(efx, type, pci_dev, net_dev);
2111 if (rc) 2034 if (rc)
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 3b2f69f4a9ab..d02937b70eee 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -28,15 +28,21 @@ extern void efx_wake_queue(struct efx_nic *efx);
28/* RX */ 28/* RX */
29extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 29extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
30extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 30extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
31 unsigned int len, int checksummed, int discard); 31 unsigned int len, bool checksummed, bool discard);
32extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); 32extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
33 33
34/* Channels */ 34/* Channels */
35extern void efx_process_channel_now(struct efx_channel *channel); 35extern void efx_process_channel_now(struct efx_channel *channel);
36extern int efx_flush_queues(struct efx_nic *efx); 36extern void efx_flush_queues(struct efx_nic *efx);
37 37
38/* Ports */ 38/* Ports */
39extern void efx_reconfigure_port(struct efx_nic *efx); 39extern void efx_reconfigure_port(struct efx_nic *efx);
40extern void __efx_reconfigure_port(struct efx_nic *efx);
41
42/* Reset handling */
43extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd);
44extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd,
45 bool ok);
40 46
41/* Global */ 47/* Global */
42extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); 48extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
@@ -50,7 +56,7 @@ extern void efx_hex_dump(const u8 *, unsigned int, const char *);
50/* Dummy PHY ops for PHY drivers */ 56/* Dummy PHY ops for PHY drivers */
51extern int efx_port_dummy_op_int(struct efx_nic *efx); 57extern int efx_port_dummy_op_int(struct efx_nic *efx);
52extern void efx_port_dummy_op_void(struct efx_nic *efx); 58extern void efx_port_dummy_op_void(struct efx_nic *efx);
53extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink); 59extern void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink);
54 60
55 61
56extern unsigned int efx_monitor_interval; 62extern unsigned int efx_monitor_interval;
@@ -59,7 +65,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
59{ 65{
60 EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n", 66 EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
61 channel->channel, raw_smp_processor_id()); 67 channel->channel, raw_smp_processor_id());
62 channel->work_pending = 1; 68 channel->work_pending = true;
63 69
64 netif_rx_schedule(channel->napi_dev, &channel->napi_str); 70 netif_rx_schedule(channel->napi_dev, &channel->napi_str);
65} 71}
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index c53290d08e2b..cec15dbb88e4 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -52,12 +52,11 @@ extern const char *efx_loopback_mode_names[];
52#define LOOPBACK_MASK(_efx) \ 52#define LOOPBACK_MASK(_efx) \
53 (1 << (_efx)->loopback_mode) 53 (1 << (_efx)->loopback_mode)
54 54
55#define LOOPBACK_INTERNAL(_efx) \ 55#define LOOPBACK_INTERNAL(_efx) \
56 ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0) 56 (!!(LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)))
57 57
58#define LOOPBACK_OUT_OF(_from, _to, _mask) \ 58#define LOOPBACK_OUT_OF(_from, _to, _mask) \
59 (((LOOPBACK_MASK(_from) & (_mask)) && \ 59 ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
60 ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
61 60
62/*****************************************************************************/ 61/*****************************************************************************/
63 62
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index e2c75d101610..fa98af58223e 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -17,6 +17,7 @@
17#include "ethtool.h" 17#include "ethtool.h"
18#include "falcon.h" 18#include "falcon.h"
19#include "gmii.h" 19#include "gmii.h"
20#include "spi.h"
20#include "mac.h" 21#include "mac.h"
21 22
22const char *efx_loopback_mode_names[] = { 23const char *efx_loopback_mode_names[] = {
@@ -32,8 +33,6 @@ const char *efx_loopback_mode_names[] = {
32 [LOOPBACK_NETWORK] = "NETWORK", 33 [LOOPBACK_NETWORK] = "NETWORK",
33}; 34};
34 35
35static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
36
37struct ethtool_string { 36struct ethtool_string {
38 char name[ETH_GSTRING_LEN]; 37 char name[ETH_GSTRING_LEN];
39}; 38};
@@ -173,6 +172,11 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
173/* Number of ethtool statistics */ 172/* Number of ethtool statistics */
174#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) 173#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
175 174
175/* EEPROM range with gPXE configuration */
176#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
177#define EFX_ETHTOOL_EEPROM_MIN 0x100U
178#define EFX_ETHTOOL_EEPROM_MAX 0x400U
179
176/************************************************************************** 180/**************************************************************************
177 * 181 *
178 * Ethtool operations 182 * Ethtool operations
@@ -183,7 +187,7 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
183/* Identify device by flashing LEDs */ 187/* Identify device by flashing LEDs */
184static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds) 188static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
185{ 189{
186 struct efx_nic *efx = net_dev->priv; 190 struct efx_nic *efx = netdev_priv(net_dev);
187 191
188 efx->board_info.blink(efx, 1); 192 efx->board_info.blink(efx, 1);
189 schedule_timeout_interruptible(seconds * HZ); 193 schedule_timeout_interruptible(seconds * HZ);
@@ -195,7 +199,7 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
195int efx_ethtool_get_settings(struct net_device *net_dev, 199int efx_ethtool_get_settings(struct net_device *net_dev,
196 struct ethtool_cmd *ecmd) 200 struct ethtool_cmd *ecmd)
197{ 201{
198 struct efx_nic *efx = net_dev->priv; 202 struct efx_nic *efx = netdev_priv(net_dev);
199 int rc; 203 int rc;
200 204
201 mutex_lock(&efx->mac_lock); 205 mutex_lock(&efx->mac_lock);
@@ -209,7 +213,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
209int efx_ethtool_set_settings(struct net_device *net_dev, 213int efx_ethtool_set_settings(struct net_device *net_dev,
210 struct ethtool_cmd *ecmd) 214 struct ethtool_cmd *ecmd)
211{ 215{
212 struct efx_nic *efx = net_dev->priv; 216 struct efx_nic *efx = netdev_priv(net_dev);
213 int rc; 217 int rc;
214 218
215 mutex_lock(&efx->mac_lock); 219 mutex_lock(&efx->mac_lock);
@@ -224,7 +228,7 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
224static void efx_ethtool_get_drvinfo(struct net_device *net_dev, 228static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
225 struct ethtool_drvinfo *info) 229 struct ethtool_drvinfo *info)
226{ 230{
227 struct efx_nic *efx = net_dev->priv; 231 struct efx_nic *efx = netdev_priv(net_dev);
228 232
229 strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver)); 233 strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
230 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); 234 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
@@ -329,7 +333,10 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
329 unsigned int n = 0; 333 unsigned int n = 0;
330 enum efx_loopback_mode mode; 334 enum efx_loopback_mode mode;
331 335
332 /* Interrupt */ 336 efx_fill_test(n++, strings, data, &tests->mii,
337 "core", 0, "mii", NULL);
338 efx_fill_test(n++, strings, data, &tests->nvram,
339 "core", 0, "nvram", NULL);
333 efx_fill_test(n++, strings, data, &tests->interrupt, 340 efx_fill_test(n++, strings, data, &tests->interrupt,
334 "core", 0, "interrupt", NULL); 341 "core", 0, "interrupt", NULL);
335 342
@@ -349,16 +356,17 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
349 "eventq.poll", NULL); 356 "eventq.poll", NULL);
350 } 357 }
351 358
352 /* PHY presence */ 359 efx_fill_test(n++, strings, data, &tests->registers,
353 efx_fill_test(n++, strings, data, &tests->phy_ok, 360 "core", 0, "registers", NULL);
354 EFX_PORT_NAME, "phy_ok", NULL); 361 efx_fill_test(n++, strings, data, &tests->phy,
362 EFX_PORT_NAME, "phy", NULL);
355 363
356 /* Loopback tests */ 364 /* Loopback tests */
357 efx_fill_test(n++, strings, data, &tests->loopback_speed, 365 efx_fill_test(n++, strings, data, &tests->loopback_speed,
358 EFX_PORT_NAME, "loopback.speed", NULL); 366 EFX_PORT_NAME, "loopback.speed", NULL);
359 efx_fill_test(n++, strings, data, &tests->loopback_full_duplex, 367 efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
360 EFX_PORT_NAME, "loopback.full_duplex", NULL); 368 EFX_PORT_NAME, "loopback.full_duplex", NULL);
361 for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { 369 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
362 if (!(efx->loopback_modes & (1 << mode))) 370 if (!(efx->loopback_modes & (1 << mode)))
363 continue; 371 continue;
364 n = efx_fill_loopback_test(efx, 372 n = efx_fill_loopback_test(efx,
@@ -369,22 +377,24 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
369 return n; 377 return n;
370} 378}
371 379
372static int efx_ethtool_get_stats_count(struct net_device *net_dev) 380static int efx_ethtool_get_sset_count(struct net_device *net_dev,
381 int string_set)
373{ 382{
374 return EFX_ETHTOOL_NUM_STATS; 383 switch (string_set) {
375} 384 case ETH_SS_STATS:
376 385 return EFX_ETHTOOL_NUM_STATS;
377static int efx_ethtool_self_test_count(struct net_device *net_dev) 386 case ETH_SS_TEST:
378{ 387 return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
379 struct efx_nic *efx = net_dev->priv; 388 NULL, NULL, NULL);
380 389 default:
381 return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); 390 return -EINVAL;
391 }
382} 392}
383 393
384static void efx_ethtool_get_strings(struct net_device *net_dev, 394static void efx_ethtool_get_strings(struct net_device *net_dev,
385 u32 string_set, u8 *strings) 395 u32 string_set, u8 *strings)
386{ 396{
387 struct efx_nic *efx = net_dev->priv; 397 struct efx_nic *efx = netdev_priv(net_dev);
388 struct ethtool_string *ethtool_strings = 398 struct ethtool_string *ethtool_strings =
389 (struct ethtool_string *)strings; 399 (struct ethtool_string *)strings;
390 int i; 400 int i;
@@ -410,7 +420,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
410 struct ethtool_stats *stats, 420 struct ethtool_stats *stats,
411 u64 *data) 421 u64 *data)
412{ 422{
413 struct efx_nic *efx = net_dev->priv; 423 struct efx_nic *efx = netdev_priv(net_dev);
414 struct efx_mac_stats *mac_stats = &efx->mac_stats; 424 struct efx_mac_stats *mac_stats = &efx->mac_stats;
415 struct efx_ethtool_stat *stat; 425 struct efx_ethtool_stat *stat;
416 struct efx_channel *channel; 426 struct efx_channel *channel;
@@ -442,60 +452,21 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
442 } 452 }
443} 453}
444 454
445static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
446{
447 int rc;
448
449 /* Our TSO requires TX checksumming, so force TX checksumming
450 * on when TSO is enabled.
451 */
452 if (enable) {
453 rc = efx_ethtool_set_tx_csum(net_dev, 1);
454 if (rc)
455 return rc;
456 }
457
458 return ethtool_op_set_tso(net_dev, enable);
459}
460
461static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
462{
463 struct efx_nic *efx = net_dev->priv;
464 int rc;
465
466 rc = ethtool_op_set_tx_csum(net_dev, enable);
467 if (rc)
468 return rc;
469
470 efx_flush_queues(efx);
471
472 /* Our TSO requires TX checksumming, so disable TSO when
473 * checksumming is disabled
474 */
475 if (!enable) {
476 rc = efx_ethtool_set_tso(net_dev, 0);
477 if (rc)
478 return rc;
479 }
480
481 return 0;
482}
483
484static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable) 455static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
485{ 456{
486 struct efx_nic *efx = net_dev->priv; 457 struct efx_nic *efx = netdev_priv(net_dev);
487 458
488 /* No way to stop the hardware doing the checks; we just 459 /* No way to stop the hardware doing the checks; we just
489 * ignore the result. 460 * ignore the result.
490 */ 461 */
491 efx->rx_checksum_enabled = (enable ? 1 : 0); 462 efx->rx_checksum_enabled = !!enable;
492 463
493 return 0; 464 return 0;
494} 465}
495 466
496static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev) 467static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
497{ 468{
498 struct efx_nic *efx = net_dev->priv; 469 struct efx_nic *efx = netdev_priv(net_dev);
499 470
500 return efx->rx_checksum_enabled; 471 return efx->rx_checksum_enabled;
501} 472}
@@ -503,7 +474,7 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
503static void efx_ethtool_self_test(struct net_device *net_dev, 474static void efx_ethtool_self_test(struct net_device *net_dev,
504 struct ethtool_test *test, u64 *data) 475 struct ethtool_test *test, u64 *data)
505{ 476{
506 struct efx_nic *efx = net_dev->priv; 477 struct efx_nic *efx = netdev_priv(net_dev);
507 struct efx_self_tests efx_tests; 478 struct efx_self_tests efx_tests;
508 int offline, already_up; 479 int offline, already_up;
509 int rc; 480 int rc;
@@ -533,15 +504,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
533 goto out; 504 goto out;
534 505
535 /* Perform offline tests only if online tests passed */ 506 /* Perform offline tests only if online tests passed */
536 if (offline) { 507 if (offline)
537 /* Stop the kernel from sending packets during the test. */ 508 rc = efx_offline_test(efx, &efx_tests,
538 efx_stop_queue(efx); 509 efx->loopback_modes);
539 rc = efx_flush_queues(efx);
540 if (!rc)
541 rc = efx_offline_test(efx, &efx_tests,
542 efx->loopback_modes);
543 efx_wake_queue(efx);
544 }
545 510
546 out: 511 out:
547 if (!already_up) 512 if (!already_up)
@@ -561,22 +526,65 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
561/* Restart autonegotiation */ 526/* Restart autonegotiation */
562static int efx_ethtool_nway_reset(struct net_device *net_dev) 527static int efx_ethtool_nway_reset(struct net_device *net_dev)
563{ 528{
564 struct efx_nic *efx = net_dev->priv; 529 struct efx_nic *efx = netdev_priv(net_dev);
565 530
566 return mii_nway_restart(&efx->mii); 531 return mii_nway_restart(&efx->mii);
567} 532}
568 533
569static u32 efx_ethtool_get_link(struct net_device *net_dev) 534static u32 efx_ethtool_get_link(struct net_device *net_dev)
570{ 535{
571 struct efx_nic *efx = net_dev->priv; 536 struct efx_nic *efx = netdev_priv(net_dev);
572 537
573 return efx->link_up; 538 return efx->link_up;
574} 539}
575 540
541static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
542{
543 struct efx_nic *efx = netdev_priv(net_dev);
544 struct efx_spi_device *spi = efx->spi_eeprom;
545
546 if (!spi)
547 return 0;
548 return min(spi->size, EFX_ETHTOOL_EEPROM_MAX) -
549 min(spi->size, EFX_ETHTOOL_EEPROM_MIN);
550}
551
552static int efx_ethtool_get_eeprom(struct net_device *net_dev,
553 struct ethtool_eeprom *eeprom, u8 *buf)
554{
555 struct efx_nic *efx = netdev_priv(net_dev);
556 struct efx_spi_device *spi = efx->spi_eeprom;
557 size_t len;
558 int rc;
559
560 rc = falcon_spi_read(spi, eeprom->offset + EFX_ETHTOOL_EEPROM_MIN,
561 eeprom->len, &len, buf);
562 eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
563 eeprom->len = len;
564 return rc;
565}
566
567static int efx_ethtool_set_eeprom(struct net_device *net_dev,
568 struct ethtool_eeprom *eeprom, u8 *buf)
569{
570 struct efx_nic *efx = netdev_priv(net_dev);
571 struct efx_spi_device *spi = efx->spi_eeprom;
572 size_t len;
573 int rc;
574
575 if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
576 return -EINVAL;
577
578 rc = falcon_spi_write(spi, eeprom->offset + EFX_ETHTOOL_EEPROM_MIN,
579 eeprom->len, &len, buf);
580 eeprom->len = len;
581 return rc;
582}
583
576static int efx_ethtool_get_coalesce(struct net_device *net_dev, 584static int efx_ethtool_get_coalesce(struct net_device *net_dev,
577 struct ethtool_coalesce *coalesce) 585 struct ethtool_coalesce *coalesce)
578{ 586{
579 struct efx_nic *efx = net_dev->priv; 587 struct efx_nic *efx = netdev_priv(net_dev);
580 struct efx_tx_queue *tx_queue; 588 struct efx_tx_queue *tx_queue;
581 struct efx_rx_queue *rx_queue; 589 struct efx_rx_queue *rx_queue;
582 struct efx_channel *channel; 590 struct efx_channel *channel;
@@ -614,7 +622,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
614static int efx_ethtool_set_coalesce(struct net_device *net_dev, 622static int efx_ethtool_set_coalesce(struct net_device *net_dev,
615 struct ethtool_coalesce *coalesce) 623 struct ethtool_coalesce *coalesce)
616{ 624{
617 struct efx_nic *efx = net_dev->priv; 625 struct efx_nic *efx = netdev_priv(net_dev);
618 struct efx_channel *channel; 626 struct efx_channel *channel;
619 struct efx_tx_queue *tx_queue; 627 struct efx_tx_queue *tx_queue;
620 unsigned tx_usecs, rx_usecs; 628 unsigned tx_usecs, rx_usecs;
@@ -657,7 +665,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
657static int efx_ethtool_set_pauseparam(struct net_device *net_dev, 665static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
658 struct ethtool_pauseparam *pause) 666 struct ethtool_pauseparam *pause)
659{ 667{
660 struct efx_nic *efx = net_dev->priv; 668 struct efx_nic *efx = netdev_priv(net_dev);
661 enum efx_fc_type flow_control = efx->flow_control; 669 enum efx_fc_type flow_control = efx->flow_control;
662 int rc; 670 int rc;
663 671
@@ -680,11 +688,11 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
680static void efx_ethtool_get_pauseparam(struct net_device *net_dev, 688static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
681 struct ethtool_pauseparam *pause) 689 struct ethtool_pauseparam *pause)
682{ 690{
683 struct efx_nic *efx = net_dev->priv; 691 struct efx_nic *efx = netdev_priv(net_dev);
684 692
685 pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0; 693 pause->rx_pause = !!(efx->flow_control & EFX_FC_RX);
686 pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0; 694 pause->tx_pause = !!(efx->flow_control & EFX_FC_TX);
687 pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0; 695 pause->autoneg = !!(efx->flow_control & EFX_FC_AUTO);
688} 696}
689 697
690 698
@@ -694,6 +702,9 @@ struct ethtool_ops efx_ethtool_ops = {
694 .get_drvinfo = efx_ethtool_get_drvinfo, 702 .get_drvinfo = efx_ethtool_get_drvinfo,
695 .nway_reset = efx_ethtool_nway_reset, 703 .nway_reset = efx_ethtool_nway_reset,
696 .get_link = efx_ethtool_get_link, 704 .get_link = efx_ethtool_get_link,
705 .get_eeprom_len = efx_ethtool_get_eeprom_len,
706 .get_eeprom = efx_ethtool_get_eeprom,
707 .set_eeprom = efx_ethtool_set_eeprom,
697 .get_coalesce = efx_ethtool_get_coalesce, 708 .get_coalesce = efx_ethtool_get_coalesce,
698 .set_coalesce = efx_ethtool_set_coalesce, 709 .set_coalesce = efx_ethtool_set_coalesce,
699 .get_pauseparam = efx_ethtool_get_pauseparam, 710 .get_pauseparam = efx_ethtool_get_pauseparam,
@@ -701,17 +712,16 @@ struct ethtool_ops efx_ethtool_ops = {
701 .get_rx_csum = efx_ethtool_get_rx_csum, 712 .get_rx_csum = efx_ethtool_get_rx_csum,
702 .set_rx_csum = efx_ethtool_set_rx_csum, 713 .set_rx_csum = efx_ethtool_set_rx_csum,
703 .get_tx_csum = ethtool_op_get_tx_csum, 714 .get_tx_csum = ethtool_op_get_tx_csum,
704 .set_tx_csum = efx_ethtool_set_tx_csum, 715 .set_tx_csum = ethtool_op_set_tx_csum,
705 .get_sg = ethtool_op_get_sg, 716 .get_sg = ethtool_op_get_sg,
706 .set_sg = ethtool_op_set_sg, 717 .set_sg = ethtool_op_set_sg,
707 .get_tso = ethtool_op_get_tso, 718 .get_tso = ethtool_op_get_tso,
708 .set_tso = efx_ethtool_set_tso, 719 .set_tso = ethtool_op_set_tso,
709 .get_flags = ethtool_op_get_flags, 720 .get_flags = ethtool_op_get_flags,
710 .set_flags = ethtool_op_set_flags, 721 .set_flags = ethtool_op_set_flags,
711 .self_test_count = efx_ethtool_self_test_count, 722 .get_sset_count = efx_ethtool_get_sset_count,
712 .self_test = efx_ethtool_self_test, 723 .self_test = efx_ethtool_self_test,
713 .get_strings = efx_ethtool_get_strings, 724 .get_strings = efx_ethtool_get_strings,
714 .phys_id = efx_ethtool_phys_id, 725 .phys_id = efx_ethtool_phys_id,
715 .get_stats_count = efx_ethtool_get_stats_count,
716 .get_ethtool_stats = efx_ethtool_get_stats, 726 .get_ethtool_stats = efx_ethtool_get_stats,
717}; 727};
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 9138ee5b7b7b..31ed1f49de00 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -108,10 +108,10 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
108/* Max number of internal errors. After this resets will not be performed */ 108/* Max number of internal errors. After this resets will not be performed */
109#define FALCON_MAX_INT_ERRORS 4 109#define FALCON_MAX_INT_ERRORS 4
110 110
111/* Maximum period that we wait for flush events. If the flush event 111/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
112 * doesn't arrive in this period of time then we check if the queue 112 */
113 * was disabled anyway. */ 113#define FALCON_FLUSH_INTERVAL 10
114#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */ 114#define FALCON_FLUSH_POLL_COUNT 100
115 115
116/************************************************************************** 116/**************************************************************************
117 * 117 *
@@ -242,7 +242,7 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
242 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing 242 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
243 * it to be used for event queues, descriptor rings etc. 243 * it to be used for event queues, descriptor rings etc.
244 */ 244 */
245static int 245static void
246falcon_init_special_buffer(struct efx_nic *efx, 246falcon_init_special_buffer(struct efx_nic *efx,
247 struct efx_special_buffer *buffer) 247 struct efx_special_buffer *buffer)
248{ 248{
@@ -266,8 +266,6 @@ falcon_init_special_buffer(struct efx_nic *efx,
266 BUF_OWNER_ID_FBUF, 0); 266 BUF_OWNER_ID_FBUF, 0);
267 falcon_write_sram(efx, &buf_desc, index); 267 falcon_write_sram(efx, &buf_desc, index);
268 } 268 }
269
270 return 0;
271} 269}
272 270
273/* Unmaps a buffer from Falcon and clears the buffer table entries */ 271/* Unmaps a buffer from Falcon and clears the buffer table entries */
@@ -449,16 +447,15 @@ int falcon_probe_tx(struct efx_tx_queue *tx_queue)
449 sizeof(efx_qword_t)); 447 sizeof(efx_qword_t));
450} 448}
451 449
452int falcon_init_tx(struct efx_tx_queue *tx_queue) 450void falcon_init_tx(struct efx_tx_queue *tx_queue)
453{ 451{
454 efx_oword_t tx_desc_ptr; 452 efx_oword_t tx_desc_ptr;
455 struct efx_nic *efx = tx_queue->efx; 453 struct efx_nic *efx = tx_queue->efx;
456 int rc; 454
455 tx_queue->flushed = false;
457 456
458 /* Pin TX descriptor ring */ 457 /* Pin TX descriptor ring */
459 rc = falcon_init_special_buffer(efx, &tx_queue->txd); 458 falcon_init_special_buffer(efx, &tx_queue->txd);
460 if (rc)
461 return rc;
462 459
463 /* Push TX descriptor ring to card */ 460 /* Push TX descriptor ring to card */
464 EFX_POPULATE_OWORD_10(tx_desc_ptr, 461 EFX_POPULATE_OWORD_10(tx_desc_ptr,
@@ -466,7 +463,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
466 TX_ISCSI_DDIG_EN, 0, 463 TX_ISCSI_DDIG_EN, 0,
467 TX_ISCSI_HDIG_EN, 0, 464 TX_ISCSI_HDIG_EN, 0,
468 TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, 465 TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
469 TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum, 466 TX_DESCQ_EVQ_ID, tx_queue->channel->channel,
470 TX_DESCQ_OWNER_ID, 0, 467 TX_DESCQ_OWNER_ID, 0,
471 TX_DESCQ_LABEL, tx_queue->queue, 468 TX_DESCQ_LABEL, tx_queue->queue,
472 TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, 469 TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
@@ -474,9 +471,9 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
474 TX_NON_IP_DROP_DIS_B0, 1); 471 TX_NON_IP_DROP_DIS_B0, 1);
475 472
476 if (falcon_rev(efx) >= FALCON_REV_B0) { 473 if (falcon_rev(efx) >= FALCON_REV_B0) {
477 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); 474 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
478 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); 475 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
479 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); 476 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
480 } 477 }
481 478
482 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 479 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
@@ -485,73 +482,28 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
485 if (falcon_rev(efx) < FALCON_REV_B0) { 482 if (falcon_rev(efx) < FALCON_REV_B0) {
486 efx_oword_t reg; 483 efx_oword_t reg;
487 484
488 BUG_ON(tx_queue->queue >= 128); /* HW limit */ 485 /* Only 128 bits in this register */
486 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
489 487
490 falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1); 488 falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
491 if (efx->net_dev->features & NETIF_F_IP_CSUM) 489 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
492 clear_bit_le(tx_queue->queue, (void *)&reg); 490 clear_bit_le(tx_queue->queue, (void *)&reg);
493 else 491 else
494 set_bit_le(tx_queue->queue, (void *)&reg); 492 set_bit_le(tx_queue->queue, (void *)&reg);
495 falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1); 493 falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
496 } 494 }
497
498 return 0;
499} 495}
500 496
501static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) 497static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
502{ 498{
503 struct efx_nic *efx = tx_queue->efx; 499 struct efx_nic *efx = tx_queue->efx;
504 struct efx_channel *channel = &efx->channel[0];
505 efx_oword_t tx_flush_descq; 500 efx_oword_t tx_flush_descq;
506 unsigned int read_ptr, i;
507 501
508 /* Post a flush command */ 502 /* Post a flush command */
509 EFX_POPULATE_OWORD_2(tx_flush_descq, 503 EFX_POPULATE_OWORD_2(tx_flush_descq,
510 TX_FLUSH_DESCQ_CMD, 1, 504 TX_FLUSH_DESCQ_CMD, 1,
511 TX_FLUSH_DESCQ, tx_queue->queue); 505 TX_FLUSH_DESCQ, tx_queue->queue);
512 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER); 506 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
513 msleep(FALCON_FLUSH_TIMEOUT);
514
515 if (EFX_WORKAROUND_7803(efx))
516 return 0;
517
518 /* Look for a flush completed event */
519 read_ptr = channel->eventq_read_ptr;
520 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
521 efx_qword_t *event = falcon_event(channel, read_ptr);
522 int ev_code, ev_sub_code, ev_queue;
523 if (!falcon_event_present(event))
524 break;
525
526 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
527 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
528 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
529 if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
530 (ev_queue == tx_queue->queue)) {
531 EFX_LOG(efx, "tx queue %d flush command succesful\n",
532 tx_queue->queue);
533 return 0;
534 }
535
536 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
537 }
538
539 if (EFX_WORKAROUND_11557(efx)) {
540 efx_oword_t reg;
541 int enabled;
542
543 falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
544 tx_queue->queue);
545 enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
546 if (!enabled) {
547 EFX_LOG(efx, "tx queue %d disabled without a "
548 "flush event seen\n", tx_queue->queue);
549 return 0;
550 }
551 }
552
553 EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
554 return -ETIMEDOUT;
555} 507}
556 508
557void falcon_fini_tx(struct efx_tx_queue *tx_queue) 509void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -559,9 +511,8 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
559 struct efx_nic *efx = tx_queue->efx; 511 struct efx_nic *efx = tx_queue->efx;
560 efx_oword_t tx_desc_ptr; 512 efx_oword_t tx_desc_ptr;
561 513
562 /* Stop the hardware using the queue */ 514 /* The queue should have been flushed */
563 if (falcon_flush_tx_queue(tx_queue)) 515 WARN_ON(!tx_queue->flushed);
564 EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
565 516
566 /* Remove TX descriptor ring from card */ 517 /* Remove TX descriptor ring from card */
567 EFX_ZERO_OWORD(tx_desc_ptr); 518 EFX_ZERO_OWORD(tx_desc_ptr);
@@ -638,29 +589,28 @@ int falcon_probe_rx(struct efx_rx_queue *rx_queue)
638 sizeof(efx_qword_t)); 589 sizeof(efx_qword_t));
639} 590}
640 591
641int falcon_init_rx(struct efx_rx_queue *rx_queue) 592void falcon_init_rx(struct efx_rx_queue *rx_queue)
642{ 593{
643 efx_oword_t rx_desc_ptr; 594 efx_oword_t rx_desc_ptr;
644 struct efx_nic *efx = rx_queue->efx; 595 struct efx_nic *efx = rx_queue->efx;
645 int rc; 596 bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
646 int is_b0 = falcon_rev(efx) >= FALCON_REV_B0; 597 bool iscsi_digest_en = is_b0;
647 int iscsi_digest_en = is_b0;
648 598
649 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", 599 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
650 rx_queue->queue, rx_queue->rxd.index, 600 rx_queue->queue, rx_queue->rxd.index,
651 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 601 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
652 602
603 rx_queue->flushed = false;
604
653 /* Pin RX descriptor ring */ 605 /* Pin RX descriptor ring */
654 rc = falcon_init_special_buffer(efx, &rx_queue->rxd); 606 falcon_init_special_buffer(efx, &rx_queue->rxd);
655 if (rc)
656 return rc;
657 607
658 /* Push RX descriptor ring to card */ 608 /* Push RX descriptor ring to card */
659 EFX_POPULATE_OWORD_10(rx_desc_ptr, 609 EFX_POPULATE_OWORD_10(rx_desc_ptr,
660 RX_ISCSI_DDIG_EN, iscsi_digest_en, 610 RX_ISCSI_DDIG_EN, iscsi_digest_en,
661 RX_ISCSI_HDIG_EN, iscsi_digest_en, 611 RX_ISCSI_HDIG_EN, iscsi_digest_en,
662 RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 612 RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
663 RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum, 613 RX_DESCQ_EVQ_ID, rx_queue->channel->channel,
664 RX_DESCQ_OWNER_ID, 0, 614 RX_DESCQ_OWNER_ID, 0,
665 RX_DESCQ_LABEL, rx_queue->queue, 615 RX_DESCQ_LABEL, rx_queue->queue,
666 RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, 616 RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
@@ -670,14 +620,11 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
670 RX_DESCQ_EN, 1); 620 RX_DESCQ_EN, 1);
671 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 621 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
672 rx_queue->queue); 622 rx_queue->queue);
673 return 0;
674} 623}
675 624
676static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) 625static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
677{ 626{
678 struct efx_nic *efx = rx_queue->efx; 627 struct efx_nic *efx = rx_queue->efx;
679 struct efx_channel *channel = &efx->channel[0];
680 unsigned int read_ptr, i;
681 efx_oword_t rx_flush_descq; 628 efx_oword_t rx_flush_descq;
682 629
683 /* Post a flush command */ 630 /* Post a flush command */
@@ -685,75 +632,15 @@ static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
685 RX_FLUSH_DESCQ_CMD, 1, 632 RX_FLUSH_DESCQ_CMD, 1,
686 RX_FLUSH_DESCQ, rx_queue->queue); 633 RX_FLUSH_DESCQ, rx_queue->queue);
687 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER); 634 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
688 msleep(FALCON_FLUSH_TIMEOUT);
689
690 if (EFX_WORKAROUND_7803(efx))
691 return 0;
692
693 /* Look for a flush completed event */
694 read_ptr = channel->eventq_read_ptr;
695 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
696 efx_qword_t *event = falcon_event(channel, read_ptr);
697 int ev_code, ev_sub_code, ev_queue, ev_failed;
698 if (!falcon_event_present(event))
699 break;
700
701 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
702 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
703 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
704 ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
705
706 if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
707 (ev_queue == rx_queue->queue)) {
708 if (ev_failed) {
709 EFX_INFO(efx, "rx queue %d flush command "
710 "failed\n", rx_queue->queue);
711 return -EAGAIN;
712 } else {
713 EFX_LOG(efx, "rx queue %d flush command "
714 "succesful\n", rx_queue->queue);
715 return 0;
716 }
717 }
718
719 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
720 }
721
722 if (EFX_WORKAROUND_11557(efx)) {
723 efx_oword_t reg;
724 int enabled;
725
726 falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
727 rx_queue->queue);
728 enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
729 if (!enabled) {
730 EFX_LOG(efx, "rx queue %d disabled without a "
731 "flush event seen\n", rx_queue->queue);
732 return 0;
733 }
734 }
735
736 EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
737 return -ETIMEDOUT;
738} 635}
739 636
740void falcon_fini_rx(struct efx_rx_queue *rx_queue) 637void falcon_fini_rx(struct efx_rx_queue *rx_queue)
741{ 638{
742 efx_oword_t rx_desc_ptr; 639 efx_oword_t rx_desc_ptr;
743 struct efx_nic *efx = rx_queue->efx; 640 struct efx_nic *efx = rx_queue->efx;
744 int i, rc;
745 641
746 /* Try and flush the rx queue. This may need to be repeated */ 642 /* The queue should already have been flushed */
747 for (i = 0; i < 5; i++) { 643 WARN_ON(!rx_queue->flushed);
748 rc = falcon_flush_rx_queue(rx_queue);
749 if (rc == -EAGAIN)
750 continue;
751 break;
752 }
753 if (rc) {
754 EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
755 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
756 }
757 644
758 /* Remove RX descriptor ring from card */ 645 /* Remove RX descriptor ring from card */
759 EFX_ZERO_OWORD(rx_desc_ptr); 646 EFX_ZERO_OWORD(rx_desc_ptr);
@@ -793,7 +680,7 @@ void falcon_eventq_read_ack(struct efx_channel *channel)
793 680
794 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr); 681 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
795 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base, 682 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
796 channel->evqnum); 683 channel->channel);
797} 684}
798 685
799/* Use HW to insert a SW defined event */ 686/* Use HW to insert a SW defined event */
@@ -802,7 +689,7 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
802 efx_oword_t drv_ev_reg; 689 efx_oword_t drv_ev_reg;
803 690
804 EFX_POPULATE_OWORD_2(drv_ev_reg, 691 EFX_POPULATE_OWORD_2(drv_ev_reg,
805 DRV_EV_QID, channel->evqnum, 692 DRV_EV_QID, channel->channel,
806 DRV_EV_DATA, 693 DRV_EV_DATA,
807 EFX_QWORD_FIELD64(*event, WHOLE_EVENT)); 694 EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
808 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER); 695 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
@@ -813,8 +700,8 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
813 * Falcon batches TX completion events; the message we receive is of 700 * Falcon batches TX completion events; the message we receive is of
814 * the form "complete all TX events up to this index". 701 * the form "complete all TX events up to this index".
815 */ 702 */
816static inline void falcon_handle_tx_event(struct efx_channel *channel, 703static void falcon_handle_tx_event(struct efx_channel *channel,
817 efx_qword_t *event) 704 efx_qword_t *event)
818{ 705{
819 unsigned int tx_ev_desc_ptr; 706 unsigned int tx_ev_desc_ptr;
820 unsigned int tx_ev_q_label; 707 unsigned int tx_ev_q_label;
@@ -847,39 +734,19 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
847 } 734 }
848} 735}
849 736
850/* Check received packet's destination MAC address. */
851static int check_dest_mac(struct efx_rx_queue *rx_queue,
852 const efx_qword_t *event)
853{
854 struct efx_rx_buffer *rx_buf;
855 struct efx_nic *efx = rx_queue->efx;
856 int rx_ev_desc_ptr;
857 struct ethhdr *eh;
858
859 if (efx->promiscuous)
860 return 1;
861
862 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
863 rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr);
864 eh = (struct ethhdr *)rx_buf->data;
865 if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN))
866 return 0;
867 return 1;
868}
869
870/* Detect errors included in the rx_evt_pkt_ok bit. */ 737/* Detect errors included in the rx_evt_pkt_ok bit. */
871static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 738static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
872 const efx_qword_t *event, 739 const efx_qword_t *event,
873 unsigned *rx_ev_pkt_ok, 740 bool *rx_ev_pkt_ok,
874 int *discard, int byte_count) 741 bool *discard)
875{ 742{
876 struct efx_nic *efx = rx_queue->efx; 743 struct efx_nic *efx = rx_queue->efx;
877 unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 744 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
878 unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 745 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
879 unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; 746 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
880 unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm; 747 bool rx_ev_other_err, rx_ev_pause_frm;
881 unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; 748 bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
882 int snap, non_ip; 749 unsigned rx_ev_pkt_type;
883 750
884 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); 751 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
885 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); 752 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
@@ -903,41 +770,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
903 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 770 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
904 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 771 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
905 772
906 snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) ||
907 (rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE);
908 non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE);
909
910 /* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the
911 * length field of an LLC frame, which sets TOBE_DISC. We could set
912 * PASS_LEN_ERR, but we want the MAC to filter out short frames (to
913 * protect the RX block).
914 *
915 * bug5475 - LLC/SNAP: Falcon identifies SNAP packets.
916 * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag.
917 * LLC can't encapsulate IP, so by definition
918 * these packets are NON_IP.
919 *
920 * Unicast mismatch will also cause TOBE_DISC, so the driver needs
921 * to check this.
922 */
923 if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) {
924 /* If all the other flags are zero then we can state the
925 * entire packet is ok, which will flag to the kernel not
926 * to recalculate checksums.
927 */
928 if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm))
929 *rx_ev_pkt_ok = 1;
930
931 rx_ev_tobe_disc = 0;
932
933 /* TOBE_DISC is set for unicast mismatch. But given that
934 * we can't trust TOBE_DISC here, we must validate the dest
935 * MAC address ourselves.
936 */
937 if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event))
938 rx_ev_tobe_disc = 1;
939 }
940
941 /* Count errors that are not in MAC stats. */ 773 /* Count errors that are not in MAC stats. */
942 if (rx_ev_frm_trunc) 774 if (rx_ev_frm_trunc)
943 ++rx_queue->channel->n_rx_frm_trunc; 775 ++rx_queue->channel->n_rx_frm_trunc;
@@ -961,7 +793,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
961#ifdef EFX_ENABLE_DEBUG 793#ifdef EFX_ENABLE_DEBUG
962 if (rx_ev_other_err) { 794 if (rx_ev_other_err) {
963 EFX_INFO_RL(efx, " RX queue %d unexpected RX event " 795 EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
964 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n", 796 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
965 rx_queue->queue, EFX_QWORD_VAL(*event), 797 rx_queue->queue, EFX_QWORD_VAL(*event),
966 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 798 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
967 rx_ev_ip_hdr_chksum_err ? 799 rx_ev_ip_hdr_chksum_err ?
@@ -972,8 +804,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
972 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 804 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
973 rx_ev_drib_nib ? " [DRIB_NIB]" : "", 805 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
974 rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 806 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
975 rx_ev_pause_frm ? " [PAUSE]" : "", 807 rx_ev_pause_frm ? " [PAUSE]" : "");
976 snap ? " [SNAP/LLC]" : "");
977 } 808 }
978#endif 809#endif
979 810
@@ -1006,13 +837,13 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
1006 * Also "is multicast" and "matches multicast filter" flags can be used to 837 * Also "is multicast" and "matches multicast filter" flags can be used to
1007 * discard non-matching multicast packets. 838 * discard non-matching multicast packets.
1008 */ 839 */
1009static inline int falcon_handle_rx_event(struct efx_channel *channel, 840static void falcon_handle_rx_event(struct efx_channel *channel,
1010 const efx_qword_t *event) 841 const efx_qword_t *event)
1011{ 842{
1012 unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt; 843 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
1013 unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt; 844 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
1014 unsigned expected_ptr; 845 unsigned expected_ptr;
1015 int discard = 0, checksummed; 846 bool rx_ev_pkt_ok, discard = false, checksummed;
1016 struct efx_rx_queue *rx_queue; 847 struct efx_rx_queue *rx_queue;
1017 struct efx_nic *efx = channel->efx; 848 struct efx_nic *efx = channel->efx;
1018 849
@@ -1022,16 +853,14 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel,
1022 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); 853 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
1023 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT)); 854 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
1024 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1); 855 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
856 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);
1025 857
1026 rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL); 858 rx_queue = &efx->rx_queue[channel->channel];
1027 rx_queue = &efx->rx_queue[rx_ev_q_label];
1028 859
1029 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); 860 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
1030 expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; 861 expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
1031 if (unlikely(rx_ev_desc_ptr != expected_ptr)) { 862 if (unlikely(rx_ev_desc_ptr != expected_ptr))
1032 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 863 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
1033 return rx_ev_q_label;
1034 }
1035 864
1036 if (likely(rx_ev_pkt_ok)) { 865 if (likely(rx_ev_pkt_ok)) {
1037 /* If packet is marked as OK and packet type is TCP/IPv4 or 866 /* If packet is marked as OK and packet type is TCP/IPv4 or
@@ -1040,8 +869,8 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel,
1040 checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type); 869 checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
1041 } else { 870 } else {
1042 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, 871 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
1043 &discard, rx_ev_byte_cnt); 872 &discard);
1044 checksummed = 0; 873 checksummed = false;
1045 } 874 }
1046 875
1047 /* Detect multicast packets that didn't match the filter */ 876 /* Detect multicast packets that didn't match the filter */
@@ -1051,14 +880,12 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel,
1051 EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH); 880 EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
1052 881
1053 if (unlikely(!rx_ev_mcast_hash_match)) 882 if (unlikely(!rx_ev_mcast_hash_match))
1054 discard = 1; 883 discard = true;
1055 } 884 }
1056 885
1057 /* Handle received packet */ 886 /* Handle received packet */
1058 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, 887 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
1059 checksummed, discard); 888 checksummed, discard);
1060
1061 return rx_ev_q_label;
1062} 889}
1063 890
1064/* Global events are basically PHY events */ 891/* Global events are basically PHY events */
@@ -1066,23 +893,23 @@ static void falcon_handle_global_event(struct efx_channel *channel,
1066 efx_qword_t *event) 893 efx_qword_t *event)
1067{ 894{
1068 struct efx_nic *efx = channel->efx; 895 struct efx_nic *efx = channel->efx;
1069 int is_phy_event = 0, handled = 0; 896 bool is_phy_event = false, handled = false;
1070 897
1071 /* Check for interrupt on either port. Some boards have a 898 /* Check for interrupt on either port. Some boards have a
1072 * single PHY wired to the interrupt line for port 1. */ 899 * single PHY wired to the interrupt line for port 1. */
1073 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || 900 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
1074 EFX_QWORD_FIELD(*event, G_PHY1_INTR) || 901 EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
1075 EFX_QWORD_FIELD(*event, XG_PHY_INTR)) 902 EFX_QWORD_FIELD(*event, XG_PHY_INTR))
1076 is_phy_event = 1; 903 is_phy_event = true;
1077 904
1078 if ((falcon_rev(efx) >= FALCON_REV_B0) && 905 if ((falcon_rev(efx) >= FALCON_REV_B0) &&
1079 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) 906 EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0))
1080 is_phy_event = 1; 907 is_phy_event = true;
1081 908
1082 if (is_phy_event) { 909 if (is_phy_event) {
1083 efx->phy_op->clear_interrupt(efx); 910 efx->phy_op->clear_interrupt(efx);
1084 queue_work(efx->workqueue, &efx->reconfigure_work); 911 queue_work(efx->workqueue, &efx->reconfigure_work);
1085 handled = 1; 912 handled = true;
1086 } 913 }
1087 914
1088 if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) { 915 if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
@@ -1092,7 +919,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
1092 atomic_inc(&efx->rx_reset); 919 atomic_inc(&efx->rx_reset);
1093 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? 920 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
1094 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 921 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1095 handled = 1; 922 handled = true;
1096 } 923 }
1097 924
1098 if (!handled) 925 if (!handled)
@@ -1163,13 +990,12 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
1163 } 990 }
1164} 991}
1165 992
1166int falcon_process_eventq(struct efx_channel *channel, int *rx_quota) 993int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
1167{ 994{
1168 unsigned int read_ptr; 995 unsigned int read_ptr;
1169 efx_qword_t event, *p_event; 996 efx_qword_t event, *p_event;
1170 int ev_code; 997 int ev_code;
1171 int rxq; 998 int rx_packets = 0;
1172 int rxdmaqs = 0;
1173 999
1174 read_ptr = channel->eventq_read_ptr; 1000 read_ptr = channel->eventq_read_ptr;
1175 1001
@@ -1191,9 +1017,8 @@ int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
1191 1017
1192 switch (ev_code) { 1018 switch (ev_code) {
1193 case RX_IP_EV_DECODE: 1019 case RX_IP_EV_DECODE:
1194 rxq = falcon_handle_rx_event(channel, &event); 1020 falcon_handle_rx_event(channel, &event);
1195 rxdmaqs |= (1 << rxq); 1021 ++rx_packets;
1196 (*rx_quota)--;
1197 break; 1022 break;
1198 case TX_IP_EV_DECODE: 1023 case TX_IP_EV_DECODE:
1199 falcon_handle_tx_event(channel, &event); 1024 falcon_handle_tx_event(channel, &event);
@@ -1220,10 +1045,10 @@ int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
1220 /* Increment read pointer */ 1045 /* Increment read pointer */
1221 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 1046 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
1222 1047
1223 } while (*rx_quota); 1048 } while (rx_packets < rx_quota);
1224 1049
1225 channel->eventq_read_ptr = read_ptr; 1050 channel->eventq_read_ptr = read_ptr;
1226 return rxdmaqs; 1051 return rx_packets;
1227} 1052}
1228 1053
1229void falcon_set_int_moderation(struct efx_channel *channel) 1054void falcon_set_int_moderation(struct efx_channel *channel)
@@ -1251,7 +1076,7 @@ void falcon_set_int_moderation(struct efx_channel *channel)
1251 TIMER_VAL, 0); 1076 TIMER_VAL, 0);
1252 } 1077 }
1253 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, 1078 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
1254 channel->evqnum); 1079 channel->channel);
1255 1080
1256} 1081}
1257 1082
@@ -1265,20 +1090,17 @@ int falcon_probe_eventq(struct efx_channel *channel)
1265 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); 1090 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
1266} 1091}
1267 1092
1268int falcon_init_eventq(struct efx_channel *channel) 1093void falcon_init_eventq(struct efx_channel *channel)
1269{ 1094{
1270 efx_oword_t evq_ptr; 1095 efx_oword_t evq_ptr;
1271 struct efx_nic *efx = channel->efx; 1096 struct efx_nic *efx = channel->efx;
1272 int rc;
1273 1097
1274 EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n", 1098 EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
1275 channel->channel, channel->eventq.index, 1099 channel->channel, channel->eventq.index,
1276 channel->eventq.index + channel->eventq.entries - 1); 1100 channel->eventq.index + channel->eventq.entries - 1);
1277 1101
1278 /* Pin event queue buffer */ 1102 /* Pin event queue buffer */
1279 rc = falcon_init_special_buffer(efx, &channel->eventq); 1103 falcon_init_special_buffer(efx, &channel->eventq);
1280 if (rc)
1281 return rc;
1282 1104
1283 /* Fill event queue with all ones (i.e. empty events) */ 1105 /* Fill event queue with all ones (i.e. empty events) */
1284 memset(channel->eventq.addr, 0xff, channel->eventq.len); 1106 memset(channel->eventq.addr, 0xff, channel->eventq.len);
@@ -1289,11 +1111,9 @@ int falcon_init_eventq(struct efx_channel *channel)
1289 EVQ_SIZE, FALCON_EVQ_ORDER, 1111 EVQ_SIZE, FALCON_EVQ_ORDER,
1290 EVQ_BUF_BASE_ID, channel->eventq.index); 1112 EVQ_BUF_BASE_ID, channel->eventq.index);
1291 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, 1113 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
1292 channel->evqnum); 1114 channel->channel);
1293 1115
1294 falcon_set_int_moderation(channel); 1116 falcon_set_int_moderation(channel);
1295
1296 return 0;
1297} 1117}
1298 1118
1299void falcon_fini_eventq(struct efx_channel *channel) 1119void falcon_fini_eventq(struct efx_channel *channel)
@@ -1304,7 +1124,7 @@ void falcon_fini_eventq(struct efx_channel *channel)
1304 /* Remove event queue from card */ 1124 /* Remove event queue from card */
1305 EFX_ZERO_OWORD(eventq_ptr); 1125 EFX_ZERO_OWORD(eventq_ptr);
1306 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base, 1126 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
1307 channel->evqnum); 1127 channel->channel);
1308 1128
1309 /* Unpin event queue */ 1129 /* Unpin event queue */
1310 falcon_fini_special_buffer(efx, &channel->eventq); 1130 falcon_fini_special_buffer(efx, &channel->eventq);
@@ -1331,6 +1151,121 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1331 falcon_generate_event(channel, &test_event); 1151 falcon_generate_event(channel, &test_event);
1332} 1152}
1333 1153
1154/**************************************************************************
1155 *
1156 * Flush handling
1157 *
1158 **************************************************************************/
1159
1160
1161static void falcon_poll_flush_events(struct efx_nic *efx)
1162{
1163 struct efx_channel *channel = &efx->channel[0];
1164 struct efx_tx_queue *tx_queue;
1165 struct efx_rx_queue *rx_queue;
1166 unsigned int read_ptr, i;
1167
1168 read_ptr = channel->eventq_read_ptr;
1169 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
1170 efx_qword_t *event = falcon_event(channel, read_ptr);
1171 int ev_code, ev_sub_code, ev_queue;
1172 bool ev_failed;
1173 if (!falcon_event_present(event))
1174 break;
1175
1176 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
1177 if (ev_code != DRIVER_EV_DECODE)
1178 continue;
1179
1180 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
1181 switch (ev_sub_code) {
1182 case TX_DESCQ_FLS_DONE_EV_DECODE:
1183 ev_queue = EFX_QWORD_FIELD(*event,
1184 DRIVER_EV_TX_DESCQ_ID);
1185 if (ev_queue < EFX_TX_QUEUE_COUNT) {
1186 tx_queue = efx->tx_queue + ev_queue;
1187 tx_queue->flushed = true;
1188 }
1189 break;
1190 case RX_DESCQ_FLS_DONE_EV_DECODE:
1191 ev_queue = EFX_QWORD_FIELD(*event,
1192 DRIVER_EV_RX_DESCQ_ID);
1193 ev_failed = EFX_QWORD_FIELD(*event,
1194 DRIVER_EV_RX_FLUSH_FAIL);
1195 if (ev_queue < efx->n_rx_queues) {
1196 rx_queue = efx->rx_queue + ev_queue;
1197
1198 /* retry the rx flush */
1199 if (ev_failed)
1200 falcon_flush_rx_queue(rx_queue);
1201 else
1202 rx_queue->flushed = true;
1203 }
1204 break;
1205 }
1206
1207 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
1208 }
1209}
1210
1211/* Handle tx and rx flushes at the same time, since they run in
1212 * parallel in the hardware and there's no reason for us to
1213 * serialise them */
1214int falcon_flush_queues(struct efx_nic *efx)
1215{
1216 struct efx_rx_queue *rx_queue;
1217 struct efx_tx_queue *tx_queue;
1218 int i;
1219 bool outstanding;
1220
1221 /* Issue flush requests */
1222 efx_for_each_tx_queue(tx_queue, efx) {
1223 tx_queue->flushed = false;
1224 falcon_flush_tx_queue(tx_queue);
1225 }
1226 efx_for_each_rx_queue(rx_queue, efx) {
1227 rx_queue->flushed = false;
1228 falcon_flush_rx_queue(rx_queue);
1229 }
1230
1231 /* Poll the evq looking for flush completions. Since we're not pushing
1232 * any more rx or tx descriptors at this point, we're in no danger of
1233 * overflowing the evq whilst we wait */
1234 for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
1235 msleep(FALCON_FLUSH_INTERVAL);
1236 falcon_poll_flush_events(efx);
1237
1238 /* Check if every queue has been succesfully flushed */
1239 outstanding = false;
1240 efx_for_each_tx_queue(tx_queue, efx)
1241 outstanding |= !tx_queue->flushed;
1242 efx_for_each_rx_queue(rx_queue, efx)
1243 outstanding |= !rx_queue->flushed;
1244 if (!outstanding)
1245 return 0;
1246 }
1247
1248 /* Mark the queues as all flushed. We're going to return failure
1249 * leading to a reset, or fake up success anyway. "flushed" now
1250 * indicates that we tried to flush. */
1251 efx_for_each_tx_queue(tx_queue, efx) {
1252 if (!tx_queue->flushed)
1253 EFX_ERR(efx, "tx queue %d flush command timed out\n",
1254 tx_queue->queue);
1255 tx_queue->flushed = true;
1256 }
1257 efx_for_each_rx_queue(rx_queue, efx) {
1258 if (!rx_queue->flushed)
1259 EFX_ERR(efx, "rx queue %d flush command timed out\n",
1260 rx_queue->queue);
1261 rx_queue->flushed = true;
1262 }
1263
1264 if (EFX_WORKAROUND_7803(efx))
1265 return 0;
1266
1267 return -ETIMEDOUT;
1268}
1334 1269
1335/************************************************************************** 1270/**************************************************************************
1336 * 1271 *
@@ -1371,7 +1306,7 @@ void falcon_enable_interrupts(struct efx_nic *efx)
1371 1306
1372 /* Force processing of all the channels to get the EVQ RPTRs up to 1307 /* Force processing of all the channels to get the EVQ RPTRs up to
1373 date */ 1308 date */
1374 efx_for_each_channel_with_interrupt(channel, efx) 1309 efx_for_each_channel(channel, efx)
1375 efx_schedule_channel(channel); 1310 efx_schedule_channel(channel);
1376} 1311}
1377 1312
@@ -1439,10 +1374,11 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1439 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); 1374 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1440 } 1375 }
1441 1376
1442 /* Disable DMA bus mastering on both devices */ 1377 /* Disable both devices */
1443 pci_disable_device(efx->pci_dev); 1378 pci_disable_device(efx->pci_dev);
1444 if (FALCON_IS_DUAL_FUNC(efx)) 1379 if (FALCON_IS_DUAL_FUNC(efx))
1445 pci_disable_device(nic_data->pci_dev2); 1380 pci_disable_device(nic_data->pci_dev2);
1381 falcon_disable_interrupts(efx);
1446 1382
1447 if (++n_int_errors < FALCON_MAX_INT_ERRORS) { 1383 if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
1448 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); 1384 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
@@ -1589,7 +1525,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1589 offset < RX_RSS_INDIR_TBL_B0 + 0x800; 1525 offset < RX_RSS_INDIR_TBL_B0 + 0x800;
1590 offset += 0x10) { 1526 offset += 0x10) {
1591 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0, 1527 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
1592 i % efx->rss_queues); 1528 i % efx->n_rx_queues);
1593 falcon_writel(efx, &dword, offset); 1529 falcon_writel(efx, &dword, offset);
1594 i++; 1530 i++;
1595 } 1531 }
@@ -1621,7 +1557,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1621 } 1557 }
1622 1558
1623 /* Hook MSI or MSI-X interrupt */ 1559 /* Hook MSI or MSI-X interrupt */
1624 efx_for_each_channel_with_interrupt(channel, efx) { 1560 efx_for_each_channel(channel, efx) {
1625 rc = request_irq(channel->irq, falcon_msi_interrupt, 1561 rc = request_irq(channel->irq, falcon_msi_interrupt,
1626 IRQF_PROBE_SHARED, /* Not shared */ 1562 IRQF_PROBE_SHARED, /* Not shared */
1627 efx->name, channel); 1563 efx->name, channel);
@@ -1634,7 +1570,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1634 return 0; 1570 return 0;
1635 1571
1636 fail2: 1572 fail2:
1637 efx_for_each_channel_with_interrupt(channel, efx) 1573 efx_for_each_channel(channel, efx)
1638 free_irq(channel->irq, channel); 1574 free_irq(channel->irq, channel);
1639 fail1: 1575 fail1:
1640 return rc; 1576 return rc;
@@ -1646,7 +1582,7 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1646 efx_oword_t reg; 1582 efx_oword_t reg;
1647 1583
1648 /* Disable MSI/MSI-X interrupts */ 1584 /* Disable MSI/MSI-X interrupts */
1649 efx_for_each_channel_with_interrupt(channel, efx) { 1585 efx_for_each_channel(channel, efx) {
1650 if (channel->irq) 1586 if (channel->irq)
1651 free_irq(channel->irq, channel); 1587 free_irq(channel->irq, channel);
1652 } 1588 }
@@ -1669,69 +1605,200 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1669 ************************************************************************** 1605 **************************************************************************
1670 */ 1606 */
1671 1607
1672#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t) 1608#define FALCON_SPI_MAX_LEN ((unsigned) sizeof(efx_oword_t))
1673 1609
1674/* Wait for SPI command completion */ 1610/* Wait for SPI command completion */
1675static int falcon_spi_wait(struct efx_nic *efx) 1611static int falcon_spi_wait(struct efx_nic *efx)
1676{ 1612{
1613 unsigned long timeout = jiffies + DIV_ROUND_UP(HZ, 10);
1677 efx_oword_t reg; 1614 efx_oword_t reg;
1678 int cmd_en, timer_active; 1615 bool cmd_en, timer_active;
1679 int count;
1680 1616
1681 count = 0; 1617 for (;;) {
1682 do {
1683 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER); 1618 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
1684 cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN); 1619 cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
1685 timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE); 1620 timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
1686 if (!cmd_en && !timer_active) 1621 if (!cmd_en && !timer_active)
1687 return 0; 1622 return 0;
1688 udelay(10); 1623 if (time_after_eq(jiffies, timeout)) {
1689 } while (++count < 10000); /* wait upto 100msec */ 1624 EFX_ERR(efx, "timed out waiting for SPI\n");
1690 EFX_ERR(efx, "timed out waiting for SPI\n"); 1625 return -ETIMEDOUT;
1691 return -ETIMEDOUT; 1626 }
1627 cpu_relax();
1628 }
1692} 1629}
1693 1630
1694static int 1631static int falcon_spi_cmd(const struct efx_spi_device *spi,
1695falcon_spi_read(struct efx_nic *efx, int device_id, unsigned int command, 1632 unsigned int command, int address,
1696 unsigned int address, unsigned int addr_len, 1633 const void *in, void *out, unsigned int len)
1697 void *data, unsigned int len)
1698{ 1634{
1635 struct efx_nic *efx = spi->efx;
1636 bool addressed = (address >= 0);
1637 bool reading = (out != NULL);
1699 efx_oword_t reg; 1638 efx_oword_t reg;
1700 int rc; 1639 int rc;
1701 1640
1702 BUG_ON(len > FALCON_SPI_MAX_LEN); 1641 /* Input validation */
1642 if (len > FALCON_SPI_MAX_LEN)
1643 return -EINVAL;
1703 1644
1704 /* Check SPI not currently being accessed */ 1645 /* Check SPI not currently being accessed */
1705 rc = falcon_spi_wait(efx); 1646 rc = falcon_spi_wait(efx);
1706 if (rc) 1647 if (rc)
1707 return rc; 1648 return rc;
1708 1649
1709 /* Program address register */ 1650 /* Program address register, if we have an address */
1710 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); 1651 if (addressed) {
1711 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER); 1652 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
1653 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
1654 }
1655
1656 /* Program data register, if we have data */
1657 if (in != NULL) {
1658 memcpy(&reg, in, len);
1659 falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
1660 }
1712 1661
1713 /* Issue read command */ 1662 /* Issue read/write command */
1714 EFX_POPULATE_OWORD_7(reg, 1663 EFX_POPULATE_OWORD_7(reg,
1715 EE_SPI_HCMD_CMD_EN, 1, 1664 EE_SPI_HCMD_CMD_EN, 1,
1716 EE_SPI_HCMD_SF_SEL, device_id, 1665 EE_SPI_HCMD_SF_SEL, spi->device_id,
1717 EE_SPI_HCMD_DABCNT, len, 1666 EE_SPI_HCMD_DABCNT, len,
1718 EE_SPI_HCMD_READ, EE_SPI_READ, 1667 EE_SPI_HCMD_READ, reading,
1719 EE_SPI_HCMD_DUBCNT, 0, 1668 EE_SPI_HCMD_DUBCNT, 0,
1720 EE_SPI_HCMD_ADBCNT, addr_len, 1669 EE_SPI_HCMD_ADBCNT,
1670 (addressed ? spi->addr_len : 0),
1721 EE_SPI_HCMD_ENC, command); 1671 EE_SPI_HCMD_ENC, command);
1722 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER); 1672 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
1723 1673
1724 /* Wait for read to complete */ 1674 /* Wait for read/write to complete */
1725 rc = falcon_spi_wait(efx); 1675 rc = falcon_spi_wait(efx);
1726 if (rc) 1676 if (rc)
1727 return rc; 1677 return rc;
1728 1678
1729 /* Read data */ 1679 /* Read data */
1730 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER); 1680 if (out != NULL) {
1731 memcpy(data, &reg, len); 1681 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
1682 memcpy(out, &reg, len);
1683 }
1684
1732 return 0; 1685 return 0;
1733} 1686}
1734 1687
1688static unsigned int
1689falcon_spi_write_limit(const struct efx_spi_device *spi, unsigned int start)
1690{
1691 return min(FALCON_SPI_MAX_LEN,
1692 (spi->block_size - (start & (spi->block_size - 1))));
1693}
1694
1695static inline u8
1696efx_spi_munge_command(const struct efx_spi_device *spi,
1697 const u8 command, const unsigned int address)
1698{
1699 return command | (((address >> 8) & spi->munge_address) << 3);
1700}
1701
1702
1703static int falcon_spi_fast_wait(const struct efx_spi_device *spi)
1704{
1705 u8 status;
1706 int i, rc;
1707
1708 /* Wait up to 1000us for flash/EEPROM to finish a fast operation. */
1709 for (i = 0; i < 50; i++) {
1710 udelay(20);
1711
1712 rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
1713 &status, sizeof(status));
1714 if (rc)
1715 return rc;
1716 if (!(status & SPI_STATUS_NRDY))
1717 return 0;
1718 }
1719 EFX_ERR(spi->efx,
1720 "timed out waiting for device %d last status=0x%02x\n",
1721 spi->device_id, status);
1722 return -ETIMEDOUT;
1723}
1724
1725int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
1726 size_t len, size_t *retlen, u8 *buffer)
1727{
1728 unsigned int command, block_len, pos = 0;
1729 int rc = 0;
1730
1731 while (pos < len) {
1732 block_len = min((unsigned int)len - pos,
1733 FALCON_SPI_MAX_LEN);
1734
1735 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
1736 rc = falcon_spi_cmd(spi, command, start + pos, NULL,
1737 buffer + pos, block_len);
1738 if (rc)
1739 break;
1740 pos += block_len;
1741
1742 /* Avoid locking up the system */
1743 cond_resched();
1744 if (signal_pending(current)) {
1745 rc = -EINTR;
1746 break;
1747 }
1748 }
1749
1750 if (retlen)
1751 *retlen = pos;
1752 return rc;
1753}
1754
1755int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
1756 size_t len, size_t *retlen, const u8 *buffer)
1757{
1758 u8 verify_buffer[FALCON_SPI_MAX_LEN];
1759 unsigned int command, block_len, pos = 0;
1760 int rc = 0;
1761
1762 while (pos < len) {
1763 rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
1764 if (rc)
1765 break;
1766
1767 block_len = min((unsigned int)len - pos,
1768 falcon_spi_write_limit(spi, start + pos));
1769 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
1770 rc = falcon_spi_cmd(spi, command, start + pos,
1771 buffer + pos, NULL, block_len);
1772 if (rc)
1773 break;
1774
1775 rc = falcon_spi_fast_wait(spi);
1776 if (rc)
1777 break;
1778
1779 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
1780 rc = falcon_spi_cmd(spi, command, start + pos,
1781 NULL, verify_buffer, block_len);
1782 if (memcmp(verify_buffer, buffer + pos, block_len)) {
1783 rc = -EIO;
1784 break;
1785 }
1786
1787 pos += block_len;
1788
1789 /* Avoid locking up the system */
1790 cond_resched();
1791 if (signal_pending(current)) {
1792 rc = -EINTR;
1793 break;
1794 }
1795 }
1796
1797 if (retlen)
1798 *retlen = pos;
1799 return rc;
1800}
1801
1735/************************************************************************** 1802/**************************************************************************
1736 * 1803 *
1737 * MAC wrapper 1804 * MAC wrapper
@@ -1812,7 +1879,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1812{ 1879{
1813 efx_oword_t reg; 1880 efx_oword_t reg;
1814 int link_speed; 1881 int link_speed;
1815 unsigned int tx_fc; 1882 bool tx_fc;
1816 1883
1817 if (efx->link_options & GM_LPA_10000) 1884 if (efx->link_options & GM_LPA_10000)
1818 link_speed = 0x3; 1885 link_speed = 0x3;
@@ -1847,7 +1914,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1847 /* Transmission of pause frames when RX crosses the threshold is 1914 /* Transmission of pause frames when RX crosses the threshold is
1848 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. 1915 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
1849 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */ 1916 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
1850 tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0; 1917 tx_fc = !!(efx->flow_control & EFX_FC_TX);
1851 falcon_read(efx, &reg, RX_CFG_REG_KER); 1918 falcon_read(efx, &reg, RX_CFG_REG_KER);
1852 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 1919 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
1853 1920
@@ -1887,8 +1954,10 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
1887 1954
1888 /* Wait for transfer to complete */ 1955 /* Wait for transfer to complete */
1889 for (i = 0; i < 400; i++) { 1956 for (i = 0; i < 400; i++) {
1890 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) 1957 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
1958 rmb(); /* Ensure the stats are valid. */
1891 return 0; 1959 return 0;
1960 }
1892 udelay(10); 1961 udelay(10);
1893 } 1962 }
1894 1963
@@ -1951,7 +2020,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
1951static void falcon_mdio_write(struct net_device *net_dev, int phy_id, 2020static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
1952 int addr, int value) 2021 int addr, int value)
1953{ 2022{
1954 struct efx_nic *efx = net_dev->priv; 2023 struct efx_nic *efx = netdev_priv(net_dev);
1955 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; 2024 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
1956 efx_oword_t reg; 2025 efx_oword_t reg;
1957 2026
@@ -2019,7 +2088,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
2019 * could be read, -1 will be returned. */ 2088 * could be read, -1 will be returned. */
2020static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) 2089static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
2021{ 2090{
2022 struct efx_nic *efx = net_dev->priv; 2091 struct efx_nic *efx = netdev_priv(net_dev);
2023 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; 2092 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
2024 efx_oword_t reg; 2093 efx_oword_t reg;
2025 int value = -1; 2094 int value = -1;
@@ -2120,7 +2189,7 @@ int falcon_probe_port(struct efx_nic *efx)
2120 return rc; 2189 return rc;
2121 2190
2122 /* Set up GMII structure for PHY */ 2191 /* Set up GMII structure for PHY */
2123 efx->mii.supports_gmii = 1; 2192 efx->mii.supports_gmii = true;
2124 falcon_init_mdio(&efx->mii); 2193 falcon_init_mdio(&efx->mii);
2125 2194
2126 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ 2195 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
@@ -2168,6 +2237,170 @@ void falcon_set_multicast_hash(struct efx_nic *efx)
2168 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER); 2237 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
2169} 2238}
2170 2239
2240
2241/**************************************************************************
2242 *
2243 * Falcon test code
2244 *
2245 **************************************************************************/
2246
2247int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2248{
2249 struct falcon_nvconfig *nvconfig;
2250 struct efx_spi_device *spi;
2251 void *region;
2252 int rc, magic_num, struct_ver;
2253 __le16 *word, *limit;
2254 u32 csum;
2255
2256 region = kmalloc(NVCONFIG_END, GFP_KERNEL);
2257 if (!region)
2258 return -ENOMEM;
2259 nvconfig = region + NVCONFIG_OFFSET;
2260
2261 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
2262 rc = falcon_spi_read(spi, 0, NVCONFIG_END, NULL, region);
2263 if (rc) {
2264 EFX_ERR(efx, "Failed to read %s\n",
2265 efx->spi_flash ? "flash" : "EEPROM");
2266 rc = -EIO;
2267 goto out;
2268 }
2269
2270 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2271 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2272
2273 rc = -EINVAL;
2274 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) {
2275 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2276 goto out;
2277 }
2278 if (struct_ver < 2) {
2279 EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
2280 goto out;
2281 } else if (struct_ver < 4) {
2282 word = &nvconfig->board_magic_num;
2283 limit = (__le16 *) (nvconfig + 1);
2284 } else {
2285 word = region;
2286 limit = region + NVCONFIG_END;
2287 }
2288 for (csum = 0; word < limit; ++word)
2289 csum += le16_to_cpu(*word);
2290
2291 if (~csum & 0xffff) {
2292 EFX_ERR(efx, "NVRAM has incorrect checksum\n");
2293 goto out;
2294 }
2295
2296 rc = 0;
2297 if (nvconfig_out)
2298 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
2299
2300 out:
2301 kfree(region);
2302 return rc;
2303}
2304
2305/* Registers tested in the falcon register test */
2306static struct {
2307 unsigned address;
2308 efx_oword_t mask;
2309} efx_test_registers[] = {
2310 { ADR_REGION_REG_KER,
2311 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2312 { RX_CFG_REG_KER,
2313 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
2314 { TX_CFG_REG_KER,
2315 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
2316 { TX_CFG2_REG_KER,
2317 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
2318 { MAC0_CTRL_REG_KER,
2319 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
2320 { SRM_TX_DC_CFG_REG_KER,
2321 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
2322 { RX_DC_CFG_REG_KER,
2323 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
2324 { RX_DC_PF_WM_REG_KER,
2325 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2326 { DP_CTRL_REG,
2327 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2328 { XM_GLB_CFG_REG,
2329 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2330 { XM_TX_CFG_REG,
2331 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
2332 { XM_RX_CFG_REG,
2333 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
2334 { XM_RX_PARAM_REG,
2335 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
2336 { XM_FC_REG,
2337 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
2338 { XM_ADR_LO_REG,
2339 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
2340 { XX_SD_CTL_REG,
2341 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
2342};
2343
2344static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
2345 const efx_oword_t *mask)
2346{
2347 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
2348 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
2349}
2350
2351int falcon_test_registers(struct efx_nic *efx)
2352{
2353 unsigned address = 0, i, j;
2354 efx_oword_t mask, imask, original, reg, buf;
2355
2356 /* Falcon should be in loopback to isolate the XMAC from the PHY */
2357 WARN_ON(!LOOPBACK_INTERNAL(efx));
2358
2359 for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
2360 address = efx_test_registers[i].address;
2361 mask = imask = efx_test_registers[i].mask;
2362 EFX_INVERT_OWORD(imask);
2363
2364 falcon_read(efx, &original, address);
2365
2366 /* bit sweep on and off */
2367 for (j = 0; j < 128; j++) {
2368 if (!EFX_EXTRACT_OWORD32(mask, j, j))
2369 continue;
2370
2371 /* Test this testable bit can be set in isolation */
2372 EFX_AND_OWORD(reg, original, mask);
2373 EFX_SET_OWORD32(reg, j, j, 1);
2374
2375 falcon_write(efx, &reg, address);
2376 falcon_read(efx, &buf, address);
2377
2378 if (efx_masked_compare_oword(&reg, &buf, &mask))
2379 goto fail;
2380
2381 /* Test this testable bit can be cleared in isolation */
2382 EFX_OR_OWORD(reg, original, mask);
2383 EFX_SET_OWORD32(reg, j, j, 0);
2384
2385 falcon_write(efx, &reg, address);
2386 falcon_read(efx, &buf, address);
2387
2388 if (efx_masked_compare_oword(&reg, &buf, &mask))
2389 goto fail;
2390 }
2391
2392 falcon_write(efx, &original, address);
2393 }
2394
2395 return 0;
2396
2397fail:
2398 EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
2399 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
2400 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
2401 return -EIO;
2402}
2403
2171/************************************************************************** 2404/**************************************************************************
2172 * 2405 *
2173 * Device reset 2406 * Device reset
@@ -2305,68 +2538,103 @@ static int falcon_reset_sram(struct efx_nic *efx)
2305 return -ETIMEDOUT; 2538 return -ETIMEDOUT;
2306} 2539}
2307 2540
2541static int falcon_spi_device_init(struct efx_nic *efx,
2542 struct efx_spi_device **spi_device_ret,
2543 unsigned int device_id, u32 device_type)
2544{
2545 struct efx_spi_device *spi_device;
2546
2547 if (device_type != 0) {
2548 spi_device = kmalloc(sizeof(*spi_device), GFP_KERNEL);
2549 if (!spi_device)
2550 return -ENOMEM;
2551 spi_device->device_id = device_id;
2552 spi_device->size =
2553 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2554 spi_device->addr_len =
2555 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2556 spi_device->munge_address = (spi_device->size == 1 << 9 &&
2557 spi_device->addr_len == 1);
2558 spi_device->block_size =
2559 1 << SPI_DEV_TYPE_FIELD(device_type,
2560 SPI_DEV_TYPE_BLOCK_SIZE);
2561
2562 spi_device->efx = efx;
2563 } else {
2564 spi_device = NULL;
2565 }
2566
2567 kfree(*spi_device_ret);
2568 *spi_device_ret = spi_device;
2569 return 0;
2570}
2571
2572
2573static void falcon_remove_spi_devices(struct efx_nic *efx)
2574{
2575 kfree(efx->spi_eeprom);
2576 efx->spi_eeprom = NULL;
2577 kfree(efx->spi_flash);
2578 efx->spi_flash = NULL;
2579}
2580
2308/* Extract non-volatile configuration */ 2581/* Extract non-volatile configuration */
2309static int falcon_probe_nvconfig(struct efx_nic *efx) 2582static int falcon_probe_nvconfig(struct efx_nic *efx)
2310{ 2583{
2311 struct falcon_nvconfig *nvconfig; 2584 struct falcon_nvconfig *nvconfig;
2312 efx_oword_t nic_stat; 2585 int board_rev;
2313 int device_id;
2314 unsigned addr_len;
2315 size_t offset, len;
2316 int magic_num, struct_ver, board_rev;
2317 int rc; 2586 int rc;
2318 2587
2319 /* Find the boot device. */
2320 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2321 if (EFX_OWORD_FIELD(nic_stat, SF_PRST)) {
2322 device_id = EE_SPI_FLASH;
2323 addr_len = 3;
2324 } else if (EFX_OWORD_FIELD(nic_stat, EE_PRST)) {
2325 device_id = EE_SPI_EEPROM;
2326 addr_len = 2;
2327 } else {
2328 return -ENODEV;
2329 }
2330
2331 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); 2588 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
2589 if (!nvconfig)
2590 return -ENOMEM;
2332 2591
2333 /* Read the whole configuration structure into memory. */ 2592 rc = falcon_read_nvram(efx, nvconfig);
2334 for (offset = 0; offset < sizeof(*nvconfig); offset += len) { 2593 if (rc == -EINVAL) {
2335 len = min(sizeof(*nvconfig) - offset, 2594 EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
2336 (size_t) FALCON_SPI_MAX_LEN);
2337 rc = falcon_spi_read(efx, device_id, SPI_READ,
2338 NVCONFIG_BASE + offset, addr_len,
2339 (char *)nvconfig + offset, len);
2340 if (rc)
2341 goto out;
2342 }
2343
2344 /* Read the MAC addresses */
2345 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2346
2347 /* Read the board configuration. */
2348 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2349 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2350
2351 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) {
2352 EFX_ERR(efx, "Non volatile memory bad magic=%x ver=%x "
2353 "therefore using defaults\n", magic_num, struct_ver);
2354 efx->phy_type = PHY_TYPE_NONE; 2595 efx->phy_type = PHY_TYPE_NONE;
2355 efx->mii.phy_id = PHY_ADDR_INVALID; 2596 efx->mii.phy_id = PHY_ADDR_INVALID;
2356 board_rev = 0; 2597 board_rev = 0;
2598 rc = 0;
2599 } else if (rc) {
2600 goto fail1;
2357 } else { 2601 } else {
2358 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2; 2602 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
2603 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
2359 2604
2360 efx->phy_type = v2->port0_phy_type; 2605 efx->phy_type = v2->port0_phy_type;
2361 efx->mii.phy_id = v2->port0_phy_addr; 2606 efx->mii.phy_id = v2->port0_phy_addr;
2362 board_rev = le16_to_cpu(v2->board_revision); 2607 board_rev = le16_to_cpu(v2->board_revision);
2608
2609 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2610 __le32 fl = v3->spi_device_type[EE_SPI_FLASH];
2611 __le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
2612 rc = falcon_spi_device_init(efx, &efx->spi_flash,
2613 EE_SPI_FLASH,
2614 le32_to_cpu(fl));
2615 if (rc)
2616 goto fail2;
2617 rc = falcon_spi_device_init(efx, &efx->spi_eeprom,
2618 EE_SPI_EEPROM,
2619 le32_to_cpu(ee));
2620 if (rc)
2621 goto fail2;
2622 }
2363 } 2623 }
2364 2624
2625 /* Read the MAC addresses */
2626 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2627
2365 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id); 2628 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);
2366 2629
2367 efx_set_board_info(efx, board_rev); 2630 efx_set_board_info(efx, board_rev);
2368 2631
2369 out: 2632 kfree(nvconfig);
2633 return 0;
2634
2635 fail2:
2636 falcon_remove_spi_devices(efx);
2637 fail1:
2370 kfree(nvconfig); 2638 kfree(nvconfig);
2371 return rc; 2639 return rc;
2372} 2640}
@@ -2417,6 +2685,86 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2417 return 0; 2685 return 0;
2418} 2686}
2419 2687
2688/* Probe all SPI devices on the NIC */
2689static void falcon_probe_spi_devices(struct efx_nic *efx)
2690{
2691 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2692 bool has_flash, has_eeprom, boot_is_external;
2693
2694 falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
2695 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2696 falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
2697
2698 has_flash = EFX_OWORD_FIELD(nic_stat, SF_PRST);
2699 has_eeprom = EFX_OWORD_FIELD(nic_stat, EE_PRST);
2700 boot_is_external = EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE);
2701
2702 if (has_flash) {
2703 /* Default flash SPI device: Atmel AT25F1024
2704 * 128 KB, 24-bit address, 32 KB erase block,
2705 * 256 B write block
2706 */
2707 u32 flash_device_type =
2708 (17 << SPI_DEV_TYPE_SIZE_LBN)
2709 | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
2710 | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
2711 | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
2712 | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
2713
2714 falcon_spi_device_init(efx, &efx->spi_flash,
2715 EE_SPI_FLASH, flash_device_type);
2716
2717 if (!boot_is_external) {
2718 /* Disable VPD and set clock dividers to safe
2719 * values for initial programming.
2720 */
2721 EFX_LOG(efx, "Booted from internal ASIC settings;"
2722 " setting SPI config\n");
2723 EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
2724 /* 125 MHz / 7 ~= 20 MHz */
2725 EE_SF_CLOCK_DIV, 7,
2726 /* 125 MHz / 63 ~= 2 MHz */
2727 EE_EE_CLOCK_DIV, 63);
2728 falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
2729 }
2730 }
2731
2732 if (has_eeprom) {
2733 u32 eeprom_device_type;
2734
2735 /* If it has no flash, it must have a large EEPROM
2736 * for chip config; otherwise check whether 9-bit
2737 * addressing is used for VPD configuration
2738 */
2739 if (has_flash &&
2740 (!boot_is_external ||
2741 EFX_OWORD_FIELD(ee_vpd_cfg, EE_VPD_EN_AD9_MODE))) {
2742 /* Default SPI device: Atmel AT25040 or similar
2743 * 512 B, 9-bit address, 8 B write block
2744 */
2745 eeprom_device_type =
2746 (9 << SPI_DEV_TYPE_SIZE_LBN)
2747 | (1 << SPI_DEV_TYPE_ADDR_LEN_LBN)
2748 | (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
2749 } else {
2750 /* "Large" SPI device: Atmel AT25640 or similar
2751 * 8 KB, 16-bit address, 32 B write block
2752 */
2753 eeprom_device_type =
2754 (13 << SPI_DEV_TYPE_SIZE_LBN)
2755 | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
2756 | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
2757 }
2758
2759 falcon_spi_device_init(efx, &efx->spi_eeprom,
2760 EE_SPI_EEPROM, eeprom_device_type);
2761 }
2762
2763 EFX_LOG(efx, "flash is %s, EEPROM is %s\n",
2764 (has_flash ? "present" : "absent"),
2765 (has_eeprom ? "present" : "absent"));
2766}
2767
2420int falcon_probe_nic(struct efx_nic *efx) 2768int falcon_probe_nic(struct efx_nic *efx)
2421{ 2769{
2422 struct falcon_nic_data *nic_data; 2770 struct falcon_nic_data *nic_data;
@@ -2424,6 +2772,8 @@ int falcon_probe_nic(struct efx_nic *efx)
2424 2772
2425 /* Allocate storage for hardware specific data */ 2773 /* Allocate storage for hardware specific data */
2426 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 2774 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2775 if (!nic_data)
2776 return -ENOMEM;
2427 efx->nic_data = nic_data; 2777 efx->nic_data = nic_data;
2428 2778
2429 /* Determine number of ports etc. */ 2779 /* Determine number of ports etc. */
@@ -2467,6 +2817,8 @@ int falcon_probe_nic(struct efx_nic *efx)
2467 (unsigned long long)efx->irq_status.dma_addr, 2817 (unsigned long long)efx->irq_status.dma_addr,
2468 efx->irq_status.addr, virt_to_phys(efx->irq_status.addr)); 2818 efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
2469 2819
2820 falcon_probe_spi_devices(efx);
2821
2470 /* Read in the non-volatile configuration */ 2822 /* Read in the non-volatile configuration */
2471 rc = falcon_probe_nvconfig(efx); 2823 rc = falcon_probe_nvconfig(efx);
2472 if (rc) 2824 if (rc)
@@ -2486,6 +2838,7 @@ int falcon_probe_nic(struct efx_nic *efx)
2486 return 0; 2838 return 0;
2487 2839
2488 fail5: 2840 fail5:
2841 falcon_remove_spi_devices(efx);
2489 falcon_free_buffer(efx, &efx->irq_status); 2842 falcon_free_buffer(efx, &efx->irq_status);
2490 fail4: 2843 fail4:
2491 fail3: 2844 fail3:
@@ -2573,19 +2926,14 @@ int falcon_init_nic(struct efx_nic *efx)
2573 EFX_INVERT_OWORD(temp); 2926 EFX_INVERT_OWORD(temp);
2574 falcon_write(efx, &temp, FATAL_INTR_REG_KER); 2927 falcon_write(efx, &temp, FATAL_INTR_REG_KER);
2575 2928
2576 /* Set number of RSS queues for receive path. */
2577 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
2578 if (falcon_rev(efx) >= FALCON_REV_B0)
2579 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
2580 else
2581 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
2582 if (EFX_WORKAROUND_7244(efx)) { 2929 if (EFX_WORKAROUND_7244(efx)) {
2930 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
2583 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); 2931 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
2584 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); 2932 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
2585 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); 2933 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
2586 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); 2934 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
2935 falcon_write(efx, &temp, RX_FILTER_CTL_REG);
2587 } 2936 }
2588 falcon_write(efx, &temp, RX_FILTER_CTL_REG);
2589 2937
2590 falcon_setup_rss_indir_table(efx); 2938 falcon_setup_rss_indir_table(efx);
2591 2939
@@ -2641,8 +2989,8 @@ int falcon_init_nic(struct efx_nic *efx)
2641 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh); 2989 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
2642 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256); 2990 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
2643 /* RX control FIFO thresholds [32 entries] */ 2991 /* RX control FIFO thresholds [32 entries] */
2644 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25); 2992 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
2645 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20); 2993 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
2646 falcon_write(efx, &temp, RX_CFG_REG_KER); 2994 falcon_write(efx, &temp, RX_CFG_REG_KER);
2647 2995
2648 /* Set destination of both TX and RX Flush events */ 2996 /* Set destination of both TX and RX Flush events */
@@ -2662,6 +3010,7 @@ void falcon_remove_nic(struct efx_nic *efx)
2662 rc = i2c_del_adapter(&efx->i2c_adap); 3010 rc = i2c_del_adapter(&efx->i2c_adap);
2663 BUG_ON(rc); 3011 BUG_ON(rc);
2664 3012
3013 falcon_remove_spi_devices(efx);
2665 falcon_free_buffer(efx, &efx->irq_status); 3014 falcon_free_buffer(efx, &efx->irq_status);
2666 3015
2667 falcon_reset_hw(efx, RESET_TYPE_ALL); 3016 falcon_reset_hw(efx, RESET_TYPE_ALL);
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 492f9bc28840..be025ba7a6c6 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -40,24 +40,24 @@ extern struct efx_nic_type falcon_b_nic_type;
40 40
41/* TX data path */ 41/* TX data path */
42extern int falcon_probe_tx(struct efx_tx_queue *tx_queue); 42extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
43extern int falcon_init_tx(struct efx_tx_queue *tx_queue); 43extern void falcon_init_tx(struct efx_tx_queue *tx_queue);
44extern void falcon_fini_tx(struct efx_tx_queue *tx_queue); 44extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
45extern void falcon_remove_tx(struct efx_tx_queue *tx_queue); 45extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
46extern void falcon_push_buffers(struct efx_tx_queue *tx_queue); 46extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
47 47
48/* RX data path */ 48/* RX data path */
49extern int falcon_probe_rx(struct efx_rx_queue *rx_queue); 49extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
50extern int falcon_init_rx(struct efx_rx_queue *rx_queue); 50extern void falcon_init_rx(struct efx_rx_queue *rx_queue);
51extern void falcon_fini_rx(struct efx_rx_queue *rx_queue); 51extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
52extern void falcon_remove_rx(struct efx_rx_queue *rx_queue); 52extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
53extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue); 53extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
54 54
55/* Event data path */ 55/* Event data path */
56extern int falcon_probe_eventq(struct efx_channel *channel); 56extern int falcon_probe_eventq(struct efx_channel *channel);
57extern int falcon_init_eventq(struct efx_channel *channel); 57extern void falcon_init_eventq(struct efx_channel *channel);
58extern void falcon_fini_eventq(struct efx_channel *channel); 58extern void falcon_fini_eventq(struct efx_channel *channel);
59extern void falcon_remove_eventq(struct efx_channel *channel); 59extern void falcon_remove_eventq(struct efx_channel *channel);
60extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota); 60extern int falcon_process_eventq(struct efx_channel *channel, int rx_quota);
61extern void falcon_eventq_read_ack(struct efx_channel *channel); 61extern void falcon_eventq_read_ack(struct efx_channel *channel);
62 62
63/* Ports */ 63/* Ports */
@@ -65,7 +65,7 @@ extern int falcon_probe_port(struct efx_nic *efx);
65extern void falcon_remove_port(struct efx_nic *efx); 65extern void falcon_remove_port(struct efx_nic *efx);
66 66
67/* MAC/PHY */ 67/* MAC/PHY */
68extern int falcon_xaui_link_ok(struct efx_nic *efx); 68extern bool falcon_xaui_link_ok(struct efx_nic *efx);
69extern int falcon_dma_stats(struct efx_nic *efx, 69extern int falcon_dma_stats(struct efx_nic *efx,
70 unsigned int done_offset); 70 unsigned int done_offset);
71extern void falcon_drain_tx_fifo(struct efx_nic *efx); 71extern void falcon_drain_tx_fifo(struct efx_nic *efx);
@@ -86,6 +86,7 @@ extern void falcon_fini_interrupt(struct efx_nic *efx);
86extern int falcon_probe_nic(struct efx_nic *efx); 86extern int falcon_probe_nic(struct efx_nic *efx);
87extern int falcon_probe_resources(struct efx_nic *efx); 87extern int falcon_probe_resources(struct efx_nic *efx);
88extern int falcon_init_nic(struct efx_nic *efx); 88extern int falcon_init_nic(struct efx_nic *efx);
89extern int falcon_flush_queues(struct efx_nic *efx);
89extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); 90extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
90extern void falcon_remove_resources(struct efx_nic *efx); 91extern void falcon_remove_resources(struct efx_nic *efx);
91extern void falcon_remove_nic(struct efx_nic *efx); 92extern void falcon_remove_nic(struct efx_nic *efx);
@@ -93,6 +94,12 @@ extern void falcon_update_nic_stats(struct efx_nic *efx);
93extern void falcon_set_multicast_hash(struct efx_nic *efx); 94extern void falcon_set_multicast_hash(struct efx_nic *efx);
94extern int falcon_reset_xaui(struct efx_nic *efx); 95extern int falcon_reset_xaui(struct efx_nic *efx);
95 96
97/* Tests */
98struct falcon_nvconfig;
99extern int falcon_read_nvram(struct efx_nic *efx,
100 struct falcon_nvconfig *nvconfig);
101extern int falcon_test_registers(struct efx_nic *efx);
102
96/************************************************************************** 103/**************************************************************************
97 * 104 *
98 * Falcon MAC stats 105 * Falcon MAC stats
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 6d003114eeab..5d584b0dbb51 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -92,6 +92,17 @@
92/* SPI host data register */ 92/* SPI host data register */
93#define EE_SPI_HDATA_REG_KER 0x0120 93#define EE_SPI_HDATA_REG_KER 0x0120
94 94
95/* SPI/VPD config register */
96#define EE_VPD_CFG_REG_KER 0x0140
97#define EE_VPD_EN_LBN 0
98#define EE_VPD_EN_WIDTH 1
99#define EE_VPD_EN_AD9_MODE_LBN 1
100#define EE_VPD_EN_AD9_MODE_WIDTH 1
101#define EE_EE_CLOCK_DIV_LBN 112
102#define EE_EE_CLOCK_DIV_WIDTH 7
103#define EE_SF_CLOCK_DIV_LBN 120
104#define EE_SF_CLOCK_DIV_WIDTH 7
105
95/* PCIE CORE ACCESS REG */ 106/* PCIE CORE ACCESS REG */
96#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68 107#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
97#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70 108#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
@@ -106,7 +117,6 @@
106#define SF_PRST_WIDTH 1 117#define SF_PRST_WIDTH 1
107#define EE_PRST_LBN 8 118#define EE_PRST_LBN 8
108#define EE_PRST_WIDTH 1 119#define EE_PRST_WIDTH 1
109/* See pic_mode_t for decoding of this field */
110/* These bit definitions are extrapolated from the list of numerical 120/* These bit definitions are extrapolated from the list of numerical
111 * values for STRAP_PINS. 121 * values for STRAP_PINS.
112 */ 122 */
@@ -115,6 +125,9 @@
115#define STRAP_PCIE_LBN 0 125#define STRAP_PCIE_LBN 0
116#define STRAP_PCIE_WIDTH 1 126#define STRAP_PCIE_WIDTH 1
117 127
128#define BOOTED_USING_NVDEVICE_LBN 3
129#define BOOTED_USING_NVDEVICE_WIDTH 1
130
118/* GPIO control register */ 131/* GPIO control register */
119#define GPIO_CTL_REG_KER 0x0210 132#define GPIO_CTL_REG_KER 0x0210
120#define GPIO_OUTPUTS_LBN (16) 133#define GPIO_OUTPUTS_LBN (16)
@@ -479,18 +492,8 @@
479#define MAC_MCAST_HASH_REG0_KER 0xca0 492#define MAC_MCAST_HASH_REG0_KER 0xca0
480#define MAC_MCAST_HASH_REG1_KER 0xcb0 493#define MAC_MCAST_HASH_REG1_KER 0xcb0
481 494
482/* GMAC registers */
483#define FALCON_GMAC_REGBANK 0xe00
484#define FALCON_GMAC_REGBANK_SIZE 0x200
485#define FALCON_GMAC_REG_SIZE 0x10
486
487/* XMAC registers */
488#define FALCON_XMAC_REGBANK 0x1200
489#define FALCON_XMAC_REGBANK_SIZE 0x200
490#define FALCON_XMAC_REG_SIZE 0x10
491
492/* XGMAC address register low */ 495/* XGMAC address register low */
493#define XM_ADR_LO_REG_MAC 0x00 496#define XM_ADR_LO_REG 0x1200
494#define XM_ADR_3_LBN 24 497#define XM_ADR_3_LBN 24
495#define XM_ADR_3_WIDTH 8 498#define XM_ADR_3_WIDTH 8
496#define XM_ADR_2_LBN 16 499#define XM_ADR_2_LBN 16
@@ -501,14 +504,14 @@
501#define XM_ADR_0_WIDTH 8 504#define XM_ADR_0_WIDTH 8
502 505
503/* XGMAC address register high */ 506/* XGMAC address register high */
504#define XM_ADR_HI_REG_MAC 0x01 507#define XM_ADR_HI_REG 0x1210
505#define XM_ADR_5_LBN 8 508#define XM_ADR_5_LBN 8
506#define XM_ADR_5_WIDTH 8 509#define XM_ADR_5_WIDTH 8
507#define XM_ADR_4_LBN 0 510#define XM_ADR_4_LBN 0
508#define XM_ADR_4_WIDTH 8 511#define XM_ADR_4_WIDTH 8
509 512
510/* XGMAC global configuration */ 513/* XGMAC global configuration */
511#define XM_GLB_CFG_REG_MAC 0x02 514#define XM_GLB_CFG_REG 0x1220
512#define XM_RX_STAT_EN_LBN 11 515#define XM_RX_STAT_EN_LBN 11
513#define XM_RX_STAT_EN_WIDTH 1 516#define XM_RX_STAT_EN_WIDTH 1
514#define XM_TX_STAT_EN_LBN 10 517#define XM_TX_STAT_EN_LBN 10
@@ -521,7 +524,7 @@
521#define XM_CORE_RST_WIDTH 1 524#define XM_CORE_RST_WIDTH 1
522 525
523/* XGMAC transmit configuration */ 526/* XGMAC transmit configuration */
524#define XM_TX_CFG_REG_MAC 0x03 527#define XM_TX_CFG_REG 0x1230
525#define XM_IPG_LBN 16 528#define XM_IPG_LBN 16
526#define XM_IPG_WIDTH 4 529#define XM_IPG_WIDTH 4
527#define XM_FCNTL_LBN 10 530#define XM_FCNTL_LBN 10
@@ -536,7 +539,7 @@
536#define XM_TXEN_WIDTH 1 539#define XM_TXEN_WIDTH 1
537 540
538/* XGMAC receive configuration */ 541/* XGMAC receive configuration */
539#define XM_RX_CFG_REG_MAC 0x04 542#define XM_RX_CFG_REG 0x1240
540#define XM_PASS_CRC_ERR_LBN 25 543#define XM_PASS_CRC_ERR_LBN 25
541#define XM_PASS_CRC_ERR_WIDTH 1 544#define XM_PASS_CRC_ERR_WIDTH 1
542#define XM_ACPT_ALL_MCAST_LBN 11 545#define XM_ACPT_ALL_MCAST_LBN 11
@@ -549,7 +552,7 @@
549#define XM_RXEN_WIDTH 1 552#define XM_RXEN_WIDTH 1
550 553
551/* XGMAC management interrupt mask register */ 554/* XGMAC management interrupt mask register */
552#define XM_MGT_INT_MSK_REG_MAC_B0 0x5 555#define XM_MGT_INT_MSK_REG_B0 0x1250
553#define XM_MSK_PRMBLE_ERR_LBN 2 556#define XM_MSK_PRMBLE_ERR_LBN 2
554#define XM_MSK_PRMBLE_ERR_WIDTH 1 557#define XM_MSK_PRMBLE_ERR_WIDTH 1
555#define XM_MSK_RMTFLT_LBN 1 558#define XM_MSK_RMTFLT_LBN 1
@@ -558,29 +561,29 @@
558#define XM_MSK_LCLFLT_WIDTH 1 561#define XM_MSK_LCLFLT_WIDTH 1
559 562
560/* XGMAC flow control register */ 563/* XGMAC flow control register */
561#define XM_FC_REG_MAC 0x7 564#define XM_FC_REG 0x1270
562#define XM_PAUSE_TIME_LBN 16 565#define XM_PAUSE_TIME_LBN 16
563#define XM_PAUSE_TIME_WIDTH 16 566#define XM_PAUSE_TIME_WIDTH 16
564#define XM_DIS_FCNTL_LBN 0 567#define XM_DIS_FCNTL_LBN 0
565#define XM_DIS_FCNTL_WIDTH 1 568#define XM_DIS_FCNTL_WIDTH 1
566 569
567/* XGMAC pause time count register */ 570/* XGMAC pause time count register */
568#define XM_PAUSE_TIME_REG_MAC 0x9 571#define XM_PAUSE_TIME_REG 0x1290
569 572
570/* XGMAC transmit parameter register */ 573/* XGMAC transmit parameter register */
571#define XM_TX_PARAM_REG_MAC 0x0d 574#define XM_TX_PARAM_REG 0x012d0
572#define XM_TX_JUMBO_MODE_LBN 31 575#define XM_TX_JUMBO_MODE_LBN 31
573#define XM_TX_JUMBO_MODE_WIDTH 1 576#define XM_TX_JUMBO_MODE_WIDTH 1
574#define XM_MAX_TX_FRM_SIZE_LBN 16 577#define XM_MAX_TX_FRM_SIZE_LBN 16
575#define XM_MAX_TX_FRM_SIZE_WIDTH 14 578#define XM_MAX_TX_FRM_SIZE_WIDTH 14
576 579
577/* XGMAC receive parameter register */ 580/* XGMAC receive parameter register */
578#define XM_RX_PARAM_REG_MAC 0x0e 581#define XM_RX_PARAM_REG 0x12e0
579#define XM_MAX_RX_FRM_SIZE_LBN 0 582#define XM_MAX_RX_FRM_SIZE_LBN 0
580#define XM_MAX_RX_FRM_SIZE_WIDTH 14 583#define XM_MAX_RX_FRM_SIZE_WIDTH 14
581 584
582/* XGMAC management interrupt status register */ 585/* XGMAC management interrupt status register */
583#define XM_MGT_INT_REG_MAC_B0 0x0f 586#define XM_MGT_INT_REG_B0 0x12f0
584#define XM_PRMBLE_ERR 2 587#define XM_PRMBLE_ERR 2
585#define XM_PRMBLE_WIDTH 1 588#define XM_PRMBLE_WIDTH 1
586#define XM_RMTFLT_LBN 1 589#define XM_RMTFLT_LBN 1
@@ -589,7 +592,7 @@
589#define XM_LCLFLT_WIDTH 1 592#define XM_LCLFLT_WIDTH 1
590 593
591/* XGXS/XAUI powerdown/reset register */ 594/* XGXS/XAUI powerdown/reset register */
592#define XX_PWR_RST_REG_MAC 0x10 595#define XX_PWR_RST_REG 0x1300
593 596
594#define XX_PWRDND_EN_LBN 15 597#define XX_PWRDND_EN_LBN 15
595#define XX_PWRDND_EN_WIDTH 1 598#define XX_PWRDND_EN_WIDTH 1
@@ -619,7 +622,7 @@
619#define XX_RST_XX_EN_WIDTH 1 622#define XX_RST_XX_EN_WIDTH 1
620 623
621/* XGXS/XAUI powerdown/reset control register */ 624/* XGXS/XAUI powerdown/reset control register */
622#define XX_SD_CTL_REG_MAC 0x11 625#define XX_SD_CTL_REG 0x1310
623#define XX_HIDRVD_LBN 15 626#define XX_HIDRVD_LBN 15
624#define XX_HIDRVD_WIDTH 1 627#define XX_HIDRVD_WIDTH 1
625#define XX_LODRVD_LBN 14 628#define XX_LODRVD_LBN 14
@@ -645,7 +648,7 @@
645#define XX_LPBKA_LBN 0 648#define XX_LPBKA_LBN 0
646#define XX_LPBKA_WIDTH 1 649#define XX_LPBKA_WIDTH 1
647 650
648#define XX_TXDRV_CTL_REG_MAC 0x12 651#define XX_TXDRV_CTL_REG 0x1320
649#define XX_DEQD_LBN 28 652#define XX_DEQD_LBN 28
650#define XX_DEQD_WIDTH 4 653#define XX_DEQD_WIDTH 4
651#define XX_DEQC_LBN 24 654#define XX_DEQC_LBN 24
@@ -664,7 +667,7 @@
664#define XX_DTXA_WIDTH 4 667#define XX_DTXA_WIDTH 4
665 668
666/* XAUI XGXS core status register */ 669/* XAUI XGXS core status register */
667#define XX_CORE_STAT_REG_MAC 0x16 670#define XX_CORE_STAT_REG 0x1360
668#define XX_FORCE_SIG_LBN 24 671#define XX_FORCE_SIG_LBN 24
669#define XX_FORCE_SIG_WIDTH 8 672#define XX_FORCE_SIG_WIDTH 8
670#define XX_FORCE_SIG_DECODE_FORCED 0xff 673#define XX_FORCE_SIG_DECODE_FORCED 0xff
@@ -1127,7 +1130,28 @@ struct falcon_nvconfig_board_v2 {
1127 __le16 board_revision; 1130 __le16 board_revision;
1128} __packed; 1131} __packed;
1129 1132
1130#define NVCONFIG_BASE 0x300 1133/* Board configuration v3 extra information */
1134struct falcon_nvconfig_board_v3 {
1135 __le32 spi_device_type[2];
1136} __packed;
1137
1138/* Bit numbers for spi_device_type */
1139#define SPI_DEV_TYPE_SIZE_LBN 0
1140#define SPI_DEV_TYPE_SIZE_WIDTH 5
1141#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
1142#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
1143#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
1144#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
1145#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
1146#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
1147#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
1148#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
1149#define SPI_DEV_TYPE_FIELD(type, field) \
1150 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
1151
1152#define NVCONFIG_OFFSET 0x300
1153#define NVCONFIG_END 0x400
1154
1131#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C 1155#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
1132struct falcon_nvconfig { 1156struct falcon_nvconfig {
1133 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */ 1157 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
@@ -1144,6 +1168,8 @@ struct falcon_nvconfig {
1144 __le16 board_struct_ver; 1168 __le16 board_struct_ver;
1145 __le16 board_checksum; 1169 __le16 board_checksum;
1146 struct falcon_nvconfig_board_v2 board_v2; 1170 struct falcon_nvconfig_board_v2 board_v2;
1171 efx_oword_t ee_base_page_reg; /* 0x3B0 */
1172 struct falcon_nvconfig_board_v3 board_v3;
1147} __packed; 1173} __packed;
1148 1174
1149#endif /* EFX_FALCON_HWDEFS_H */ 1175#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index 6670cdfc41ab..c16da3149fa9 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -13,7 +13,6 @@
13 13
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include "net_driver.h"
17 16
18/************************************************************************** 17/**************************************************************************
19 * 18 *
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 55c0d9760be8..d4012314dd01 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -23,56 +23,24 @@
23 23
24/************************************************************************** 24/**************************************************************************
25 * 25 *
26 * MAC register access
27 *
28 **************************************************************************/
29
30/* Offset of an XMAC register within Falcon */
31#define FALCON_XMAC_REG(mac_reg) \
32 (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
33
34void falcon_xmac_writel(struct efx_nic *efx,
35 efx_dword_t *value, unsigned int mac_reg)
36{
37 efx_oword_t temp;
38
39 EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA));
40 falcon_write(efx, &temp, FALCON_XMAC_REG(mac_reg));
41}
42
43void falcon_xmac_readl(struct efx_nic *efx,
44 efx_dword_t *value, unsigned int mac_reg)
45{
46 efx_oword_t temp;
47
48 falcon_read(efx, &temp, FALCON_XMAC_REG(mac_reg));
49 EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA));
50}
51
52/**************************************************************************
53 *
54 * MAC operations 26 * MAC operations
55 * 27 *
56 *************************************************************************/ 28 *************************************************************************/
57static int falcon_reset_xmac(struct efx_nic *efx) 29static int falcon_reset_xmac(struct efx_nic *efx)
58{ 30{
59 efx_dword_t reg; 31 efx_oword_t reg;
60 int count; 32 int count;
61 33
62 EFX_POPULATE_DWORD_1(reg, XM_CORE_RST, 1); 34 EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
63 falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC); 35 falcon_write(efx, &reg, XM_GLB_CFG_REG);
64 36
65 for (count = 0; count < 10000; count++) { /* wait upto 100ms */ 37 for (count = 0; count < 10000; count++) { /* wait upto 100ms */
66 falcon_xmac_readl(efx, &reg, XM_GLB_CFG_REG_MAC); 38 falcon_read(efx, &reg, XM_GLB_CFG_REG);
67 if (EFX_DWORD_FIELD(reg, XM_CORE_RST) == 0) 39 if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
68 return 0; 40 return 0;
69 udelay(10); 41 udelay(10);
70 } 42 }
71 43
72 /* This often fails when DSP is disabled, ignore it */
73 if (sfe4001_phy_flash_cfg != 0)
74 return 0;
75
76 EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); 44 EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
77 return -ETIMEDOUT; 45 return -ETIMEDOUT;
78} 46}
@@ -80,25 +48,25 @@ static int falcon_reset_xmac(struct efx_nic *efx)
80/* Configure the XAUI driver that is an output from Falcon */ 48/* Configure the XAUI driver that is an output from Falcon */
81static void falcon_setup_xaui(struct efx_nic *efx) 49static void falcon_setup_xaui(struct efx_nic *efx)
82{ 50{
83 efx_dword_t sdctl, txdrv; 51 efx_oword_t sdctl, txdrv;
84 52
85 /* Move the XAUI into low power, unless there is no PHY, in 53 /* Move the XAUI into low power, unless there is no PHY, in
86 * which case the XAUI will have to drive a cable. */ 54 * which case the XAUI will have to drive a cable. */
87 if (efx->phy_type == PHY_TYPE_NONE) 55 if (efx->phy_type == PHY_TYPE_NONE)
88 return; 56 return;
89 57
90 falcon_xmac_readl(efx, &sdctl, XX_SD_CTL_REG_MAC); 58 falcon_read(efx, &sdctl, XX_SD_CTL_REG);
91 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT); 59 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
92 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT); 60 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
93 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT); 61 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
94 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT); 62 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
95 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT); 63 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
96 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT); 64 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
97 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT); 65 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
98 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT); 66 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
99 falcon_xmac_writel(efx, &sdctl, XX_SD_CTL_REG_MAC); 67 falcon_write(efx, &sdctl, XX_SD_CTL_REG);
100 68
101 EFX_POPULATE_DWORD_8(txdrv, 69 EFX_POPULATE_OWORD_8(txdrv,
102 XX_DEQD, XX_TXDRV_DEQ_DEFAULT, 70 XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
103 XX_DEQC, XX_TXDRV_DEQ_DEFAULT, 71 XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
104 XX_DEQB, XX_TXDRV_DEQ_DEFAULT, 72 XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
@@ -107,93 +75,21 @@ static void falcon_setup_xaui(struct efx_nic *efx)
107 XX_DTXC, XX_TXDRV_DTX_DEFAULT, 75 XX_DTXC, XX_TXDRV_DTX_DEFAULT,
108 XX_DTXB, XX_TXDRV_DTX_DEFAULT, 76 XX_DTXB, XX_TXDRV_DTX_DEFAULT,
109 XX_DTXA, XX_TXDRV_DTX_DEFAULT); 77 XX_DTXA, XX_TXDRV_DTX_DEFAULT);
110 falcon_xmac_writel(efx, &txdrv, XX_TXDRV_CTL_REG_MAC); 78 falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG);
111} 79}
112 80
113static void falcon_hold_xaui_in_rst(struct efx_nic *efx) 81int falcon_reset_xaui(struct efx_nic *efx)
114{
115 efx_dword_t reg;
116
117 EFX_ZERO_DWORD(reg);
118 EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 1);
119 EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 1);
120 EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 1);
121 EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 1);
122 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 1);
123 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 1);
124 EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 1);
125 EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 1);
126 EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 1);
127 EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 1);
128 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
129 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
130 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
131 udelay(10);
132}
133
134static int _falcon_reset_xaui_a(struct efx_nic *efx)
135{
136 efx_dword_t reg;
137
138 falcon_hold_xaui_in_rst(efx);
139 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
140
141 /* Follow the RAMBUS XAUI data reset sequencing
142 * Channels A and B first: power down, reset PLL, reset, clear
143 */
144 EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 0);
145 EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 0);
146 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
147 udelay(10);
148
149 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 0);
150 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
151 udelay(10);
152
153 EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 0);
154 EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 0);
155 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
156 udelay(10);
157
158 /* Channels C and D: power down, reset PLL, reset, clear */
159 EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 0);
160 EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 0);
161 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
162 udelay(10);
163
164 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 0);
165 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
166 udelay(10);
167
168 EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 0);
169 EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 0);
170 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
171 udelay(10);
172
173 /* Setup XAUI */
174 falcon_setup_xaui(efx);
175 udelay(10);
176
177 /* Take XGXS out of reset */
178 EFX_ZERO_DWORD(reg);
179 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
180 udelay(10);
181
182 return 0;
183}
184
185static int _falcon_reset_xaui_b(struct efx_nic *efx)
186{ 82{
187 efx_dword_t reg; 83 efx_oword_t reg;
188 int count; 84 int count;
189 85
190 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); 86 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
191 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC); 87 falcon_write(efx, &reg, XX_PWR_RST_REG);
192 88
193 /* Give some time for the link to establish */ 89 /* Give some time for the link to establish */
194 for (count = 0; count < 1000; count++) { /* wait upto 10ms */ 90 for (count = 0; count < 1000; count++) { /* wait upto 10ms */
195 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC); 91 falcon_read(efx, &reg, XX_PWR_RST_REG);
196 if (EFX_DWORD_FIELD(reg, XX_RST_XX_EN) == 0) { 92 if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
197 falcon_setup_xaui(efx); 93 falcon_setup_xaui(efx);
198 return 0; 94 return 0;
199 } 95 }
@@ -203,55 +99,41 @@ static int _falcon_reset_xaui_b(struct efx_nic *efx)
203 return -ETIMEDOUT; 99 return -ETIMEDOUT;
204} 100}
205 101
206int falcon_reset_xaui(struct efx_nic *efx) 102static bool falcon_xgmii_status(struct efx_nic *efx)
207{ 103{
208 int rc; 104 efx_oword_t reg;
209
210 if (EFX_WORKAROUND_9388(efx)) {
211 falcon_hold_xaui_in_rst(efx);
212 efx->phy_op->reset_xaui(efx);
213 rc = _falcon_reset_xaui_a(efx);
214 } else {
215 rc = _falcon_reset_xaui_b(efx);
216 }
217 return rc;
218}
219
220static int falcon_xgmii_status(struct efx_nic *efx)
221{
222 efx_dword_t reg;
223 105
224 if (falcon_rev(efx) < FALCON_REV_B0) 106 if (falcon_rev(efx) < FALCON_REV_B0)
225 return 1; 107 return true;
226 108
227 /* The ISR latches, so clear it and re-read */ 109 /* The ISR latches, so clear it and re-read */
228 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 110 falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
229 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 111 falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
230 112
231 if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || 113 if (EFX_OWORD_FIELD(reg, XM_LCLFLT) ||
232 EFX_DWORD_FIELD(reg, XM_RMTFLT)) { 114 EFX_OWORD_FIELD(reg, XM_RMTFLT)) {
233 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); 115 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
234 return 0; 116 return false;
235 } 117 }
236 118
237 return 1; 119 return true;
238} 120}
239 121
240static void falcon_mask_status_intr(struct efx_nic *efx, int enable) 122static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
241{ 123{
242 efx_dword_t reg; 124 efx_oword_t reg;
243 125
244 if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 126 if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
245 return; 127 return;
246 128
247 /* Flush the ISR */ 129 /* Flush the ISR */
248 if (enable) 130 if (enable)
249 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 131 falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
250 132
251 EFX_POPULATE_DWORD_2(reg, 133 EFX_POPULATE_OWORD_2(reg,
252 XM_MSK_RMTFLT, !enable, 134 XM_MSK_RMTFLT, !enable,
253 XM_MSK_LCLFLT, !enable); 135 XM_MSK_LCLFLT, !enable);
254 falcon_xmac_writel(efx, &reg, XM_MGT_INT_MSK_REG_MAC_B0); 136 falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0);
255} 137}
256 138
257int falcon_init_xmac(struct efx_nic *efx) 139int falcon_init_xmac(struct efx_nic *efx)
@@ -274,7 +156,7 @@ int falcon_init_xmac(struct efx_nic *efx)
274 if (rc) 156 if (rc)
275 goto fail2; 157 goto fail2;
276 158
277 falcon_mask_status_intr(efx, 1); 159 falcon_mask_status_intr(efx, true);
278 return 0; 160 return 0;
279 161
280 fail2: 162 fail2:
@@ -283,34 +165,34 @@ int falcon_init_xmac(struct efx_nic *efx)
283 return rc; 165 return rc;
284} 166}
285 167
286int falcon_xaui_link_ok(struct efx_nic *efx) 168bool falcon_xaui_link_ok(struct efx_nic *efx)
287{ 169{
288 efx_dword_t reg; 170 efx_oword_t reg;
289 int align_done, sync_status, link_ok = 0; 171 bool align_done, link_ok = false;
172 int sync_status;
290 173
291 if (LOOPBACK_INTERNAL(efx)) 174 if (LOOPBACK_INTERNAL(efx))
292 return 1; 175 return true;
293 176
294 /* Read link status */ 177 /* Read link status */
295 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 178 falcon_read(efx, &reg, XX_CORE_STAT_REG);
296 179
297 align_done = EFX_DWORD_FIELD(reg, XX_ALIGN_DONE); 180 align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE);
298 sync_status = EFX_DWORD_FIELD(reg, XX_SYNC_STAT); 181 sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT);
299 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED)) 182 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
300 link_ok = 1; 183 link_ok = true;
301 184
302 /* Clear link status ready for next read */ 185 /* Clear link status ready for next read */
303 EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET); 186 EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
304 EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET); 187 EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
305 EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET); 188 EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
306 falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC); 189 falcon_write(efx, &reg, XX_CORE_STAT_REG);
307 190
308 /* If the link is up, then check the phy side of the xaui link 191 /* If the link is up, then check the phy side of the xaui link
309 * (error conditions from the wire side propoagate back through 192 * (error conditions from the wire side propoagate back through
310 * the phy to the xaui side). */ 193 * the phy to the xaui side). */
311 if (efx->link_up && link_ok) { 194 if (efx->link_up && link_ok) {
312 int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS); 195 if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS))
313 if (has_phyxs)
314 link_ok = mdio_clause45_phyxgxs_lane_sync(efx); 196 link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
315 } 197 }
316 198
@@ -325,15 +207,15 @@ int falcon_xaui_link_ok(struct efx_nic *efx)
325static void falcon_reconfigure_xmac_core(struct efx_nic *efx) 207static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
326{ 208{
327 unsigned int max_frame_len; 209 unsigned int max_frame_len;
328 efx_dword_t reg; 210 efx_oword_t reg;
329 int rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0; 211 bool rx_fc = !!(efx->flow_control & EFX_FC_RX);
330 212
331 /* Configure MAC - cut-thru mode is hard wired on */ 213 /* Configure MAC - cut-thru mode is hard wired on */
332 EFX_POPULATE_DWORD_3(reg, 214 EFX_POPULATE_DWORD_3(reg,
333 XM_RX_JUMBO_MODE, 1, 215 XM_RX_JUMBO_MODE, 1,
334 XM_TX_STAT_EN, 1, 216 XM_TX_STAT_EN, 1,
335 XM_RX_STAT_EN, 1); 217 XM_RX_STAT_EN, 1);
336 falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC); 218 falcon_write(efx, &reg, XM_GLB_CFG_REG);
337 219
338 /* Configure TX */ 220 /* Configure TX */
339 EFX_POPULATE_DWORD_6(reg, 221 EFX_POPULATE_DWORD_6(reg,
@@ -343,7 +225,7 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
343 XM_TXCRC, 1, 225 XM_TXCRC, 1,
344 XM_FCNTL, 1, 226 XM_FCNTL, 1,
345 XM_IPG, 0x3); 227 XM_IPG, 0x3);
346 falcon_xmac_writel(efx, &reg, XM_TX_CFG_REG_MAC); 228 falcon_write(efx, &reg, XM_TX_CFG_REG);
347 229
348 /* Configure RX */ 230 /* Configure RX */
349 EFX_POPULATE_DWORD_5(reg, 231 EFX_POPULATE_DWORD_5(reg,
@@ -352,21 +234,21 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
352 XM_ACPT_ALL_MCAST, 1, 234 XM_ACPT_ALL_MCAST, 1,
353 XM_ACPT_ALL_UCAST, efx->promiscuous, 235 XM_ACPT_ALL_UCAST, efx->promiscuous,
354 XM_PASS_CRC_ERR, 1); 236 XM_PASS_CRC_ERR, 1);
355 falcon_xmac_writel(efx, &reg, XM_RX_CFG_REG_MAC); 237 falcon_write(efx, &reg, XM_RX_CFG_REG);
356 238
357 /* Set frame length */ 239 /* Set frame length */
358 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); 240 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
359 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len); 241 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
360 falcon_xmac_writel(efx, &reg, XM_RX_PARAM_REG_MAC); 242 falcon_write(efx, &reg, XM_RX_PARAM_REG);
361 EFX_POPULATE_DWORD_2(reg, 243 EFX_POPULATE_DWORD_2(reg,
362 XM_MAX_TX_FRM_SIZE, max_frame_len, 244 XM_MAX_TX_FRM_SIZE, max_frame_len,
363 XM_TX_JUMBO_MODE, 1); 245 XM_TX_JUMBO_MODE, 1);
364 falcon_xmac_writel(efx, &reg, XM_TX_PARAM_REG_MAC); 246 falcon_write(efx, &reg, XM_TX_PARAM_REG);
365 247
366 EFX_POPULATE_DWORD_2(reg, 248 EFX_POPULATE_DWORD_2(reg,
367 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ 249 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
368 XM_DIS_FCNTL, rx_fc ? 0 : 1); 250 XM_DIS_FCNTL, !rx_fc);
369 falcon_xmac_writel(efx, &reg, XM_FC_REG_MAC); 251 falcon_write(efx, &reg, XM_FC_REG);
370 252
371 /* Set MAC address */ 253 /* Set MAC address */
372 EFX_POPULATE_DWORD_4(reg, 254 EFX_POPULATE_DWORD_4(reg,
@@ -374,83 +256,75 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
374 XM_ADR_1, efx->net_dev->dev_addr[1], 256 XM_ADR_1, efx->net_dev->dev_addr[1],
375 XM_ADR_2, efx->net_dev->dev_addr[2], 257 XM_ADR_2, efx->net_dev->dev_addr[2],
376 XM_ADR_3, efx->net_dev->dev_addr[3]); 258 XM_ADR_3, efx->net_dev->dev_addr[3]);
377 falcon_xmac_writel(efx, &reg, XM_ADR_LO_REG_MAC); 259 falcon_write(efx, &reg, XM_ADR_LO_REG);
378 EFX_POPULATE_DWORD_2(reg, 260 EFX_POPULATE_DWORD_2(reg,
379 XM_ADR_4, efx->net_dev->dev_addr[4], 261 XM_ADR_4, efx->net_dev->dev_addr[4],
380 XM_ADR_5, efx->net_dev->dev_addr[5]); 262 XM_ADR_5, efx->net_dev->dev_addr[5]);
381 falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC); 263 falcon_write(efx, &reg, XM_ADR_HI_REG);
382} 264}
383 265
384static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) 266static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
385{ 267{
386 efx_dword_t reg; 268 efx_oword_t reg;
387 int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0; 269 bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
388 int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0; 270 bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
389 int xgmii_loopback = 271 bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
390 (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
391 272
392 /* XGXS block is flaky and will need to be reset if moving 273 /* XGXS block is flaky and will need to be reset if moving
393 * into our out of XGMII, XGXS or XAUI loopbacks. */ 274 * into our out of XGMII, XGXS or XAUI loopbacks. */
394 if (EFX_WORKAROUND_5147(efx)) { 275 if (EFX_WORKAROUND_5147(efx)) {
395 int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; 276 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
396 int reset_xgxs; 277 bool reset_xgxs;
397 278
398 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 279 falcon_read(efx, &reg, XX_CORE_STAT_REG);
399 old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN); 280 old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN);
400 old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN); 281 old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN);
401 282
402 falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC); 283 falcon_read(efx, &reg, XX_SD_CTL_REG);
403 old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA); 284 old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA);
404 285
405 /* The PHY driver may have turned XAUI off */ 286 /* The PHY driver may have turned XAUI off */
406 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || 287 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
407 (xaui_loopback != old_xaui_loopback) || 288 (xaui_loopback != old_xaui_loopback) ||
408 (xgmii_loopback != old_xgmii_loopback)); 289 (xgmii_loopback != old_xgmii_loopback));
409 if (reset_xgxs) { 290
410 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC); 291 if (reset_xgxs)
411 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1); 292 falcon_reset_xaui(efx);
412 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
413 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
414 udelay(1);
415 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0);
416 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0);
417 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
418 udelay(1);
419 }
420 } 293 }
421 294
422 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 295 falcon_read(efx, &reg, XX_CORE_STAT_REG);
423 EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG, 296 EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG,
424 (xgxs_loopback || xaui_loopback) ? 297 (xgxs_loopback || xaui_loopback) ?
425 XX_FORCE_SIG_DECODE_FORCED : 0); 298 XX_FORCE_SIG_DECODE_FORCED : 0);
426 EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); 299 EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
427 EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); 300 EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
428 falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC); 301 falcon_write(efx, &reg, XX_CORE_STAT_REG);
429 302
430 falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC); 303 falcon_read(efx, &reg, XX_SD_CTL_REG);
431 EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback); 304 EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
432 EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback); 305 EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
433 EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback); 306 EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
434 EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback); 307 EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
435 falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC); 308 falcon_write(efx, &reg, XX_SD_CTL_REG);
436} 309}
437 310
438 311
439/* Try and bring the Falcon side of the Falcon-Phy XAUI link fails 312/* Try and bring the Falcon side of the Falcon-Phy XAUI link fails
440 * to come back up. Bash it until it comes back up */ 313 * to come back up. Bash it until it comes back up */
441static int falcon_check_xaui_link_up(struct efx_nic *efx) 314static bool falcon_check_xaui_link_up(struct efx_nic *efx)
442{ 315{
443 int max_tries, tries; 316 int max_tries, tries;
444 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; 317 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
445 max_tries = tries; 318 max_tries = tries;
446 319
447 if ((efx->loopback_mode == LOOPBACK_NETWORK) || 320 if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
448 (efx->phy_type == PHY_TYPE_NONE)) 321 (efx->phy_type == PHY_TYPE_NONE) ||
449 return 0; 322 efx_phy_mode_disabled(efx->phy_mode))
323 return false;
450 324
451 while (tries) { 325 while (tries) {
452 if (falcon_xaui_link_ok(efx)) 326 if (falcon_xaui_link_ok(efx))
453 return 1; 327 return true;
454 328
455 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", 329 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
456 __func__, tries); 330 __func__, tries);
@@ -461,18 +335,22 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
461 335
462 EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n", 336 EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n",
463 max_tries); 337 max_tries);
464 return 0; 338 return false;
465} 339}
466 340
467void falcon_reconfigure_xmac(struct efx_nic *efx) 341void falcon_reconfigure_xmac(struct efx_nic *efx)
468{ 342{
469 int xaui_link_ok; 343 bool xaui_link_ok;
470 344
471 falcon_mask_status_intr(efx, 0); 345 falcon_mask_status_intr(efx, false);
472 346
473 falcon_deconfigure_mac_wrapper(efx); 347 falcon_deconfigure_mac_wrapper(efx);
474 348
475 efx->tx_disabled = LOOPBACK_INTERNAL(efx); 349 /* Reconfigure the PHY, disabling transmit in mac level loopback. */
350 if (LOOPBACK_INTERNAL(efx))
351 efx->phy_mode |= PHY_MODE_TX_DISABLED;
352 else
353 efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
476 efx->phy_op->reconfigure(efx); 354 efx->phy_op->reconfigure(efx);
477 355
478 falcon_reconfigure_xgxs_core(efx); 356 falcon_reconfigure_xgxs_core(efx);
@@ -484,7 +362,7 @@ void falcon_reconfigure_xmac(struct efx_nic *efx)
484 xaui_link_ok = falcon_check_xaui_link_up(efx); 362 xaui_link_ok = falcon_check_xaui_link_up(efx);
485 363
486 if (xaui_link_ok && efx->link_up) 364 if (xaui_link_ok && efx->link_up)
487 falcon_mask_status_intr(efx, 1); 365 falcon_mask_status_intr(efx, true);
488} 366}
489 367
490void falcon_fini_xmac(struct efx_nic *efx) 368void falcon_fini_xmac(struct efx_nic *efx)
@@ -554,21 +432,23 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
554 432
555 /* Update derived statistics */ 433 /* Update derived statistics */
556 mac_stats->tx_good_bytes = 434 mac_stats->tx_good_bytes =
557 (mac_stats->tx_bytes - mac_stats->tx_bad_bytes); 435 (mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
436 mac_stats->tx_control * 64);
558 mac_stats->rx_bad_bytes = 437 mac_stats->rx_bad_bytes =
559 (mac_stats->rx_bytes - mac_stats->rx_good_bytes); 438 (mac_stats->rx_bytes - mac_stats->rx_good_bytes -
439 mac_stats->rx_control * 64);
560} 440}
561 441
562int falcon_check_xmac(struct efx_nic *efx) 442int falcon_check_xmac(struct efx_nic *efx)
563{ 443{
564 unsigned xaui_link_ok; 444 bool xaui_link_ok;
565 int rc; 445 int rc;
566 446
567 if ((efx->loopback_mode == LOOPBACK_NETWORK) || 447 if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
568 (efx->phy_type == PHY_TYPE_NONE)) 448 efx_phy_mode_disabled(efx->phy_mode))
569 return 0; 449 return 0;
570 450
571 falcon_mask_status_intr(efx, 0); 451 falcon_mask_status_intr(efx, false);
572 xaui_link_ok = falcon_xaui_link_ok(efx); 452 xaui_link_ok = falcon_xaui_link_ok(efx);
573 453
574 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) 454 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
@@ -579,7 +459,7 @@ int falcon_check_xmac(struct efx_nic *efx)
579 459
580 /* Unmask interrupt if everything was (and still is) ok */ 460 /* Unmask interrupt if everything was (and still is) ok */
581 if (xaui_link_ok && efx->link_up) 461 if (xaui_link_ok && efx->link_up)
582 falcon_mask_status_intr(efx, 1); 462 falcon_mask_status_intr(efx, true);
583 463
584 return rc; 464 return rc;
585} 465}
@@ -620,7 +500,7 @@ int falcon_xmac_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
620 500
621int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control) 501int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
622{ 502{
623 int reset; 503 bool reset;
624 504
625 if (flow_control & EFX_FC_AUTO) { 505 if (flow_control & EFX_FC_AUTO) {
626 EFX_LOG(efx, "10G does not support flow control " 506 EFX_LOG(efx, "10G does not support flow control "
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index edd07d4dee18..a31571c69137 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -13,10 +13,6 @@
13 13
14#include "net_driver.h" 14#include "net_driver.h"
15 15
16extern void falcon_xmac_writel(struct efx_nic *efx,
17 efx_dword_t *value, unsigned int mac_reg);
18extern void falcon_xmac_readl(struct efx_nic *efx,
19 efx_dword_t *value, unsigned int mac_reg);
20extern int falcon_init_xmac(struct efx_nic *efx); 16extern int falcon_init_xmac(struct efx_nic *efx);
21extern void falcon_reconfigure_xmac(struct efx_nic *efx); 17extern void falcon_reconfigure_xmac(struct efx_nic *efx);
22extern void falcon_update_stats_xmac(struct efx_nic *efx); 18extern void falcon_update_stats_xmac(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index c4f540e93b79..003e48dcb2f3 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -159,20 +159,21 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
159 return 0; 159 return 0;
160} 160}
161 161
162int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) 162bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
163{ 163{
164 int phy_id = efx->mii.phy_id; 164 int phy_id = efx->mii.phy_id;
165 int status; 165 int status;
166 int ok = 1; 166 bool ok = true;
167 int mmd = 0; 167 int mmd = 0;
168 int good;
169 168
170 /* If the port is in loopback, then we should only consider a subset 169 /* If the port is in loopback, then we should only consider a subset
171 * of mmd's */ 170 * of mmd's */
172 if (LOOPBACK_INTERNAL(efx)) 171 if (LOOPBACK_INTERNAL(efx))
173 return 1; 172 return true;
174 else if (efx->loopback_mode == LOOPBACK_NETWORK) 173 else if (efx->loopback_mode == LOOPBACK_NETWORK)
175 return 0; 174 return false;
175 else if (efx_phy_mode_disabled(efx->phy_mode))
176 return false;
176 else if (efx->loopback_mode == LOOPBACK_PHYXS) 177 else if (efx->loopback_mode == LOOPBACK_PHYXS)
177 mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS | 178 mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS |
178 MDIO_MMDREG_DEVS0_PCS | 179 MDIO_MMDREG_DEVS0_PCS |
@@ -192,8 +193,7 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
192 status = mdio_clause45_read(efx, phy_id, 193 status = mdio_clause45_read(efx, phy_id,
193 mmd, MDIO_MMDREG_STAT1); 194 mmd, MDIO_MMDREG_STAT1);
194 195
195 good = status & (1 << MDIO_MMDREG_STAT1_LINK_LBN); 196 ok = ok && (status & (1 << MDIO_MMDREG_STAT1_LINK_LBN));
196 ok = ok && good;
197 } 197 }
198 mmd_mask = (mmd_mask >> 1); 198 mmd_mask = (mmd_mask >> 1);
199 mmd++; 199 mmd++;
@@ -208,7 +208,7 @@ void mdio_clause45_transmit_disable(struct efx_nic *efx)
208 208
209 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, 209 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
210 MDIO_MMDREG_TXDIS); 210 MDIO_MMDREG_TXDIS);
211 if (efx->tx_disabled) 211 if (efx->phy_mode & PHY_MODE_TX_DISABLED)
212 ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); 212 ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
213 else 213 else
214 ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); 214 ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index cb99f3f4491c..19c42eaf7fb4 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -199,18 +199,19 @@ static inline u32 mdio_clause45_read_id(struct efx_nic *efx, int mmd)
199 return (id_hi << 16) | (id_low); 199 return (id_hi << 16) | (id_low);
200} 200}
201 201
202static inline int mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx) 202static inline bool mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
203{ 203{
204 int i, sync, lane_status; 204 int i, lane_status;
205 bool sync;
205 206
206 for (i = 0; i < 2; ++i) 207 for (i = 0; i < 2; ++i)
207 lane_status = mdio_clause45_read(efx, efx->mii.phy_id, 208 lane_status = mdio_clause45_read(efx, efx->mii.phy_id,
208 MDIO_MMD_PHYXS, 209 MDIO_MMD_PHYXS,
209 MDIO_PHYXS_LANE_STATE); 210 MDIO_PHYXS_LANE_STATE);
210 211
211 sync = (lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN)) != 0; 212 sync = !!(lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN));
212 if (!sync) 213 if (!sync)
213 EFX_INFO(efx, "XGXS lane status: %x\n", lane_status); 214 EFX_LOG(efx, "XGXS lane status: %x\n", lane_status);
214 return sync; 215 return sync;
215} 216}
216 217
@@ -230,8 +231,8 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
230 unsigned int mmd_mask, unsigned int fatal_mask); 231 unsigned int mmd_mask, unsigned int fatal_mask);
231 232
232/* Check the link status of specified mmds in bit mask */ 233/* Check the link status of specified mmds in bit mask */
233extern int mdio_clause45_links_ok(struct efx_nic *efx, 234extern bool mdio_clause45_links_ok(struct efx_nic *efx,
234 unsigned int mmd_mask); 235 unsigned int mmd_mask);
235 236
236/* Generic transmit disable support though PMAPMD */ 237/* Generic transmit disable support though PMAPMD */
237extern void mdio_clause45_transmit_disable(struct efx_nic *efx); 238extern void mdio_clause45_transmit_disable(struct efx_nic *efx);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 219c74a772c3..cdb11fad6050 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -88,9 +88,12 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
88 **************************************************************************/ 88 **************************************************************************/
89 89
90#define EFX_MAX_CHANNELS 32 90#define EFX_MAX_CHANNELS 32
91#define EFX_MAX_TX_QUEUES 1
92#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS 91#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
93 92
93#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
94#define EFX_TX_QUEUE_NO_CSUM 1
95#define EFX_TX_QUEUE_COUNT 2
96
94/** 97/**
95 * struct efx_special_buffer - An Efx special buffer 98 * struct efx_special_buffer - An Efx special buffer
96 * @addr: CPU base address of the buffer 99 * @addr: CPU base address of the buffer
@@ -127,7 +130,6 @@ struct efx_special_buffer {
127 * This field is zero when the queue slot is empty. 130 * This field is zero when the queue slot is empty.
128 * @continuation: True if this fragment is not the end of a packet. 131 * @continuation: True if this fragment is not the end of a packet.
129 * @unmap_single: True if pci_unmap_single should be used. 132 * @unmap_single: True if pci_unmap_single should be used.
130 * @unmap_addr: DMA address to unmap
131 * @unmap_len: Length of this fragment to unmap 133 * @unmap_len: Length of this fragment to unmap
132 */ 134 */
133struct efx_tx_buffer { 135struct efx_tx_buffer {
@@ -135,9 +137,8 @@ struct efx_tx_buffer {
135 struct efx_tso_header *tsoh; 137 struct efx_tso_header *tsoh;
136 dma_addr_t dma_addr; 138 dma_addr_t dma_addr;
137 unsigned short len; 139 unsigned short len;
138 unsigned char continuation; 140 bool continuation;
139 unsigned char unmap_single; 141 bool unmap_single;
140 dma_addr_t unmap_addr;
141 unsigned short unmap_len; 142 unsigned short unmap_len;
142}; 143};
143 144
@@ -156,13 +157,13 @@ struct efx_tx_buffer {
156 * 157 *
157 * @efx: The associated Efx NIC 158 * @efx: The associated Efx NIC
158 * @queue: DMA queue number 159 * @queue: DMA queue number
159 * @used: Queue is used by net driver
160 * @channel: The associated channel 160 * @channel: The associated channel
161 * @buffer: The software buffer ring 161 * @buffer: The software buffer ring
162 * @txd: The hardware descriptor ring 162 * @txd: The hardware descriptor ring
163 * @flushed: Used when handling queue flushing
163 * @read_count: Current read pointer. 164 * @read_count: Current read pointer.
164 * This is the number of buffers that have been removed from both rings. 165 * This is the number of buffers that have been removed from both rings.
165 * @stopped: Stopped flag. 166 * @stopped: Stopped count.
166 * Set if this TX queue is currently stopping its port. 167 * Set if this TX queue is currently stopping its port.
167 * @insert_count: Current insert pointer 168 * @insert_count: Current insert pointer
168 * This is the number of buffers that have been added to the 169 * This is the number of buffers that have been added to the
@@ -188,11 +189,11 @@ struct efx_tx_queue {
188 /* Members which don't change on the fast path */ 189 /* Members which don't change on the fast path */
189 struct efx_nic *efx ____cacheline_aligned_in_smp; 190 struct efx_nic *efx ____cacheline_aligned_in_smp;
190 int queue; 191 int queue;
191 int used;
192 struct efx_channel *channel; 192 struct efx_channel *channel;
193 struct efx_nic *nic; 193 struct efx_nic *nic;
194 struct efx_tx_buffer *buffer; 194 struct efx_tx_buffer *buffer;
195 struct efx_special_buffer txd; 195 struct efx_special_buffer txd;
196 bool flushed;
196 197
197 /* Members used mainly on the completion path */ 198 /* Members used mainly on the completion path */
198 unsigned int read_count ____cacheline_aligned_in_smp; 199 unsigned int read_count ____cacheline_aligned_in_smp;
@@ -232,7 +233,6 @@ struct efx_rx_buffer {
232 * struct efx_rx_queue - An Efx RX queue 233 * struct efx_rx_queue - An Efx RX queue
233 * @efx: The associated Efx NIC 234 * @efx: The associated Efx NIC
234 * @queue: DMA queue number 235 * @queue: DMA queue number
235 * @used: Queue is used by net driver
236 * @channel: The associated channel 236 * @channel: The associated channel
237 * @buffer: The software buffer ring 237 * @buffer: The software buffer ring
238 * @rxd: The hardware descriptor ring 238 * @rxd: The hardware descriptor ring
@@ -262,11 +262,11 @@ struct efx_rx_buffer {
262 * the remaining space in the allocation. 262 * the remaining space in the allocation.
263 * @buf_dma_addr: Page's DMA address. 263 * @buf_dma_addr: Page's DMA address.
264 * @buf_data: Page's host address. 264 * @buf_data: Page's host address.
265 * @flushed: Use when handling queue flushing
265 */ 266 */
266struct efx_rx_queue { 267struct efx_rx_queue {
267 struct efx_nic *efx; 268 struct efx_nic *efx;
268 int queue; 269 int queue;
269 int used;
270 struct efx_channel *channel; 270 struct efx_channel *channel;
271 struct efx_rx_buffer *buffer; 271 struct efx_rx_buffer *buffer;
272 struct efx_special_buffer rxd; 272 struct efx_special_buffer rxd;
@@ -288,6 +288,7 @@ struct efx_rx_queue {
288 struct page *buf_page; 288 struct page *buf_page;
289 dma_addr_t buf_dma_addr; 289 dma_addr_t buf_dma_addr;
290 char *buf_data; 290 char *buf_data;
291 bool flushed;
291}; 292};
292 293
293/** 294/**
@@ -325,12 +326,10 @@ enum efx_rx_alloc_method {
325 * queue. 326 * queue.
326 * 327 *
327 * @efx: Associated Efx NIC 328 * @efx: Associated Efx NIC
328 * @evqnum: Event queue number
329 * @channel: Channel instance number 329 * @channel: Channel instance number
330 * @used_flags: Channel is used by net driver 330 * @used_flags: Channel is used by net driver
331 * @enabled: Channel enabled indicator 331 * @enabled: Channel enabled indicator
332 * @irq: IRQ number (MSI and MSI-X only) 332 * @irq: IRQ number (MSI and MSI-X only)
333 * @has_interrupt: Channel has an interrupt
334 * @irq_moderation: IRQ moderation value (in us) 333 * @irq_moderation: IRQ moderation value (in us)
335 * @napi_dev: Net device used with NAPI 334 * @napi_dev: Net device used with NAPI
336 * @napi_str: NAPI control structure 335 * @napi_str: NAPI control structure
@@ -357,17 +356,14 @@ enum efx_rx_alloc_method {
357 */ 356 */
358struct efx_channel { 357struct efx_channel {
359 struct efx_nic *efx; 358 struct efx_nic *efx;
360 int evqnum;
361 int channel; 359 int channel;
362 int used_flags; 360 int used_flags;
363 int enabled; 361 bool enabled;
364 int irq; 362 int irq;
365 unsigned int has_interrupt;
366 unsigned int irq_moderation; 363 unsigned int irq_moderation;
367 struct net_device *napi_dev; 364 struct net_device *napi_dev;
368 struct napi_struct napi_str; 365 struct napi_struct napi_str;
369 struct work_struct reset_work; 366 bool work_pending;
370 int work_pending;
371 struct efx_special_buffer eventq; 367 struct efx_special_buffer eventq;
372 unsigned int eventq_read_ptr; 368 unsigned int eventq_read_ptr;
373 unsigned int last_eventq_read_ptr; 369 unsigned int last_eventq_read_ptr;
@@ -390,7 +386,7 @@ struct efx_channel {
390 * access with prefetches. 386 * access with prefetches.
391 */ 387 */
392 struct efx_rx_buffer *rx_pkt; 388 struct efx_rx_buffer *rx_pkt;
393 int rx_pkt_csummed; 389 bool rx_pkt_csummed;
394 390
395}; 391};
396 392
@@ -403,8 +399,8 @@ struct efx_channel {
403 */ 399 */
404struct efx_blinker { 400struct efx_blinker {
405 int led_num; 401 int led_num;
406 int state; 402 bool state;
407 int resubmit; 403 bool resubmit;
408 struct timer_list timer; 404 struct timer_list timer;
409}; 405};
410 406
@@ -432,8 +428,8 @@ struct efx_board {
432 * have a separate init callback that happens later than 428 * have a separate init callback that happens later than
433 * board init. */ 429 * board init. */
434 int (*init_leds)(struct efx_nic *efx); 430 int (*init_leds)(struct efx_nic *efx);
435 void (*set_fault_led) (struct efx_nic *efx, int state); 431 void (*set_fault_led) (struct efx_nic *efx, bool state);
436 void (*blink) (struct efx_nic *efx, int start); 432 void (*blink) (struct efx_nic *efx, bool start);
437 void (*fini) (struct efx_nic *nic); 433 void (*fini) (struct efx_nic *nic);
438 struct efx_blinker blinker; 434 struct efx_blinker blinker;
439 struct i2c_client *hwmon_client, *ioexp_client; 435 struct i2c_client *hwmon_client, *ioexp_client;
@@ -467,8 +463,7 @@ enum nic_state {
467 STATE_INIT = 0, 463 STATE_INIT = 0,
468 STATE_RUNNING = 1, 464 STATE_RUNNING = 1,
469 STATE_FINI = 2, 465 STATE_FINI = 2,
470 STATE_RESETTING = 3, /* rtnl_lock always held */ 466 STATE_DISABLED = 3,
471 STATE_DISABLED = 4,
472 STATE_MAX, 467 STATE_MAX,
473}; 468};
474 469
@@ -479,7 +474,7 @@ enum nic_state {
479 * This is the equivalent of NET_IP_ALIGN [which controls the alignment 474 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
480 * of the skb->head for hardware DMA]. 475 * of the skb->head for hardware DMA].
481 */ 476 */
482#if defined(__i386__) || defined(__x86_64__) 477#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
483#define EFX_PAGE_IP_ALIGN 0 478#define EFX_PAGE_IP_ALIGN 0
484#else 479#else
485#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN 480#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
@@ -512,7 +507,6 @@ enum efx_fc_type {
512 * @clear_interrupt: Clear down interrupt 507 * @clear_interrupt: Clear down interrupt
513 * @blink: Blink LEDs 508 * @blink: Blink LEDs
514 * @check_hw: Check hardware 509 * @check_hw: Check hardware
515 * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
516 * @mmds: MMD presence mask 510 * @mmds: MMD presence mask
517 * @loopbacks: Supported loopback modes mask 511 * @loopbacks: Supported loopback modes mask
518 */ 512 */
@@ -522,11 +516,28 @@ struct efx_phy_operations {
522 void (*reconfigure) (struct efx_nic *efx); 516 void (*reconfigure) (struct efx_nic *efx);
523 void (*clear_interrupt) (struct efx_nic *efx); 517 void (*clear_interrupt) (struct efx_nic *efx);
524 int (*check_hw) (struct efx_nic *efx); 518 int (*check_hw) (struct efx_nic *efx);
525 void (*reset_xaui) (struct efx_nic *efx); 519 int (*test) (struct efx_nic *efx);
526 int mmds; 520 int mmds;
527 unsigned loopbacks; 521 unsigned loopbacks;
528}; 522};
529 523
524/**
525 * @enum efx_phy_mode - PHY operating mode flags
526 * @PHY_MODE_NORMAL: on and should pass traffic
527 * @PHY_MODE_TX_DISABLED: on with TX disabled
528 * @PHY_MODE_SPECIAL: on but will not pass traffic
529 */
530enum efx_phy_mode {
531 PHY_MODE_NORMAL = 0,
532 PHY_MODE_TX_DISABLED = 1,
533 PHY_MODE_SPECIAL = 8,
534};
535
536static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
537{
538 return !!(mode & ~PHY_MODE_TX_DISABLED);
539}
540
530/* 541/*
531 * Efx extended statistics 542 * Efx extended statistics
532 * 543 *
@@ -632,7 +643,7 @@ union efx_multicast_hash {
632 * @tx_queue: TX DMA queues 643 * @tx_queue: TX DMA queues
633 * @rx_queue: RX DMA queues 644 * @rx_queue: RX DMA queues
634 * @channel: Channels 645 * @channel: Channels
635 * @rss_queues: Number of RSS queues 646 * @n_rx_queues: Number of RX queues
636 * @rx_buffer_len: RX buffer length 647 * @rx_buffer_len: RX buffer length
637 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 648 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
638 * @irq_status: Interrupt status buffer 649 * @irq_status: Interrupt status buffer
@@ -640,15 +651,20 @@ union efx_multicast_hash {
640 * This register is written with the SMP processor ID whenever an 651 * This register is written with the SMP processor ID whenever an
641 * interrupt is handled. It is used by falcon_test_interrupt() 652 * interrupt is handled. It is used by falcon_test_interrupt()
642 * to verify that an interrupt has occurred. 653 * to verify that an interrupt has occurred.
654 * @spi_flash: SPI flash device
655 * This field will be %NULL if no flash device is present.
656 * @spi_eeprom: SPI EEPROM device
657 * This field will be %NULL if no EEPROM device is present.
643 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 658 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
644 * @nic_data: Hardware dependant state 659 * @nic_data: Hardware dependant state
645 * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and 660 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
646 * efx_reconfigure_port() 661 * @port_inhibited, efx_monitor() and efx_reconfigure_port()
647 * @port_enabled: Port enabled indicator. 662 * @port_enabled: Port enabled indicator.
648 * Serialises efx_stop_all(), efx_start_all() and efx_monitor() and 663 * Serialises efx_stop_all(), efx_start_all() and efx_monitor() and
649 * efx_reconfigure_work with kernel interfaces. Safe to read under any 664 * efx_reconfigure_work with kernel interfaces. Safe to read under any
650 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must 665 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
651 * be held to modify it. 666 * be held to modify it.
667 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
652 * @port_initialized: Port initialized? 668 * @port_initialized: Port initialized?
653 * @net_dev: Operating system network device. Consider holding the rtnl lock 669 * @net_dev: Operating system network device. Consider holding the rtnl lock
654 * @rx_checksum_enabled: RX checksumming enabled 670 * @rx_checksum_enabled: RX checksumming enabled
@@ -658,14 +674,16 @@ union efx_multicast_hash {
658 * can provide. Generic code converts these into a standard 674 * can provide. Generic code converts these into a standard
659 * &struct net_device_stats. 675 * &struct net_device_stats.
660 * @stats_buffer: DMA buffer for statistics 676 * @stats_buffer: DMA buffer for statistics
661 * @stats_lock: Statistics update lock 677 * @stats_lock: Statistics update lock. Serialises statistics fetches
678 * @stats_enabled: Temporarily disable statistics fetches.
679 * Serialised by @stats_lock
662 * @mac_address: Permanent MAC address 680 * @mac_address: Permanent MAC address
663 * @phy_type: PHY type 681 * @phy_type: PHY type
664 * @phy_lock: PHY access lock 682 * @phy_lock: PHY access lock
665 * @phy_op: PHY interface 683 * @phy_op: PHY interface
666 * @phy_data: PHY private data (including PHY-specific stats) 684 * @phy_data: PHY private data (including PHY-specific stats)
667 * @mii: PHY interface 685 * @mii: PHY interface
668 * @tx_disabled: PHY transmitter turned off 686 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
669 * @link_up: Link status 687 * @link_up: Link status
670 * @link_options: Link options (MII/GMII format) 688 * @link_options: Link options (MII/GMII format)
671 * @n_link_state_changes: Number of times the link has changed state 689 * @n_link_state_changes: Number of times the link has changed state
@@ -700,27 +718,31 @@ struct efx_nic {
700 enum nic_state state; 718 enum nic_state state;
701 enum reset_type reset_pending; 719 enum reset_type reset_pending;
702 720
703 struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES]; 721 struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
704 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; 722 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
705 struct efx_channel channel[EFX_MAX_CHANNELS]; 723 struct efx_channel channel[EFX_MAX_CHANNELS];
706 724
707 int rss_queues; 725 int n_rx_queues;
708 unsigned int rx_buffer_len; 726 unsigned int rx_buffer_len;
709 unsigned int rx_buffer_order; 727 unsigned int rx_buffer_order;
710 728
711 struct efx_buffer irq_status; 729 struct efx_buffer irq_status;
712 volatile signed int last_irq_cpu; 730 volatile signed int last_irq_cpu;
713 731
732 struct efx_spi_device *spi_flash;
733 struct efx_spi_device *spi_eeprom;
734
714 unsigned n_rx_nodesc_drop_cnt; 735 unsigned n_rx_nodesc_drop_cnt;
715 736
716 struct falcon_nic_data *nic_data; 737 struct falcon_nic_data *nic_data;
717 738
718 struct mutex mac_lock; 739 struct mutex mac_lock;
719 int port_enabled; 740 bool port_enabled;
741 bool port_inhibited;
720 742
721 int port_initialized; 743 bool port_initialized;
722 struct net_device *net_dev; 744 struct net_device *net_dev;
723 int rx_checksum_enabled; 745 bool rx_checksum_enabled;
724 746
725 atomic_t netif_stop_count; 747 atomic_t netif_stop_count;
726 spinlock_t netif_stop_lock; 748 spinlock_t netif_stop_lock;
@@ -728,6 +750,7 @@ struct efx_nic {
728 struct efx_mac_stats mac_stats; 750 struct efx_mac_stats mac_stats;
729 struct efx_buffer stats_buffer; 751 struct efx_buffer stats_buffer;
730 spinlock_t stats_lock; 752 spinlock_t stats_lock;
753 bool stats_enabled;
731 754
732 unsigned char mac_address[ETH_ALEN]; 755 unsigned char mac_address[ETH_ALEN];
733 756
@@ -736,13 +759,13 @@ struct efx_nic {
736 struct efx_phy_operations *phy_op; 759 struct efx_phy_operations *phy_op;
737 void *phy_data; 760 void *phy_data;
738 struct mii_if_info mii; 761 struct mii_if_info mii;
739 unsigned tx_disabled; 762 enum efx_phy_mode phy_mode;
740 763
741 int link_up; 764 bool link_up;
742 unsigned int link_options; 765 unsigned int link_options;
743 unsigned int n_link_state_changes; 766 unsigned int n_link_state_changes;
744 767
745 int promiscuous; 768 bool promiscuous;
746 union efx_multicast_hash multicast_hash; 769 union efx_multicast_hash multicast_hash;
747 enum efx_fc_type flow_control; 770 enum efx_fc_type flow_control;
748 struct work_struct reconfigure_work; 771 struct work_struct reconfigure_work;
@@ -829,50 +852,33 @@ struct efx_nic_type {
829 continue; \ 852 continue; \
830 else 853 else
831 854
832/* Iterate over all used channels with interrupts */
833#define efx_for_each_channel_with_interrupt(_channel, _efx) \
834 for (_channel = &_efx->channel[0]; \
835 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
836 _channel++) \
837 if (!(_channel->used_flags && _channel->has_interrupt)) \
838 continue; \
839 else
840
841/* Iterate over all used TX queues */ 855/* Iterate over all used TX queues */
842#define efx_for_each_tx_queue(_tx_queue, _efx) \ 856#define efx_for_each_tx_queue(_tx_queue, _efx) \
843 for (_tx_queue = &_efx->tx_queue[0]; \ 857 for (_tx_queue = &_efx->tx_queue[0]; \
844 _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES]; \ 858 _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
845 _tx_queue++) \ 859 _tx_queue++)
846 if (!_tx_queue->used) \
847 continue; \
848 else
849 860
850/* Iterate over all TX queues belonging to a channel */ 861/* Iterate over all TX queues belonging to a channel */
851#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 862#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
852 for (_tx_queue = &_channel->efx->tx_queue[0]; \ 863 for (_tx_queue = &_channel->efx->tx_queue[0]; \
853 _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES]; \ 864 _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
854 _tx_queue++) \ 865 _tx_queue++) \
855 if ((!_tx_queue->used) || \ 866 if (_tx_queue->channel != _channel) \
856 (_tx_queue->channel != _channel)) \
857 continue; \ 867 continue; \
858 else 868 else
859 869
860/* Iterate over all used RX queues */ 870/* Iterate over all used RX queues */
861#define efx_for_each_rx_queue(_rx_queue, _efx) \ 871#define efx_for_each_rx_queue(_rx_queue, _efx) \
862 for (_rx_queue = &_efx->rx_queue[0]; \ 872 for (_rx_queue = &_efx->rx_queue[0]; \
863 _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES]; \ 873 _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \
864 _rx_queue++) \ 874 _rx_queue++)
865 if (!_rx_queue->used) \
866 continue; \
867 else
868 875
869/* Iterate over all RX queues belonging to a channel */ 876/* Iterate over all RX queues belonging to a channel */
870#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 877#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
871 for (_rx_queue = &_channel->efx->rx_queue[0]; \ 878 for (_rx_queue = &_channel->efx->rx_queue[_channel->channel]; \
872 _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES]; \ 879 _rx_queue; \
873 _rx_queue++) \ 880 _rx_queue = NULL) \
874 if ((!_rx_queue->used) || \ 881 if (_rx_queue->channel != _channel) \
875 (_rx_queue->channel != _channel)) \
876 continue; \ 882 continue; \
877 else 883 else
878 884
@@ -886,13 +892,13 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
886} 892}
887 893
888/* Set bit in a little-endian bitfield */ 894/* Set bit in a little-endian bitfield */
889static inline void set_bit_le(int nr, unsigned char *addr) 895static inline void set_bit_le(unsigned nr, unsigned char *addr)
890{ 896{
891 addr[nr / 8] |= (1 << (nr % 8)); 897 addr[nr / 8] |= (1 << (nr % 8));
892} 898}
893 899
894/* Clear bit in a little-endian bitfield */ 900/* Clear bit in a little-endian bitfield */
895static inline void clear_bit_le(int nr, unsigned char *addr) 901static inline void clear_bit_le(unsigned nr, unsigned char *addr)
896{ 902{
897 addr[nr / 8] &= ~(1 << (nr % 8)); 903 addr[nr / 8] &= ~(1 << (nr % 8));
898} 904}
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 9d02c84e6b2d..f746536f4ffa 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -15,15 +15,7 @@
15 */ 15 */
16extern struct efx_phy_operations falcon_tenxpress_phy_ops; 16extern struct efx_phy_operations falcon_tenxpress_phy_ops;
17 17
18enum tenxpress_state { 18extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
19 TENXPRESS_STATUS_OFF = 0,
20 TENXPRESS_STATUS_OTEMP = 1,
21 TENXPRESS_STATUS_NORMAL = 2,
22};
23
24extern void tenxpress_set_state(struct efx_nic *efx,
25 enum tenxpress_state state);
26extern void tenxpress_phy_blink(struct efx_nic *efx, int blink);
27extern void tenxpress_crc_err(struct efx_nic *efx); 19extern void tenxpress_crc_err(struct efx_nic *efx);
28 20
29/**************************************************************************** 21/****************************************************************************
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 0d27dd39bc09..0f805da4ce55 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -212,8 +212,8 @@ void efx_lro_fini(struct net_lro_mgr *lro_mgr)
212 * and populates a struct efx_rx_buffer with the relevant 212 * and populates a struct efx_rx_buffer with the relevant
213 * information. Return a negative error code or 0 on success. 213 * information. Return a negative error code or 0 on success.
214 */ 214 */
215static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, 215static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
216 struct efx_rx_buffer *rx_buf) 216 struct efx_rx_buffer *rx_buf)
217{ 217{
218 struct efx_nic *efx = rx_queue->efx; 218 struct efx_nic *efx = rx_queue->efx;
219 struct net_device *net_dev = efx->net_dev; 219 struct net_device *net_dev = efx->net_dev;
@@ -252,8 +252,8 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
252 * and populates a struct efx_rx_buffer with the relevant 252 * and populates a struct efx_rx_buffer with the relevant
253 * information. Return a negative error code or 0 on success. 253 * information. Return a negative error code or 0 on success.
254 */ 254 */
255static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, 255static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
256 struct efx_rx_buffer *rx_buf) 256 struct efx_rx_buffer *rx_buf)
257{ 257{
258 struct efx_nic *efx = rx_queue->efx; 258 struct efx_nic *efx = rx_queue->efx;
259 int bytes, space, offset; 259 int bytes, space, offset;
@@ -319,8 +319,8 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
319 * and populates a struct efx_rx_buffer with the relevant 319 * and populates a struct efx_rx_buffer with the relevant
320 * information. 320 * information.
321 */ 321 */
322static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue, 322static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
323 struct efx_rx_buffer *new_rx_buf) 323 struct efx_rx_buffer *new_rx_buf)
324{ 324{
325 int rc = 0; 325 int rc = 0;
326 326
@@ -340,8 +340,8 @@ static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
340 return rc; 340 return rc;
341} 341}
342 342
343static inline void efx_unmap_rx_buffer(struct efx_nic *efx, 343static void efx_unmap_rx_buffer(struct efx_nic *efx,
344 struct efx_rx_buffer *rx_buf) 344 struct efx_rx_buffer *rx_buf)
345{ 345{
346 if (rx_buf->page) { 346 if (rx_buf->page) {
347 EFX_BUG_ON_PARANOID(rx_buf->skb); 347 EFX_BUG_ON_PARANOID(rx_buf->skb);
@@ -357,8 +357,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
357 } 357 }
358} 358}
359 359
360static inline void efx_free_rx_buffer(struct efx_nic *efx, 360static void efx_free_rx_buffer(struct efx_nic *efx,
361 struct efx_rx_buffer *rx_buf) 361 struct efx_rx_buffer *rx_buf)
362{ 362{
363 if (rx_buf->page) { 363 if (rx_buf->page) {
364 __free_pages(rx_buf->page, efx->rx_buffer_order); 364 __free_pages(rx_buf->page, efx->rx_buffer_order);
@@ -369,8 +369,8 @@ static inline void efx_free_rx_buffer(struct efx_nic *efx,
369 } 369 }
370} 370}
371 371
372static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, 372static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
373 struct efx_rx_buffer *rx_buf) 373 struct efx_rx_buffer *rx_buf)
374{ 374{
375 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); 375 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
376 efx_free_rx_buffer(rx_queue->efx, rx_buf); 376 efx_free_rx_buffer(rx_queue->efx, rx_buf);
@@ -506,10 +506,10 @@ void efx_rx_work(struct work_struct *data)
506 efx_schedule_slow_fill(rx_queue, 1); 506 efx_schedule_slow_fill(rx_queue, 1);
507} 507}
508 508
509static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, 509static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
510 struct efx_rx_buffer *rx_buf, 510 struct efx_rx_buffer *rx_buf,
511 int len, int *discard, 511 int len, bool *discard,
512 int *leak_packet) 512 bool *leak_packet)
513{ 513{
514 struct efx_nic *efx = rx_queue->efx; 514 struct efx_nic *efx = rx_queue->efx;
515 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; 515 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -520,7 +520,7 @@ static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
520 /* The packet must be discarded, but this is only a fatal error 520 /* The packet must be discarded, but this is only a fatal error
521 * if the caller indicated it was 521 * if the caller indicated it was
522 */ 522 */
523 *discard = 1; 523 *discard = true;
524 524
525 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { 525 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
526 EFX_ERR_RL(efx, " RX queue %d seriously overlength " 526 EFX_ERR_RL(efx, " RX queue %d seriously overlength "
@@ -546,8 +546,8 @@ static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
546 * Handles driverlink veto, and passes the fragment up via 546 * Handles driverlink veto, and passes the fragment up via
547 * the appropriate LRO method 547 * the appropriate LRO method
548 */ 548 */
549static inline void efx_rx_packet_lro(struct efx_channel *channel, 549static void efx_rx_packet_lro(struct efx_channel *channel,
550 struct efx_rx_buffer *rx_buf) 550 struct efx_rx_buffer *rx_buf)
551{ 551{
552 struct net_lro_mgr *lro_mgr = &channel->lro_mgr; 552 struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
553 void *priv = channel; 553 void *priv = channel;
@@ -574,9 +574,9 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
574} 574}
575 575
576/* Allocate and construct an SKB around a struct page.*/ 576/* Allocate and construct an SKB around a struct page.*/
577static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf, 577static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
578 struct efx_nic *efx, 578 struct efx_nic *efx,
579 int hdr_len) 579 int hdr_len)
580{ 580{
581 struct sk_buff *skb; 581 struct sk_buff *skb;
582 582
@@ -621,11 +621,11 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
621} 621}
622 622
623void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 623void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
624 unsigned int len, int checksummed, int discard) 624 unsigned int len, bool checksummed, bool discard)
625{ 625{
626 struct efx_nic *efx = rx_queue->efx; 626 struct efx_nic *efx = rx_queue->efx;
627 struct efx_rx_buffer *rx_buf; 627 struct efx_rx_buffer *rx_buf;
628 int leak_packet = 0; 628 bool leak_packet = false;
629 629
630 rx_buf = efx_rx_buffer(rx_queue, index); 630 rx_buf = efx_rx_buffer(rx_queue, index);
631 EFX_BUG_ON_PARANOID(!rx_buf->data); 631 EFX_BUG_ON_PARANOID(!rx_buf->data);
@@ -683,11 +683,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
683 683
684/* Handle a received packet. Second half: Touches packet payload. */ 684/* Handle a received packet. Second half: Touches packet payload. */
685void __efx_rx_packet(struct efx_channel *channel, 685void __efx_rx_packet(struct efx_channel *channel,
686 struct efx_rx_buffer *rx_buf, int checksummed) 686 struct efx_rx_buffer *rx_buf, bool checksummed)
687{ 687{
688 struct efx_nic *efx = channel->efx; 688 struct efx_nic *efx = channel->efx;
689 struct sk_buff *skb; 689 struct sk_buff *skb;
690 int lro = efx->net_dev->features & NETIF_F_LRO; 690 bool lro = !!(efx->net_dev->features & NETIF_F_LRO);
691 691
692 /* If we're in loopback test, then pass the packet directly to the 692 /* If we're in loopback test, then pass the packet directly to the
693 * loopback layer, and free the rx_buf here 693 * loopback layer, and free the rx_buf here
@@ -789,27 +789,18 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
789 /* Allocate RX buffers */ 789 /* Allocate RX buffers */
790 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer); 790 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
791 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); 791 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
792 if (!rx_queue->buffer) { 792 if (!rx_queue->buffer)
793 rc = -ENOMEM; 793 return -ENOMEM;
794 goto fail1;
795 }
796 794
797 rc = falcon_probe_rx(rx_queue); 795 rc = falcon_probe_rx(rx_queue);
798 if (rc) 796 if (rc) {
799 goto fail2; 797 kfree(rx_queue->buffer);
800 798 rx_queue->buffer = NULL;
801 return 0; 799 }
802
803 fail2:
804 kfree(rx_queue->buffer);
805 rx_queue->buffer = NULL;
806 fail1:
807 rx_queue->used = 0;
808
809 return rc; 800 return rc;
810} 801}
811 802
812int efx_init_rx_queue(struct efx_rx_queue *rx_queue) 803void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
813{ 804{
814 struct efx_nic *efx = rx_queue->efx; 805 struct efx_nic *efx = rx_queue->efx;
815 unsigned int max_fill, trigger, limit; 806 unsigned int max_fill, trigger, limit;
@@ -833,7 +824,7 @@ int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
833 rx_queue->fast_fill_limit = limit; 824 rx_queue->fast_fill_limit = limit;
834 825
835 /* Set up RX descriptor ring */ 826 /* Set up RX descriptor ring */
836 return falcon_init_rx(rx_queue); 827 falcon_init_rx(rx_queue);
837} 828}
838 829
839void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) 830void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
@@ -872,7 +863,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
872 863
873 kfree(rx_queue->buffer); 864 kfree(rx_queue->buffer);
874 rx_queue->buffer = NULL; 865 rx_queue->buffer = NULL;
875 rx_queue->used = 0;
876} 866}
877 867
878void efx_flush_lro(struct efx_channel *channel) 868void efx_flush_lro(struct efx_channel *channel)
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
index f35e377bfc5f..0e88a9ddc1c6 100644
--- a/drivers/net/sfc/rx.h
+++ b/drivers/net/sfc/rx.h
@@ -14,7 +14,7 @@
14 14
15int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 15int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
16void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); 16void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
17int efx_init_rx_queue(struct efx_rx_queue *rx_queue); 17void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); 18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
19 19
20int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx); 20int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
@@ -24,6 +24,6 @@ void efx_rx_strategy(struct efx_channel *channel);
24void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 24void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
25void efx_rx_work(struct work_struct *data); 25void efx_rx_work(struct work_struct *data);
26void __efx_rx_packet(struct efx_channel *channel, 26void __efx_rx_packet(struct efx_channel *channel,
27 struct efx_rx_buffer *rx_buf, int checksummed); 27 struct efx_rx_buffer *rx_buf, bool checksummed);
28 28
29#endif /* EFX_RX_H */ 29#endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 3b2de9fe7f27..362956e3fe17 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -27,6 +27,9 @@
27#include "boards.h" 27#include "boards.h"
28#include "workarounds.h" 28#include "workarounds.h"
29#include "mac.h" 29#include "mac.h"
30#include "spi.h"
31#include "falcon_io.h"
32#include "mdio_10g.h"
30 33
31/* 34/*
32 * Loopback test packet structure 35 * Loopback test packet structure
@@ -51,7 +54,7 @@ static const char *payload_msg =
51 "Hello world! This is an Efx loopback test in progress!"; 54 "Hello world! This is an Efx loopback test in progress!";
52 55
53/** 56/**
54 * efx_selftest_state - persistent state during a selftest 57 * efx_loopback_state - persistent state during a loopback selftest
55 * @flush: Drop all packets in efx_loopback_rx_packet 58 * @flush: Drop all packets in efx_loopback_rx_packet
56 * @packet_count: Number of packets being used in this test 59 * @packet_count: Number of packets being used in this test
57 * @skbs: An array of skbs transmitted 60 * @skbs: An array of skbs transmitted
@@ -59,10 +62,14 @@ static const char *payload_msg =
59 * @rx_bad: RX bad packet count 62 * @rx_bad: RX bad packet count
60 * @payload: Payload used in tests 63 * @payload: Payload used in tests
61 */ 64 */
62struct efx_selftest_state { 65struct efx_loopback_state {
63 int flush; 66 bool flush;
64 int packet_count; 67 int packet_count;
65 struct sk_buff **skbs; 68 struct sk_buff **skbs;
69
70 /* Checksums are being offloaded */
71 bool offload_csum;
72
66 atomic_t rx_good; 73 atomic_t rx_good;
67 atomic_t rx_bad; 74 atomic_t rx_bad;
68 struct efx_loopback_payload payload; 75 struct efx_loopback_payload payload;
@@ -70,21 +77,65 @@ struct efx_selftest_state {
70 77
71/************************************************************************** 78/**************************************************************************
72 * 79 *
73 * Configurable values 80 * MII, NVRAM and register tests
74 * 81 *
75 **************************************************************************/ 82 **************************************************************************/
76 83
77/* Level of loopback testing 84static int efx_test_mii(struct efx_nic *efx, struct efx_self_tests *tests)
78 * 85{
79 * The maximum packet burst length is 16**(n-1), i.e. 86 int rc = 0;
80 * 87 u16 physid1, physid2;
81 * - Level 0 : no packets 88 struct mii_if_info *mii = &efx->mii;
82 * - Level 1 : 1 packet 89 struct net_device *net_dev = efx->net_dev;
83 * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets) 90
84 * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packet, 1 * 256 packets) 91 if (efx->phy_type == PHY_TYPE_NONE)
85 * 92 return 0;
86 */ 93
87static unsigned int loopback_test_level = 3; 94 mutex_lock(&efx->mac_lock);
95 tests->mii = -1;
96
97 physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
98 physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
99
100 if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
101 (physid2 == 0x0000) || (physid2 == 0xffff)) {
102 EFX_ERR(efx, "no MII PHY present with ID %d\n",
103 mii->phy_id);
104 rc = -EINVAL;
105 goto out;
106 }
107
108 rc = mdio_clause45_check_mmds(efx, efx->phy_op->mmds, 0);
109 if (rc)
110 goto out;
111
112out:
113 mutex_unlock(&efx->mac_lock);
114 tests->mii = rc ? -1 : 1;
115 return rc;
116}
117
118static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
119{
120 int rc;
121
122 rc = falcon_read_nvram(efx, NULL);
123 tests->nvram = rc ? -1 : 1;
124 return rc;
125}
126
127static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
128{
129 int rc;
130
131 /* Not supported on A-series silicon */
132 if (falcon_rev(efx) < FALCON_REV_B0)
133 return 0;
134
135 rc = falcon_test_registers(efx);
136 tests->registers = rc ? -1 : 1;
137 return rc;
138}
88 139
89/************************************************************************** 140/**************************************************************************
90 * 141 *
@@ -107,7 +158,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
107 158
108 /* ACK each interrupting event queue. Receiving an interrupt due to 159 /* ACK each interrupting event queue. Receiving an interrupt due to
109 * traffic before a test event is raised is considered a pass */ 160 * traffic before a test event is raised is considered a pass */
110 efx_for_each_channel_with_interrupt(channel, efx) { 161 efx_for_each_channel(channel, efx) {
111 if (channel->work_pending) 162 if (channel->work_pending)
112 efx_process_channel_now(channel); 163 efx_process_channel_now(channel);
113 if (efx->last_irq_cpu >= 0) 164 if (efx->last_irq_cpu >= 0)
@@ -132,41 +183,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
132 return 0; 183 return 0;
133} 184}
134 185
135/* Test generation and receipt of non-interrupting events */
136static int efx_test_eventq(struct efx_channel *channel,
137 struct efx_self_tests *tests)
138{
139 unsigned int magic;
140
141 /* Channel specific code, limited to 20 bits */
142 magic = (0x00010150 + channel->channel);
143 EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
144 channel->channel, magic);
145
146 tests->eventq_dma[channel->channel] = -1;
147 tests->eventq_int[channel->channel] = 1; /* fake pass */
148 tests->eventq_poll[channel->channel] = 1; /* fake pass */
149
150 /* Reset flag and zero magic word */
151 channel->efx->last_irq_cpu = -1;
152 channel->eventq_magic = 0;
153 smp_wmb();
154
155 falcon_generate_test_event(channel, magic);
156 udelay(1);
157
158 efx_process_channel_now(channel);
159 if (channel->eventq_magic != magic) {
160 EFX_ERR(channel->efx, "channel %d failed to see test event\n",
161 channel->channel);
162 return -ETIMEDOUT;
163 } else {
164 tests->eventq_dma[channel->channel] = 1;
165 }
166
167 return 0;
168}
169
170/* Test generation and receipt of interrupting events */ 186/* Test generation and receipt of interrupting events */
171static int efx_test_eventq_irq(struct efx_channel *channel, 187static int efx_test_eventq_irq(struct efx_channel *channel,
172 struct efx_self_tests *tests) 188 struct efx_self_tests *tests)
@@ -230,39 +246,18 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
230 return 0; 246 return 0;
231} 247}
232 248
233/************************************************************************** 249static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests)
234 *
235 * PHY testing
236 *
237 **************************************************************************/
238
239/* Check PHY presence by reading the PHY ID registers */
240static int efx_test_phy(struct efx_nic *efx,
241 struct efx_self_tests *tests)
242{ 250{
243 u16 physid1, physid2; 251 int rc;
244 struct mii_if_info *mii = &efx->mii;
245 struct net_device *net_dev = efx->net_dev;
246 252
247 if (efx->phy_type == PHY_TYPE_NONE) 253 if (!efx->phy_op->test)
248 return 0; 254 return 0;
249 255
250 EFX_LOG(efx, "testing PHY presence\n"); 256 mutex_lock(&efx->mac_lock);
251 tests->phy_ok = -1; 257 rc = efx->phy_op->test(efx);
252 258 mutex_unlock(&efx->mac_lock);
253 physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1); 259 tests->phy = rc ? -1 : 1;
254 physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2); 260 return rc;
255
256 if ((physid1 != 0x0000) && (physid1 != 0xffff) &&
257 (physid2 != 0x0000) && (physid2 != 0xffff)) {
258 EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n",
259 mii->phy_id, physid1, physid2);
260 tests->phy_ok = 1;
261 return 0;
262 }
263
264 EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id);
265 return -ENODEV;
266} 261}
267 262
268/************************************************************************** 263/**************************************************************************
@@ -278,7 +273,7 @@ static int efx_test_phy(struct efx_nic *efx,
278void efx_loopback_rx_packet(struct efx_nic *efx, 273void efx_loopback_rx_packet(struct efx_nic *efx,
279 const char *buf_ptr, int pkt_len) 274 const char *buf_ptr, int pkt_len)
280{ 275{
281 struct efx_selftest_state *state = efx->loopback_selftest; 276 struct efx_loopback_state *state = efx->loopback_selftest;
282 struct efx_loopback_payload *received; 277 struct efx_loopback_payload *received;
283 struct efx_loopback_payload *payload; 278 struct efx_loopback_payload *payload;
284 279
@@ -289,11 +284,12 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
289 return; 284 return;
290 285
291 payload = &state->payload; 286 payload = &state->payload;
292 287
293 received = (struct efx_loopback_payload *) buf_ptr; 288 received = (struct efx_loopback_payload *) buf_ptr;
294 received->ip.saddr = payload->ip.saddr; 289 received->ip.saddr = payload->ip.saddr;
295 received->ip.check = payload->ip.check; 290 if (state->offload_csum)
296 291 received->ip.check = payload->ip.check;
292
297 /* Check that header exists */ 293 /* Check that header exists */
298 if (pkt_len < sizeof(received->header)) { 294 if (pkt_len < sizeof(received->header)) {
299 EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " 295 EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
@@ -362,7 +358,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
362/* Initialise an efx_selftest_state for a new iteration */ 358/* Initialise an efx_selftest_state for a new iteration */
363static void efx_iterate_state(struct efx_nic *efx) 359static void efx_iterate_state(struct efx_nic *efx)
364{ 360{
365 struct efx_selftest_state *state = efx->loopback_selftest; 361 struct efx_loopback_state *state = efx->loopback_selftest;
366 struct net_device *net_dev = efx->net_dev; 362 struct net_device *net_dev = efx->net_dev;
367 struct efx_loopback_payload *payload = &state->payload; 363 struct efx_loopback_payload *payload = &state->payload;
368 364
@@ -395,17 +391,17 @@ static void efx_iterate_state(struct efx_nic *efx)
395 smp_wmb(); 391 smp_wmb();
396} 392}
397 393
398static int efx_tx_loopback(struct efx_tx_queue *tx_queue) 394static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
399{ 395{
400 struct efx_nic *efx = tx_queue->efx; 396 struct efx_nic *efx = tx_queue->efx;
401 struct efx_selftest_state *state = efx->loopback_selftest; 397 struct efx_loopback_state *state = efx->loopback_selftest;
402 struct efx_loopback_payload *payload; 398 struct efx_loopback_payload *payload;
403 struct sk_buff *skb; 399 struct sk_buff *skb;
404 int i, rc; 400 int i, rc;
405 401
406 /* Transmit N copies of buffer */ 402 /* Transmit N copies of buffer */
407 for (i = 0; i < state->packet_count; i++) { 403 for (i = 0; i < state->packet_count; i++) {
408 /* Allocate an skb, holding an extra reference for 404 /* Allocate an skb, holding an extra reference for
409 * transmit completion counting */ 405 * transmit completion counting */
410 skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); 406 skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
411 if (!skb) 407 if (!skb)
@@ -444,11 +440,25 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
444 return 0; 440 return 0;
445} 441}
446 442
447static int efx_rx_loopback(struct efx_tx_queue *tx_queue, 443static int efx_poll_loopback(struct efx_nic *efx)
448 struct efx_loopback_self_tests *lb_tests) 444{
445 struct efx_loopback_state *state = efx->loopback_selftest;
446 struct efx_channel *channel;
447
448 /* NAPI polling is not enabled, so process channels
449 * synchronously */
450 efx_for_each_channel(channel, efx) {
451 if (channel->work_pending)
452 efx_process_channel_now(channel);
453 }
454 return atomic_read(&state->rx_good) == state->packet_count;
455}
456
457static int efx_end_loopback(struct efx_tx_queue *tx_queue,
458 struct efx_loopback_self_tests *lb_tests)
449{ 459{
450 struct efx_nic *efx = tx_queue->efx; 460 struct efx_nic *efx = tx_queue->efx;
451 struct efx_selftest_state *state = efx->loopback_selftest; 461 struct efx_loopback_state *state = efx->loopback_selftest;
452 struct sk_buff *skb; 462 struct sk_buff *skb;
453 int tx_done = 0, rx_good, rx_bad; 463 int tx_done = 0, rx_good, rx_bad;
454 int i, rc = 0; 464 int i, rc = 0;
@@ -507,11 +517,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
507 struct efx_loopback_self_tests *lb_tests) 517 struct efx_loopback_self_tests *lb_tests)
508{ 518{
509 struct efx_nic *efx = tx_queue->efx; 519 struct efx_nic *efx = tx_queue->efx;
510 struct efx_selftest_state *state = efx->loopback_selftest; 520 struct efx_loopback_state *state = efx->loopback_selftest;
511 struct efx_channel *channel; 521 int i, begin_rc, end_rc;
512 int i, rc = 0;
513 522
514 for (i = 0; i < loopback_test_level; i++) { 523 for (i = 0; i < 3; i++) {
515 /* Determine how many packets to send */ 524 /* Determine how many packets to send */
516 state->packet_count = (efx->type->txd_ring_mask + 1) / 3; 525 state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
517 state->packet_count = min(1 << (i << 2), state->packet_count); 526 state->packet_count = min(1 << (i << 2), state->packet_count);
@@ -519,30 +528,31 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
519 state->packet_count, GFP_KERNEL); 528 state->packet_count, GFP_KERNEL);
520 if (!state->skbs) 529 if (!state->skbs)
521 return -ENOMEM; 530 return -ENOMEM;
522 state->flush = 0; 531 state->flush = false;
523 532
524 EFX_LOG(efx, "TX queue %d testing %s loopback with %d " 533 EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
525 "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 534 "packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
526 state->packet_count); 535 state->packet_count);
527 536
528 efx_iterate_state(efx); 537 efx_iterate_state(efx);
529 rc = efx_tx_loopback(tx_queue); 538 begin_rc = efx_begin_loopback(tx_queue);
530 539
531 /* NAPI polling is not enabled, so process channels synchronously */ 540 /* This will normally complete very quickly, but be
532 schedule_timeout_uninterruptible(HZ / 50); 541 * prepared to wait up to 100 ms. */
533 efx_for_each_channel_with_interrupt(channel, efx) { 542 msleep(1);
534 if (channel->work_pending) 543 if (!efx_poll_loopback(efx)) {
535 efx_process_channel_now(channel); 544 msleep(100);
545 efx_poll_loopback(efx);
536 } 546 }
537 547
538 rc |= efx_rx_loopback(tx_queue, lb_tests); 548 end_rc = efx_end_loopback(tx_queue, lb_tests);
539 kfree(state->skbs); 549 kfree(state->skbs);
540 550
541 if (rc) { 551 if (begin_rc || end_rc) {
542 /* Wait a while to ensure there are no packets 552 /* Wait a while to ensure there are no packets
543 * floating around after a failure. */ 553 * floating around after a failure. */
544 schedule_timeout_uninterruptible(HZ / 10); 554 schedule_timeout_uninterruptible(HZ / 10);
545 return rc; 555 return begin_rc ? begin_rc : end_rc;
546 } 556 }
547 } 557 }
548 558
@@ -550,49 +560,36 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
550 "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 560 "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
551 state->packet_count); 561 state->packet_count);
552 562
553 return rc; 563 return 0;
554} 564}
555 565
556static int efx_test_loopbacks(struct efx_nic *efx, 566static int efx_test_loopbacks(struct efx_nic *efx, struct ethtool_cmd ecmd,
557 struct efx_self_tests *tests, 567 struct efx_self_tests *tests,
558 unsigned int loopback_modes) 568 unsigned int loopback_modes)
559{ 569{
560 struct efx_selftest_state *state = efx->loopback_selftest; 570 enum efx_loopback_mode mode;
561 struct ethtool_cmd ecmd, ecmd_loopback; 571 struct efx_loopback_state *state;
562 struct efx_tx_queue *tx_queue; 572 struct efx_tx_queue *tx_queue;
563 enum efx_loopback_mode old_mode, mode; 573 bool link_up;
564 int count, rc = 0, link_up; 574 int count, rc = 0;
565
566 rc = efx_ethtool_get_settings(efx->net_dev, &ecmd);
567 if (rc) {
568 EFX_ERR(efx, "could not get GMII settings\n");
569 return rc;
570 }
571 old_mode = efx->loopback_mode;
572
573 /* Disable autonegotiation for the purposes of loopback */
574 memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback));
575 if (ecmd_loopback.autoneg == AUTONEG_ENABLE) {
576 ecmd_loopback.autoneg = AUTONEG_DISABLE;
577 ecmd_loopback.duplex = DUPLEX_FULL;
578 ecmd_loopback.speed = SPEED_10000;
579 }
580 575
581 rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback); 576 /* Set the port loopback_selftest member. From this point on
582 if (rc) { 577 * all received packets will be dropped. Mark the state as
583 EFX_ERR(efx, "could not disable autonegotiation\n"); 578 * "flushing" so all inflight packets are dropped */
584 goto out; 579 state = kzalloc(sizeof(*state), GFP_KERNEL);
585 } 580 if (state == NULL)
586 tests->loopback_speed = ecmd_loopback.speed; 581 return -ENOMEM;
587 tests->loopback_full_duplex = ecmd_loopback.duplex; 582 BUG_ON(efx->loopback_selftest);
583 state->flush = true;
584 efx->loopback_selftest = state;
588 585
589 /* Test all supported loopback modes */ 586 /* Test all supported loopback modes */
590 for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { 587 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
591 if (!(loopback_modes & (1 << mode))) 588 if (!(loopback_modes & (1 << mode)))
592 continue; 589 continue;
593 590
594 /* Move the port into the specified loopback mode. */ 591 /* Move the port into the specified loopback mode. */
595 state->flush = 1; 592 state->flush = true;
596 efx->loopback_mode = mode; 593 efx->loopback_mode = mode;
597 efx_reconfigure_port(efx); 594 efx_reconfigure_port(efx);
598 595
@@ -616,7 +613,7 @@ static int efx_test_loopbacks(struct efx_nic *efx,
616 */ 613 */
617 link_up = efx->link_up; 614 link_up = efx->link_up;
618 if (!falcon_xaui_link_ok(efx)) 615 if (!falcon_xaui_link_ok(efx))
619 link_up = 0; 616 link_up = false;
620 617
621 } while ((++count < 20) && !link_up); 618 } while ((++count < 20) && !link_up);
622 619
@@ -634,18 +631,21 @@ static int efx_test_loopbacks(struct efx_nic *efx,
634 631
635 /* Test every TX queue */ 632 /* Test every TX queue */
636 efx_for_each_tx_queue(tx_queue, efx) { 633 efx_for_each_tx_queue(tx_queue, efx) {
637 rc |= efx_test_loopback(tx_queue, 634 state->offload_csum = (tx_queue->queue ==
638 &tests->loopback[mode]); 635 EFX_TX_QUEUE_OFFLOAD_CSUM);
636 rc = efx_test_loopback(tx_queue,
637 &tests->loopback[mode]);
639 if (rc) 638 if (rc)
640 goto out; 639 goto out;
641 } 640 }
642 } 641 }
643 642
644 out: 643 out:
645 /* Take out of loopback and restore PHY settings */ 644 /* Remove the flush. The caller will remove the loopback setting */
646 state->flush = 1; 645 state->flush = true;
647 efx->loopback_mode = old_mode; 646 efx->loopback_selftest = NULL;
648 efx_ethtool_set_settings(efx->net_dev, &ecmd); 647 wmb();
648 kfree(state);
649 649
650 return rc; 650 return rc;
651} 651}
@@ -661,23 +661,27 @@ static int efx_test_loopbacks(struct efx_nic *efx,
661int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests) 661int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
662{ 662{
663 struct efx_channel *channel; 663 struct efx_channel *channel;
664 int rc = 0; 664 int rc, rc2 = 0;
665
666 rc = efx_test_mii(efx, tests);
667 if (rc && !rc2)
668 rc2 = rc;
665 669
666 EFX_LOG(efx, "performing online self-tests\n"); 670 rc = efx_test_nvram(efx, tests);
671 if (rc && !rc2)
672 rc2 = rc;
673
674 rc = efx_test_interrupts(efx, tests);
675 if (rc && !rc2)
676 rc2 = rc;
667 677
668 rc |= efx_test_interrupts(efx, tests);
669 efx_for_each_channel(channel, efx) { 678 efx_for_each_channel(channel, efx) {
670 if (channel->has_interrupt) 679 rc = efx_test_eventq_irq(channel, tests);
671 rc |= efx_test_eventq_irq(channel, tests); 680 if (rc && !rc2)
672 else 681 rc2 = rc;
673 rc |= efx_test_eventq(channel, tests);
674 } 682 }
675 rc |= efx_test_phy(efx, tests);
676
677 if (rc)
678 EFX_ERR(efx, "failed online self-tests\n");
679 683
680 return rc; 684 return rc2;
681} 685}
682 686
683/* Offline (i.e. disruptive) testing 687/* Offline (i.e. disruptive) testing
@@ -685,35 +689,66 @@ int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
685int efx_offline_test(struct efx_nic *efx, 689int efx_offline_test(struct efx_nic *efx,
686 struct efx_self_tests *tests, unsigned int loopback_modes) 690 struct efx_self_tests *tests, unsigned int loopback_modes)
687{ 691{
688 struct efx_selftest_state *state; 692 enum efx_loopback_mode loopback_mode = efx->loopback_mode;
689 int rc = 0; 693 int phy_mode = efx->phy_mode;
690 694 struct ethtool_cmd ecmd, ecmd_test;
691 EFX_LOG(efx, "performing offline self-tests\n"); 695 int rc, rc2 = 0;
696
697 /* force the carrier state off so the kernel doesn't transmit during
698 * the loopback test, and the watchdog timeout doesn't fire. Also put
699 * falcon into loopback for the register test.
700 */
701 mutex_lock(&efx->mac_lock);
702 efx->port_inhibited = true;
703 if (efx->loopback_modes)
704 efx->loopback_mode = __ffs(efx->loopback_modes);
705 __efx_reconfigure_port(efx);
706 mutex_unlock(&efx->mac_lock);
707
708 /* free up all consumers of SRAM (including all the queues) */
709 efx_reset_down(efx, &ecmd);
710
711 rc = efx_test_chip(efx, tests);
712 if (rc && !rc2)
713 rc2 = rc;
714
715 /* reset the chip to recover from the register test */
716 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
717
718 /* Modify the saved ecmd so that when efx_reset_up() restores the phy
719 * state, AN is disabled, and the phy is powered, and out of loopback */
720 memcpy(&ecmd_test, &ecmd, sizeof(ecmd_test));
721 if (ecmd_test.autoneg == AUTONEG_ENABLE) {
722 ecmd_test.autoneg = AUTONEG_DISABLE;
723 ecmd_test.duplex = DUPLEX_FULL;
724 ecmd_test.speed = SPEED_10000;
725 }
726 efx->loopback_mode = LOOPBACK_NONE;
692 727
693 /* Create a selftest_state structure to hold state for the test */ 728 rc = efx_reset_up(efx, &ecmd_test, rc == 0);
694 state = kzalloc(sizeof(*state), GFP_KERNEL); 729 if (rc) {
695 if (state == NULL) { 730 EFX_ERR(efx, "Unable to recover from chip test\n");
696 rc = -ENOMEM; 731 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
697 goto out; 732 return rc;
698 } 733 }
699 734
700 /* Set the port loopback_selftest member. From this point on 735 tests->loopback_speed = ecmd_test.speed;
701 * all received packets will be dropped. Mark the state as 736 tests->loopback_full_duplex = ecmd_test.duplex;
702 * "flushing" so all inflight packets are dropped */
703 BUG_ON(efx->loopback_selftest);
704 state->flush = 1;
705 efx->loopback_selftest = state;
706 737
707 rc = efx_test_loopbacks(efx, tests, loopback_modes); 738 rc = efx_test_phy(efx, tests);
739 if (rc && !rc2)
740 rc2 = rc;
708 741
709 efx->loopback_selftest = NULL; 742 rc = efx_test_loopbacks(efx, ecmd_test, tests, loopback_modes);
710 wmb(); 743 if (rc && !rc2)
711 kfree(state); 744 rc2 = rc;
712 745
713 out: 746 /* restore the PHY to the previous state */
714 if (rc) 747 efx->loopback_mode = loopback_mode;
715 EFX_ERR(efx, "failed offline self-tests\n"); 748 efx->phy_mode = phy_mode;
749 efx->port_inhibited = false;
750 efx_ethtool_set_settings(efx->net_dev, &ecmd);
716 751
717 return rc; 752 return rc2;
718} 753}
719 754
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6999c2b622d..fc15df15d766 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
18 */ 18 */
19 19
20struct efx_loopback_self_tests { 20struct efx_loopback_self_tests {
21 int tx_sent[EFX_MAX_TX_QUEUES]; 21 int tx_sent[EFX_TX_QUEUE_COUNT];
22 int tx_done[EFX_MAX_TX_QUEUES]; 22 int tx_done[EFX_TX_QUEUE_COUNT];
23 int rx_good; 23 int rx_good;
24 int rx_bad; 24 int rx_bad;
25}; 25};
@@ -29,14 +29,19 @@ struct efx_loopback_self_tests {
29 * indicates failure. 29 * indicates failure.
30 */ 30 */
31struct efx_self_tests { 31struct efx_self_tests {
32 /* online tests */
33 int mii;
34 int nvram;
32 int interrupt; 35 int interrupt;
33 int eventq_dma[EFX_MAX_CHANNELS]; 36 int eventq_dma[EFX_MAX_CHANNELS];
34 int eventq_int[EFX_MAX_CHANNELS]; 37 int eventq_int[EFX_MAX_CHANNELS];
35 int eventq_poll[EFX_MAX_CHANNELS]; 38 int eventq_poll[EFX_MAX_CHANNELS];
36 int phy_ok; 39 /* offline tests */
40 int registers;
41 int phy;
37 int loopback_speed; 42 int loopback_speed;
38 int loopback_full_duplex; 43 int loopback_full_duplex;
39 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX]; 44 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
40}; 45};
41 46
42extern void efx_loopback_rx_packet(struct efx_nic *efx, 47extern void efx_loopback_rx_packet(struct efx_nic *efx,
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index b27849523990..fe4e3fd22330 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -13,11 +13,13 @@
13 * the PHY 13 * the PHY
14 */ 14 */
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include "net_driver.h"
16#include "efx.h" 17#include "efx.h"
17#include "phy.h" 18#include "phy.h"
18#include "boards.h" 19#include "boards.h"
19#include "falcon.h" 20#include "falcon.h"
20#include "falcon_hwdefs.h" 21#include "falcon_hwdefs.h"
22#include "falcon_io.h"
21#include "mac.h" 23#include "mac.h"
22 24
23/************************************************************************** 25/**************************************************************************
@@ -120,23 +122,144 @@ static void sfe4001_poweroff(struct efx_nic *efx)
120 i2c_smbus_read_byte_data(hwmon_client, RSL); 122 i2c_smbus_read_byte_data(hwmon_client, RSL);
121} 123}
122 124
123static void sfe4001_fini(struct efx_nic *efx) 125static int sfe4001_poweron(struct efx_nic *efx)
124{ 126{
125 EFX_INFO(efx, "%s\n", __func__); 127 struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
128 struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
129 unsigned int i, j;
130 int rc;
131 u8 out;
132
133 /* Clear any previous over-temperature alert */
134 rc = i2c_smbus_read_byte_data(hwmon_client, RSL);
135 if (rc < 0)
136 return rc;
137
138 /* Enable port 0 and port 1 outputs on IO expander */
139 rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
140 if (rc)
141 return rc;
142 rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
143 0xff & ~(1 << P1_SPARE_LBN));
144 if (rc)
145 goto fail_on;
146
147 /* If PHY power is on, turn it all off and wait 1 second to
148 * ensure a full reset.
149 */
150 rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
151 if (rc < 0)
152 goto fail_on;
153 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
154 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
155 (0 << P0_EN_1V0X_LBN));
156 if (rc != out) {
157 EFX_INFO(efx, "power-cycling PHY\n");
158 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
159 if (rc)
160 goto fail_on;
161 schedule_timeout_uninterruptible(HZ);
162 }
126 163
164 for (i = 0; i < 20; ++i) {
165 /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
166 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
167 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
168 (1 << P0_X_TRST_LBN));
169 if (efx->phy_mode & PHY_MODE_SPECIAL)
170 out |= 1 << P0_EN_3V3X_LBN;
171
172 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
173 if (rc)
174 goto fail_on;
175 msleep(10);
176
177 /* Turn on 1V power rail */
178 out &= ~(1 << P0_EN_1V0X_LBN);
179 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
180 if (rc)
181 goto fail_on;
182
183 EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
184
185 /* In flash config mode, DSP does not turn on AFE, so
186 * just wait 1 second.
187 */
188 if (efx->phy_mode & PHY_MODE_SPECIAL) {
189 schedule_timeout_uninterruptible(HZ);
190 return 0;
191 }
192
193 for (j = 0; j < 10; ++j) {
194 msleep(100);
195
196 /* Check DSP has asserted AFE power line */
197 rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
198 if (rc < 0)
199 goto fail_on;
200 if (rc & (1 << P1_AFE_PWD_LBN))
201 return 0;
202 }
203 }
204
205 EFX_INFO(efx, "timed out waiting for DSP boot\n");
206 rc = -ETIMEDOUT;
207fail_on:
127 sfe4001_poweroff(efx); 208 sfe4001_poweroff(efx);
128 i2c_unregister_device(efx->board_info.ioexp_client); 209 return rc;
129 i2c_unregister_device(efx->board_info.hwmon_client);
130} 210}
131 211
132/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected 212/* On SFE4001 rev A2 and later, we can control the FLASH_CFG_1 pin
133 * to the FLASH_CFG_1 input on the DSP. We must keep it high at power- 213 * using the 3V3X output of the IO-expander. Allow the user to set
134 * up to allow writing the flash (done through MDIO from userland). 214 * this when the device is stopped, and keep it stopped then.
135 */ 215 */
136unsigned int sfe4001_phy_flash_cfg; 216
137module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444); 217static ssize_t show_phy_flash_cfg(struct device *dev,
138MODULE_PARM_DESC(phy_flash_cfg, 218 struct device_attribute *attr, char *buf)
139 "Force PHY to enter flash configuration mode"); 219{
220 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
221 return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
222}
223
224static ssize_t set_phy_flash_cfg(struct device *dev,
225 struct device_attribute *attr,
226 const char *buf, size_t count)
227{
228 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
229 enum efx_phy_mode old_mode, new_mode;
230 int err;
231
232 rtnl_lock();
233 old_mode = efx->phy_mode;
234 if (count == 0 || *buf == '0')
235 new_mode = old_mode & ~PHY_MODE_SPECIAL;
236 else
237 new_mode = PHY_MODE_SPECIAL;
238 if (old_mode == new_mode) {
239 err = 0;
240 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
241 err = -EBUSY;
242 } else {
243 efx->phy_mode = new_mode;
244 err = sfe4001_poweron(efx);
245 efx_reconfigure_port(efx);
246 }
247 rtnl_unlock();
248
249 return err ? err : count;
250}
251
252static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
253
254static void sfe4001_fini(struct efx_nic *efx)
255{
256 EFX_INFO(efx, "%s\n", __func__);
257
258 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
259 sfe4001_poweroff(efx);
260 i2c_unregister_device(efx->board_info.ioexp_client);
261 i2c_unregister_device(efx->board_info.hwmon_client);
262}
140 263
141/* This board uses an I2C expander to provider power to the PHY, which needs to 264/* This board uses an I2C expander to provider power to the PHY, which needs to
142 * be turned on before the PHY can be used. 265 * be turned on before the PHY can be used.
@@ -144,41 +267,14 @@ MODULE_PARM_DESC(phy_flash_cfg,
144 */ 267 */
145int sfe4001_init(struct efx_nic *efx) 268int sfe4001_init(struct efx_nic *efx)
146{ 269{
147 struct i2c_client *hwmon_client, *ioexp_client; 270 struct i2c_client *hwmon_client;
148 unsigned int count;
149 int rc; 271 int rc;
150 u8 out;
151 efx_dword_t reg;
152 272
153 hwmon_client = i2c_new_dummy(&efx->i2c_adap, MAX6647); 273 hwmon_client = i2c_new_dummy(&efx->i2c_adap, MAX6647);
154 if (!hwmon_client) 274 if (!hwmon_client)
155 return -EIO; 275 return -EIO;
156 efx->board_info.hwmon_client = hwmon_client; 276 efx->board_info.hwmon_client = hwmon_client;
157 277
158 ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
159 if (!ioexp_client) {
160 rc = -EIO;
161 goto fail_hwmon;
162 }
163 efx->board_info.ioexp_client = ioexp_client;
164
165 /* 10Xpress has fixed-function LED pins, so there is no board-specific
166 * blink code. */
167 efx->board_info.blink = tenxpress_phy_blink;
168
169 /* Ensure that XGXS and XAUI SerDes are held in reset */
170 EFX_POPULATE_DWORD_7(reg, XX_PWRDNA_EN, 1,
171 XX_PWRDNB_EN, 1,
172 XX_RSTPLLAB_EN, 1,
173 XX_RESETA_EN, 1,
174 XX_RESETB_EN, 1,
175 XX_RSTXGXSRX_EN, 1,
176 XX_RSTXGXSTX_EN, 1);
177 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
178 udelay(10);
179
180 efx->board_info.fini = sfe4001_fini;
181
182 /* Set DSP over-temperature alert threshold */ 278 /* Set DSP over-temperature alert threshold */
183 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature); 279 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
184 rc = i2c_smbus_write_byte_data(hwmon_client, WLHO, 280 rc = i2c_smbus_write_byte_data(hwmon_client, WLHO,
@@ -195,78 +291,34 @@ int sfe4001_init(struct efx_nic *efx)
195 goto fail_ioexp; 291 goto fail_ioexp;
196 } 292 }
197 293
198 /* Clear any previous over-temperature alert */ 294 efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
199 rc = i2c_smbus_read_byte_data(hwmon_client, RSL); 295 if (!efx->board_info.ioexp_client) {
200 if (rc < 0) 296 rc = -EIO;
201 goto fail_ioexp; 297 goto fail_hwmon;
298 }
202 299
203 /* Enable port 0 and port 1 outputs on IO expander */ 300 /* 10Xpress has fixed-function LED pins, so there is no board-specific
204 rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00); 301 * blink code. */
302 efx->board_info.blink = tenxpress_phy_blink;
303
304 efx->board_info.fini = sfe4001_fini;
305
306 rc = sfe4001_poweron(efx);
205 if (rc) 307 if (rc)
206 goto fail_ioexp; 308 goto fail_ioexp;
207 rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
208 0xff & ~(1 << P1_SPARE_LBN));
209 if (rc)
210 goto fail_on;
211 309
212 /* Turn all power off then wait 1 sec. This ensures PHY is reset */ 310 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
213 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
214 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
215 (0 << P0_EN_1V0X_LBN));
216 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
217 if (rc) 311 if (rc)
218 goto fail_on; 312 goto fail_on;
219 313
220 schedule_timeout_uninterruptible(HZ);
221 count = 0;
222 do {
223 /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
224 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
225 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
226 (1 << P0_X_TRST_LBN));
227 if (sfe4001_phy_flash_cfg)
228 out |= 1 << P0_EN_3V3X_LBN;
229
230 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
231 if (rc)
232 goto fail_on;
233 msleep(10);
234
235 /* Turn on 1V power rail */
236 out &= ~(1 << P0_EN_1V0X_LBN);
237 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
238 if (rc)
239 goto fail_on;
240
241 EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
242
243 schedule_timeout_uninterruptible(HZ);
244
245 /* Check DSP is powered */
246 rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
247 if (rc < 0)
248 goto fail_on;
249 if (rc & (1 << P1_AFE_PWD_LBN))
250 goto done;
251
252 /* DSP doesn't look powered in flash config mode */
253 if (sfe4001_phy_flash_cfg)
254 goto done;
255 } while (++count < 20);
256
257 EFX_INFO(efx, "timed out waiting for power\n");
258 rc = -ETIMEDOUT;
259 goto fail_on;
260
261done:
262 EFX_INFO(efx, "PHY is powered on\n"); 314 EFX_INFO(efx, "PHY is powered on\n");
263 return 0; 315 return 0;
264 316
265fail_on: 317fail_on:
266 sfe4001_poweroff(efx); 318 sfe4001_poweroff(efx);
267fail_ioexp: 319fail_ioexp:
268 i2c_unregister_device(ioexp_client); 320 i2c_unregister_device(efx->board_info.ioexp_client);
269fail_hwmon: 321fail_hwmon:
270 i2c_unregister_device(hwmon_client); 322 i2c_unregister_device(hwmon_client);
271 return rc; 323 return rc;
272} 324}
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 34412f3d41c9..feef61942377 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -19,53 +19,48 @@
19 * 19 *
20 *************************************************************************/ 20 *************************************************************************/
21 21
22/* 22#define SPI_WRSR 0x01 /* Write status register */
23 * Commands common to all known devices. 23#define SPI_WRITE 0x02 /* Write data to memory array */
24 * 24#define SPI_READ 0x03 /* Read data from memory array */
25#define SPI_WRDI 0x04 /* Reset write enable latch */
26#define SPI_RDSR 0x05 /* Read status register */
27#define SPI_WREN 0x06 /* Set write enable latch */
28
29#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
30#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
31#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
32#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
33#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
34#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
35
36/**
37 * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
38 * @efx: The Efx controller that owns this device
39 * @device_id: Controller's id for the device
40 * @size: Size (in bytes)
41 * @addr_len: Number of address bytes in read/write commands
42 * @munge_address: Flag whether addresses should be munged.
43 * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
44 * use bit 3 of the command byte as address bit A8, rather
45 * than having a two-byte address. If this flag is set, then
46 * commands should be munged in this way.
47 * @block_size: Write block size (in bytes).
48 * Write commands are limited to blocks with this size and alignment.
49 * @read: Read function for the device
50 * @write: Write function for the device
25 */ 51 */
26 52struct efx_spi_device {
27/* Write status register */ 53 struct efx_nic *efx;
28#define SPI_WRSR 0x01 54 int device_id;
29 55 unsigned int size;
30/* Write data to memory array */ 56 unsigned int addr_len;
31#define SPI_WRITE 0x02 57 unsigned int munge_address:1;
32 58 unsigned int block_size;
33/* Read data from memory array */ 59};
34#define SPI_READ 0x03 60
35 61int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
36/* Reset write enable latch */ 62 size_t len, size_t *retlen, u8 *buffer);
37#define SPI_WRDI 0x04 63int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
38 64 size_t len, size_t *retlen, const u8 *buffer);
39/* Read status register */
40#define SPI_RDSR 0x05
41
42/* Set write enable latch */
43#define SPI_WREN 0x06
44
45/* SST: Enable write to status register */
46#define SPI_SST_EWSR 0x50
47
48/*
49 * Status register bits. Not all bits are supported on all devices.
50 *
51 */
52
53/* Write-protect pin enabled */
54#define SPI_STATUS_WPEN 0x80
55
56/* Block protection bit 2 */
57#define SPI_STATUS_BP2 0x10
58
59/* Block protection bit 1 */
60#define SPI_STATUS_BP1 0x08
61
62/* Block protection bit 0 */
63#define SPI_STATUS_BP0 0x04
64
65/* State of the write enable latch */
66#define SPI_STATUS_WEN 0x02
67
68/* Device busy flag */
69#define SPI_STATUS_NRDY 0x01
70 65
71#endif /* EFX_SPI_H */ 66#endif /* EFX_SPI_H */
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index c0146061c326..d507c93d666e 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -65,25 +65,10 @@
65#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) 65#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
66 66
67 67
68/* Self test (BIST) control register */
69#define PMA_PMD_BIST_CTRL_REG (0xc014)
70#define PMA_PMD_BIST_BER_LBN (2) /* Run BER test */
71#define PMA_PMD_BIST_CONT_LBN (1) /* Run continuous BIST until cleared */
72#define PMA_PMD_BIST_SINGLE_LBN (0) /* Run 1 BIST iteration (self clears) */
73/* Self test status register */
74#define PMA_PMD_BIST_STAT_REG (0xc015)
75#define PMA_PMD_BIST_ENX_LBN (3)
76#define PMA_PMD_BIST_PMA_LBN (2)
77#define PMA_PMD_BIST_RXD_LBN (1)
78#define PMA_PMD_BIST_AFE_LBN (0)
79
80/* Special Software reset register */ 68/* Special Software reset register */
81#define PMA_PMD_EXT_CTRL_REG 49152 69#define PMA_PMD_EXT_CTRL_REG 49152
82#define PMA_PMD_EXT_SSR_LBN 15 70#define PMA_PMD_EXT_SSR_LBN 15
83 71
84#define BIST_MAX_DELAY (1000)
85#define BIST_POLL_DELAY (10)
86
87/* Misc register defines */ 72/* Misc register defines */
88#define PCS_CLOCK_CTRL_REG 0xd801 73#define PCS_CLOCK_CTRL_REG 0xd801
89#define PLL312_RST_N_LBN 2 74#define PLL312_RST_N_LBN 2
@@ -119,27 +104,12 @@ MODULE_PARM_DESC(crc_error_reset_threshold,
119 "Max number of CRC errors before XAUI reset"); 104 "Max number of CRC errors before XAUI reset");
120 105
121struct tenxpress_phy_data { 106struct tenxpress_phy_data {
122 enum tenxpress_state state;
123 enum efx_loopback_mode loopback_mode; 107 enum efx_loopback_mode loopback_mode;
124 atomic_t bad_crc_count; 108 atomic_t bad_crc_count;
125 int tx_disabled; 109 enum efx_phy_mode phy_mode;
126 int bad_lp_tries; 110 int bad_lp_tries;
127}; 111};
128 112
129static int tenxpress_state_is(struct efx_nic *efx, int state)
130{
131 struct tenxpress_phy_data *phy_data = efx->phy_data;
132 return (phy_data != NULL) && (state == phy_data->state);
133}
134
135void tenxpress_set_state(struct efx_nic *efx,
136 enum tenxpress_state state)
137{
138 struct tenxpress_phy_data *phy_data = efx->phy_data;
139 if (phy_data != NULL)
140 phy_data->state = state;
141}
142
143void tenxpress_crc_err(struct efx_nic *efx) 113void tenxpress_crc_err(struct efx_nic *efx)
144{ 114{
145 struct tenxpress_phy_data *phy_data = efx->phy_data; 115 struct tenxpress_phy_data *phy_data = efx->phy_data;
@@ -176,8 +146,6 @@ static int tenxpress_phy_check(struct efx_nic *efx)
176 return 0; 146 return 0;
177} 147}
178 148
179static void tenxpress_reset_xaui(struct efx_nic *efx);
180
181static int tenxpress_init(struct efx_nic *efx) 149static int tenxpress_init(struct efx_nic *efx)
182{ 150{
183 int rc, reg; 151 int rc, reg;
@@ -214,15 +182,12 @@ static int tenxpress_phy_init(struct efx_nic *efx)
214 if (!phy_data) 182 if (!phy_data)
215 return -ENOMEM; 183 return -ENOMEM;
216 efx->phy_data = phy_data; 184 efx->phy_data = phy_data;
185 phy_data->phy_mode = efx->phy_mode;
217 186
218 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); 187 rc = mdio_clause45_wait_reset_mmds(efx,
219 188 TENXPRESS_REQUIRED_DEVS);
220 if (!sfe4001_phy_flash_cfg) { 189 if (rc < 0)
221 rc = mdio_clause45_wait_reset_mmds(efx, 190 goto fail;
222 TENXPRESS_REQUIRED_DEVS);
223 if (rc < 0)
224 goto fail;
225 }
226 191
227 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 192 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
228 if (rc < 0) 193 if (rc < 0)
@@ -249,7 +214,10 @@ static int tenxpress_special_reset(struct efx_nic *efx)
249{ 214{
250 int rc, reg; 215 int rc, reg;
251 216
252 EFX_TRACE(efx, "%s\n", __func__); 217 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
218 * a special software reset can glitch the XGMAC sufficiently for stats
219 * requests to fail. Since we don't ofen special_reset, just lock. */
220 spin_lock(&efx->stats_lock);
253 221
254 /* Initiate reset */ 222 /* Initiate reset */
255 reg = mdio_clause45_read(efx, efx->mii.phy_id, 223 reg = mdio_clause45_read(efx, efx->mii.phy_id,
@@ -258,23 +226,25 @@ static int tenxpress_special_reset(struct efx_nic *efx)
258 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, 226 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
259 PMA_PMD_EXT_CTRL_REG, reg); 227 PMA_PMD_EXT_CTRL_REG, reg);
260 228
261 msleep(200); 229 mdelay(200);
262 230
263 /* Wait for the blocks to come out of reset */ 231 /* Wait for the blocks to come out of reset */
264 rc = mdio_clause45_wait_reset_mmds(efx, 232 rc = mdio_clause45_wait_reset_mmds(efx,
265 TENXPRESS_REQUIRED_DEVS); 233 TENXPRESS_REQUIRED_DEVS);
266 if (rc < 0) 234 if (rc < 0)
267 return rc; 235 goto unlock;
268 236
269 /* Try and reconfigure the device */ 237 /* Try and reconfigure the device */
270 rc = tenxpress_init(efx); 238 rc = tenxpress_init(efx);
271 if (rc < 0) 239 if (rc < 0)
272 return rc; 240 goto unlock;
273 241
274 return 0; 242unlock:
243 spin_unlock(&efx->stats_lock);
244 return rc;
275} 245}
276 246
277static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) 247static void tenxpress_set_bad_lp(struct efx_nic *efx, bool bad_lp)
278{ 248{
279 struct tenxpress_phy_data *pd = efx->phy_data; 249 struct tenxpress_phy_data *pd = efx->phy_data;
280 int reg; 250 int reg;
@@ -311,15 +281,15 @@ static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
311 * into a non-10GBT port and if so warn the user that they won't get 281 * into a non-10GBT port and if so warn the user that they won't get
312 * link any time soon as we are 10GBT only, unless caller specified 282 * link any time soon as we are 10GBT only, unless caller specified
313 * not to do this check (it isn't useful in loopback) */ 283 * not to do this check (it isn't useful in loopback) */
314static int tenxpress_link_ok(struct efx_nic *efx, int check_lp) 284static bool tenxpress_link_ok(struct efx_nic *efx, bool check_lp)
315{ 285{
316 int ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS); 286 bool ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
317 287
318 if (ok) { 288 if (ok) {
319 tenxpress_set_bad_lp(efx, 0); 289 tenxpress_set_bad_lp(efx, false);
320 } else if (check_lp) { 290 } else if (check_lp) {
321 /* Are we plugged into the wrong sort of link? */ 291 /* Are we plugged into the wrong sort of link? */
322 int bad_lp = 0; 292 bool bad_lp = false;
323 int phy_id = efx->mii.phy_id; 293 int phy_id = efx->mii.phy_id;
324 int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, 294 int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
325 MDIO_AN_STATUS); 295 MDIO_AN_STATUS);
@@ -332,7 +302,7 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
332 * bit has the advantage of not clearing when autoneg 302 * bit has the advantage of not clearing when autoneg
333 * restarts. */ 303 * restarts. */
334 if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) { 304 if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) {
335 tenxpress_set_bad_lp(efx, 0); 305 tenxpress_set_bad_lp(efx, false);
336 return ok; 306 return ok;
337 } 307 }
338 308
@@ -367,16 +337,19 @@ static void tenxpress_phyxs_loopback(struct efx_nic *efx)
367static void tenxpress_phy_reconfigure(struct efx_nic *efx) 337static void tenxpress_phy_reconfigure(struct efx_nic *efx)
368{ 338{
369 struct tenxpress_phy_data *phy_data = efx->phy_data; 339 struct tenxpress_phy_data *phy_data = efx->phy_data;
370 int loop_change = LOOPBACK_OUT_OF(phy_data, efx, 340 bool loop_change = LOOPBACK_OUT_OF(phy_data, efx,
371 TENXPRESS_LOOPBACKS); 341 TENXPRESS_LOOPBACKS);
372 342
373 if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) 343 if (efx->phy_mode & PHY_MODE_SPECIAL) {
344 phy_data->phy_mode = efx->phy_mode;
374 return; 345 return;
346 }
375 347
376 /* When coming out of transmit disable, coming out of low power 348 /* When coming out of transmit disable, coming out of low power
377 * mode, or moving out of any PHY internal loopback mode, 349 * mode, or moving out of any PHY internal loopback mode,
378 * perform a special software reset */ 350 * perform a special software reset */
379 if ((phy_data->tx_disabled && !efx->tx_disabled) || 351 if ((efx->phy_mode == PHY_MODE_NORMAL &&
352 phy_data->phy_mode != PHY_MODE_NORMAL) ||
380 loop_change) { 353 loop_change) {
381 tenxpress_special_reset(efx); 354 tenxpress_special_reset(efx);
382 falcon_reset_xaui(efx); 355 falcon_reset_xaui(efx);
@@ -386,9 +359,9 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
386 mdio_clause45_phy_reconfigure(efx); 359 mdio_clause45_phy_reconfigure(efx);
387 tenxpress_phyxs_loopback(efx); 360 tenxpress_phyxs_loopback(efx);
388 361
389 phy_data->tx_disabled = efx->tx_disabled;
390 phy_data->loopback_mode = efx->loopback_mode; 362 phy_data->loopback_mode = efx->loopback_mode;
391 efx->link_up = tenxpress_link_ok(efx, 0); 363 phy_data->phy_mode = efx->phy_mode;
364 efx->link_up = tenxpress_link_ok(efx, false);
392 efx->link_options = GM_LPA_10000FULL; 365 efx->link_options = GM_LPA_10000FULL;
393} 366}
394 367
@@ -402,16 +375,14 @@ static void tenxpress_phy_clear_interrupt(struct efx_nic *efx)
402static int tenxpress_phy_check_hw(struct efx_nic *efx) 375static int tenxpress_phy_check_hw(struct efx_nic *efx)
403{ 376{
404 struct tenxpress_phy_data *phy_data = efx->phy_data; 377 struct tenxpress_phy_data *phy_data = efx->phy_data;
405 int phy_up = tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL); 378 bool link_ok;
406 int link_ok;
407 379
408 link_ok = phy_up && tenxpress_link_ok(efx, 1); 380 link_ok = tenxpress_link_ok(efx, true);
409 381
410 if (link_ok != efx->link_up) 382 if (link_ok != efx->link_up)
411 falcon_xmac_sim_phy_event(efx); 383 falcon_xmac_sim_phy_event(efx);
412 384
413 /* Nothing to check if we've already shut down the PHY */ 385 if (phy_data->phy_mode != PHY_MODE_NORMAL)
414 if (!phy_up)
415 return 0; 386 return 0;
416 387
417 if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) { 388 if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
@@ -444,7 +415,7 @@ static void tenxpress_phy_fini(struct efx_nic *efx)
444 415
445/* Set the RX and TX LEDs and Link LED flashing. The other LEDs 416/* Set the RX and TX LEDs and Link LED flashing. The other LEDs
446 * (which probably aren't wired anyway) are left in AUTO mode */ 417 * (which probably aren't wired anyway) are left in AUTO mode */
447void tenxpress_phy_blink(struct efx_nic *efx, int blink) 418void tenxpress_phy_blink(struct efx_nic *efx, bool blink)
448{ 419{
449 int reg; 420 int reg;
450 421
@@ -459,52 +430,10 @@ void tenxpress_phy_blink(struct efx_nic *efx, int blink)
459 PMA_PMD_LED_OVERR_REG, reg); 430 PMA_PMD_LED_OVERR_REG, reg);
460} 431}
461 432
462static void tenxpress_reset_xaui(struct efx_nic *efx) 433static int tenxpress_phy_test(struct efx_nic *efx)
463{ 434{
464 int phy = efx->mii.phy_id; 435 /* BIST is automatically run after a special software reset */
465 int clk_ctrl, test_select, soft_rst2; 436 return tenxpress_special_reset(efx);
466
467 /* Real work is done on clock_ctrl other resets are thought to be
468 * optional but make the reset more reliable
469 */
470
471 /* Read */
472 clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
473 PCS_CLOCK_CTRL_REG);
474 test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
475 PCS_TEST_SELECT_REG);
476 soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
477 PCS_SOFT_RST2_REG);
478
479 /* Put in reset */
480 test_select &= ~(1 << CLK312_EN_LBN);
481 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
482 PCS_TEST_SELECT_REG, test_select);
483
484 soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
485 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
486 PCS_SOFT_RST2_REG, soft_rst2);
487
488 clk_ctrl &= ~(1 << PLL312_RST_N_LBN);
489 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
490 PCS_CLOCK_CTRL_REG, clk_ctrl);
491 udelay(10);
492
493 /* Remove reset */
494 clk_ctrl |= (1 << PLL312_RST_N_LBN);
495 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
496 PCS_CLOCK_CTRL_REG, clk_ctrl);
497 udelay(10);
498
499 soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
500 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
501 PCS_SOFT_RST2_REG, soft_rst2);
502 udelay(10);
503
504 test_select |= (1 << CLK312_EN_LBN);
505 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
506 PCS_TEST_SELECT_REG, test_select);
507 udelay(10);
508} 437}
509 438
510struct efx_phy_operations falcon_tenxpress_phy_ops = { 439struct efx_phy_operations falcon_tenxpress_phy_ops = {
@@ -513,7 +442,7 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = {
513 .check_hw = tenxpress_phy_check_hw, 442 .check_hw = tenxpress_phy_check_hw,
514 .fini = tenxpress_phy_fini, 443 .fini = tenxpress_phy_fini,
515 .clear_interrupt = tenxpress_phy_clear_interrupt, 444 .clear_interrupt = tenxpress_phy_clear_interrupt,
516 .reset_xaui = tenxpress_reset_xaui, 445 .test = tenxpress_phy_test,
517 .mmds = TENXPRESS_REQUIRED_DEVS, 446 .mmds = TENXPRESS_REQUIRED_DEVS,
518 .loopbacks = TENXPRESS_LOOPBACKS, 447 .loopbacks = TENXPRESS_LOOPBACKS,
519}; 448};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5e8374ab28ee..da3e9ff339f5 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -47,7 +47,7 @@ void efx_stop_queue(struct efx_nic *efx)
47 * We want to be able to nest calls to netif_stop_queue(), since each 47 * We want to be able to nest calls to netif_stop_queue(), since each
48 * channel can have an individual stop on the queue. 48 * channel can have an individual stop on the queue.
49 */ 49 */
50inline void efx_wake_queue(struct efx_nic *efx) 50void efx_wake_queue(struct efx_nic *efx)
51{ 51{
52 local_bh_disable(); 52 local_bh_disable();
53 if (atomic_dec_and_lock(&efx->netif_stop_count, 53 if (atomic_dec_and_lock(&efx->netif_stop_count,
@@ -59,19 +59,21 @@ inline void efx_wake_queue(struct efx_nic *efx)
59 local_bh_enable(); 59 local_bh_enable();
60} 60}
61 61
62static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 62static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
63 struct efx_tx_buffer *buffer) 63 struct efx_tx_buffer *buffer)
64{ 64{
65 if (buffer->unmap_len) { 65 if (buffer->unmap_len) {
66 struct pci_dev *pci_dev = tx_queue->efx->pci_dev; 66 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
67 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
68 buffer->unmap_len);
67 if (buffer->unmap_single) 69 if (buffer->unmap_single)
68 pci_unmap_single(pci_dev, buffer->unmap_addr, 70 pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
69 buffer->unmap_len, PCI_DMA_TODEVICE); 71 PCI_DMA_TODEVICE);
70 else 72 else
71 pci_unmap_page(pci_dev, buffer->unmap_addr, 73 pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
72 buffer->unmap_len, PCI_DMA_TODEVICE); 74 PCI_DMA_TODEVICE);
73 buffer->unmap_len = 0; 75 buffer->unmap_len = 0;
74 buffer->unmap_single = 0; 76 buffer->unmap_single = false;
75 } 77 }
76 78
77 if (buffer->skb) { 79 if (buffer->skb) {
@@ -103,13 +105,13 @@ struct efx_tso_header {
103}; 105};
104 106
105static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 107static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
106 const struct sk_buff *skb); 108 struct sk_buff *skb);
107static void efx_fini_tso(struct efx_tx_queue *tx_queue); 109static void efx_fini_tso(struct efx_tx_queue *tx_queue);
108static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, 110static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
109 struct efx_tso_header *tsoh); 111 struct efx_tso_header *tsoh);
110 112
111static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue, 113static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
112 struct efx_tx_buffer *buffer) 114 struct efx_tx_buffer *buffer)
113{ 115{
114 if (buffer->tsoh) { 116 if (buffer->tsoh) {
115 if (likely(!buffer->tsoh->unmap_len)) { 117 if (likely(!buffer->tsoh->unmap_len)) {
@@ -136,8 +138,8 @@ static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
136 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY 138 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
137 * You must hold netif_tx_lock() to call this function. 139 * You must hold netif_tx_lock() to call this function.
138 */ 140 */
139static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, 141static int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
140 const struct sk_buff *skb) 142 struct sk_buff *skb)
141{ 143{
142 struct efx_nic *efx = tx_queue->efx; 144 struct efx_nic *efx = tx_queue->efx;
143 struct pci_dev *pci_dev = efx->pci_dev; 145 struct pci_dev *pci_dev = efx->pci_dev;
@@ -148,7 +150,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
148 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign; 150 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
149 dma_addr_t dma_addr, unmap_addr = 0; 151 dma_addr_t dma_addr, unmap_addr = 0;
150 unsigned int dma_len; 152 unsigned int dma_len;
151 unsigned unmap_single; 153 bool unmap_single;
152 int q_space, i = 0; 154 int q_space, i = 0;
153 int rc = NETDEV_TX_OK; 155 int rc = NETDEV_TX_OK;
154 156
@@ -167,7 +169,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
167 * since this is more efficient on machines with sparse 169 * since this is more efficient on machines with sparse
168 * memory. 170 * memory.
169 */ 171 */
170 unmap_single = 1; 172 unmap_single = true;
171 dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE); 173 dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
172 174
173 /* Process all fragments */ 175 /* Process all fragments */
@@ -213,7 +215,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
213 EFX_BUG_ON_PARANOID(buffer->tsoh); 215 EFX_BUG_ON_PARANOID(buffer->tsoh);
214 EFX_BUG_ON_PARANOID(buffer->skb); 216 EFX_BUG_ON_PARANOID(buffer->skb);
215 EFX_BUG_ON_PARANOID(buffer->len); 217 EFX_BUG_ON_PARANOID(buffer->len);
216 EFX_BUG_ON_PARANOID(buffer->continuation != 1); 218 EFX_BUG_ON_PARANOID(!buffer->continuation);
217 EFX_BUG_ON_PARANOID(buffer->unmap_len); 219 EFX_BUG_ON_PARANOID(buffer->unmap_len);
218 220
219 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1); 221 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
@@ -233,7 +235,6 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
233 } while (len); 235 } while (len);
234 236
235 /* Transfer ownership of the unmapping to the final buffer */ 237 /* Transfer ownership of the unmapping to the final buffer */
236 buffer->unmap_addr = unmap_addr;
237 buffer->unmap_single = unmap_single; 238 buffer->unmap_single = unmap_single;
238 buffer->unmap_len = unmap_len; 239 buffer->unmap_len = unmap_len;
239 unmap_len = 0; 240 unmap_len = 0;
@@ -247,14 +248,14 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
247 page_offset = fragment->page_offset; 248 page_offset = fragment->page_offset;
248 i++; 249 i++;
249 /* Map for DMA */ 250 /* Map for DMA */
250 unmap_single = 0; 251 unmap_single = false;
251 dma_addr = pci_map_page(pci_dev, page, page_offset, len, 252 dma_addr = pci_map_page(pci_dev, page, page_offset, len,
252 PCI_DMA_TODEVICE); 253 PCI_DMA_TODEVICE);
253 } 254 }
254 255
255 /* Transfer ownership of the skb to the final buffer */ 256 /* Transfer ownership of the skb to the final buffer */
256 buffer->skb = skb; 257 buffer->skb = skb;
257 buffer->continuation = 0; 258 buffer->continuation = false;
258 259
259 /* Pass off to hardware */ 260 /* Pass off to hardware */
260 falcon_push_buffers(tx_queue); 261 falcon_push_buffers(tx_queue);
@@ -287,9 +288,14 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
287 } 288 }
288 289
289 /* Free the fragment we were mid-way through pushing */ 290 /* Free the fragment we were mid-way through pushing */
290 if (unmap_len) 291 if (unmap_len) {
291 pci_unmap_page(pci_dev, unmap_addr, unmap_len, 292 if (unmap_single)
292 PCI_DMA_TODEVICE); 293 pci_unmap_single(pci_dev, unmap_addr, unmap_len,
294 PCI_DMA_TODEVICE);
295 else
296 pci_unmap_page(pci_dev, unmap_addr, unmap_len,
297 PCI_DMA_TODEVICE);
298 }
293 299
294 return rc; 300 return rc;
295} 301}
@@ -299,8 +305,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
299 * This removes packets from the TX queue, up to and including the 305 * This removes packets from the TX queue, up to and including the
300 * specified index. 306 * specified index.
301 */ 307 */
302static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, 308static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
303 unsigned int index) 309 unsigned int index)
304{ 310{
305 struct efx_nic *efx = tx_queue->efx; 311 struct efx_nic *efx = tx_queue->efx;
306 unsigned int stop_index, read_ptr; 312 unsigned int stop_index, read_ptr;
@@ -320,7 +326,7 @@ static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
320 } 326 }
321 327
322 efx_dequeue_buffer(tx_queue, buffer); 328 efx_dequeue_buffer(tx_queue, buffer);
323 buffer->continuation = 1; 329 buffer->continuation = true;
324 buffer->len = 0; 330 buffer->len = 0;
325 331
326 ++tx_queue->read_count; 332 ++tx_queue->read_count;
@@ -367,8 +373,15 @@ inline int efx_xmit(struct efx_nic *efx,
367 */ 373 */
368int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) 374int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
369{ 375{
370 struct efx_nic *efx = net_dev->priv; 376 struct efx_nic *efx = netdev_priv(net_dev);
371 return efx_xmit(efx, &efx->tx_queue[0], skb); 377 struct efx_tx_queue *tx_queue;
378
379 if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
380 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
381 else
382 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
383
384 return efx_xmit(efx, tx_queue, skb);
372} 385}
373 386
374void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 387void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -412,30 +425,25 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
412 /* Allocate software ring */ 425 /* Allocate software ring */
413 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); 426 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
414 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); 427 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
415 if (!tx_queue->buffer) { 428 if (!tx_queue->buffer)
416 rc = -ENOMEM; 429 return -ENOMEM;
417 goto fail1;
418 }
419 for (i = 0; i <= efx->type->txd_ring_mask; ++i) 430 for (i = 0; i <= efx->type->txd_ring_mask; ++i)
420 tx_queue->buffer[i].continuation = 1; 431 tx_queue->buffer[i].continuation = true;
421 432
422 /* Allocate hardware ring */ 433 /* Allocate hardware ring */
423 rc = falcon_probe_tx(tx_queue); 434 rc = falcon_probe_tx(tx_queue);
424 if (rc) 435 if (rc)
425 goto fail2; 436 goto fail;
426 437
427 return 0; 438 return 0;
428 439
429 fail2: 440 fail:
430 kfree(tx_queue->buffer); 441 kfree(tx_queue->buffer);
431 tx_queue->buffer = NULL; 442 tx_queue->buffer = NULL;
432 fail1:
433 tx_queue->used = 0;
434
435 return rc; 443 return rc;
436} 444}
437 445
438int efx_init_tx_queue(struct efx_tx_queue *tx_queue) 446void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
439{ 447{
440 EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue); 448 EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
441 449
@@ -446,7 +454,7 @@ int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
446 BUG_ON(tx_queue->stopped); 454 BUG_ON(tx_queue->stopped);
447 455
448 /* Set up TX descriptor ring */ 456 /* Set up TX descriptor ring */
449 return falcon_init_tx(tx_queue); 457 falcon_init_tx(tx_queue);
450} 458}
451 459
452void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) 460void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -461,7 +469,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
461 buffer = &tx_queue->buffer[tx_queue->read_count & 469 buffer = &tx_queue->buffer[tx_queue->read_count &
462 tx_queue->efx->type->txd_ring_mask]; 470 tx_queue->efx->type->txd_ring_mask];
463 efx_dequeue_buffer(tx_queue, buffer); 471 efx_dequeue_buffer(tx_queue, buffer);
464 buffer->continuation = 1; 472 buffer->continuation = true;
465 buffer->len = 0; 473 buffer->len = 0;
466 474
467 ++tx_queue->read_count; 475 ++tx_queue->read_count;
@@ -494,7 +502,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
494 502
495 kfree(tx_queue->buffer); 503 kfree(tx_queue->buffer);
496 tx_queue->buffer = NULL; 504 tx_queue->buffer = NULL;
497 tx_queue->used = 0;
498} 505}
499 506
500 507
@@ -509,7 +516,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
509/* Number of bytes inserted at the start of a TSO header buffer, 516/* Number of bytes inserted at the start of a TSO header buffer,
510 * similar to NET_IP_ALIGN. 517 * similar to NET_IP_ALIGN.
511 */ 518 */
512#if defined(__i386__) || defined(__x86_64__) 519#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
513#define TSOH_OFFSET 0 520#define TSOH_OFFSET 0
514#else 521#else
515#define TSOH_OFFSET NET_IP_ALIGN 522#define TSOH_OFFSET NET_IP_ALIGN
@@ -533,47 +540,37 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
533 540
534/** 541/**
535 * struct tso_state - TSO state for an SKB 542 * struct tso_state - TSO state for an SKB
536 * @remaining_len: Bytes of data we've yet to segment 543 * @out_len: Remaining length in current segment
537 * @seqnum: Current sequence number 544 * @seqnum: Current sequence number
545 * @ipv4_id: Current IPv4 ID, host endian
538 * @packet_space: Remaining space in current packet 546 * @packet_space: Remaining space in current packet
539 * @ifc: Input fragment cursor. 547 * @dma_addr: DMA address of current position
540 * Where we are in the current fragment of the incoming SKB. These 548 * @in_len: Remaining length in current SKB fragment
541 * values get updated in place when we split a fragment over 549 * @unmap_len: Length of SKB fragment
542 * multiple packets. 550 * @unmap_addr: DMA address of SKB fragment
543 * @p: Parameters. 551 * @unmap_single: DMA single vs page mapping flag
544 * These values are set once at the start of the TSO send and do 552 * @header_len: Number of bytes of header
545 * not get changed as the routine progresses. 553 * @full_packet_size: Number of bytes to put in each outgoing segment
546 * 554 *
547 * The state used during segmentation. It is put into this data structure 555 * The state used during segmentation. It is put into this data structure
548 * just to make it easy to pass into inline functions. 556 * just to make it easy to pass into inline functions.
549 */ 557 */
550struct tso_state { 558struct tso_state {
551 unsigned remaining_len; 559 /* Output position */
560 unsigned out_len;
552 unsigned seqnum; 561 unsigned seqnum;
562 unsigned ipv4_id;
553 unsigned packet_space; 563 unsigned packet_space;
554 564
555 struct { 565 /* Input position */
556 /* DMA address of current position */ 566 dma_addr_t dma_addr;
557 dma_addr_t dma_addr; 567 unsigned in_len;
558 /* Remaining length */ 568 unsigned unmap_len;
559 unsigned int len; 569 dma_addr_t unmap_addr;
560 /* DMA address and length of the whole fragment */ 570 bool unmap_single;
561 unsigned int unmap_len; 571
562 dma_addr_t unmap_addr; 572 unsigned header_len;
563 struct page *page; 573 int full_packet_size;
564 unsigned page_off;
565 } ifc;
566
567 struct {
568 /* The number of bytes of header */
569 unsigned int header_length;
570
571 /* The number of bytes to put in each outgoing segment. */
572 int full_packet_size;
573
574 /* Current IPv4 ID, host endian. */
575 unsigned ipv4_id;
576 } p;
577}; 574};
578 575
579 576
@@ -581,11 +578,24 @@ struct tso_state {
581 * Verify that our various assumptions about sk_buffs and the conditions 578 * Verify that our various assumptions about sk_buffs and the conditions
582 * under which TSO will be attempted hold true. 579 * under which TSO will be attempted hold true.
583 */ 580 */
584static inline void efx_tso_check_safe(const struct sk_buff *skb) 581static void efx_tso_check_safe(struct sk_buff *skb)
585{ 582{
586 EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); 583 __be16 protocol = skb->protocol;
584
587 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != 585 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
588 skb->protocol); 586 protocol);
587 if (protocol == htons(ETH_P_8021Q)) {
588 /* Find the encapsulated protocol; reset network header
589 * and transport header based on that. */
590 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
591 protocol = veh->h_vlan_encapsulated_proto;
592 skb_set_network_header(skb, sizeof(*veh));
593 if (protocol == htons(ETH_P_IP))
594 skb_set_transport_header(skb, sizeof(*veh) +
595 4 * ip_hdr(skb)->ihl);
596 }
597
598 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP));
589 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); 599 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
590 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) 600 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
591 + (tcp_hdr(skb)->doff << 2u)) > 601 + (tcp_hdr(skb)->doff << 2u)) >
@@ -685,18 +695,14 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
685 * @tx_queue: Efx TX queue 695 * @tx_queue: Efx TX queue
686 * @dma_addr: DMA address of fragment 696 * @dma_addr: DMA address of fragment
687 * @len: Length of fragment 697 * @len: Length of fragment
688 * @skb: Only non-null for end of last segment 698 * @final_buffer: The final buffer inserted into the queue
689 * @end_of_packet: True if last fragment in a packet
690 * @unmap_addr: DMA address of fragment for unmapping
691 * @unmap_len: Only set this in last segment of a fragment
692 * 699 *
693 * Push descriptors onto the TX queue. Return 0 on success or 1 if 700 * Push descriptors onto the TX queue. Return 0 on success or 1 if
694 * @tx_queue full. 701 * @tx_queue full.
695 */ 702 */
696static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, 703static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
697 dma_addr_t dma_addr, unsigned len, 704 dma_addr_t dma_addr, unsigned len,
698 const struct sk_buff *skb, int end_of_packet, 705 struct efx_tx_buffer **final_buffer)
699 dma_addr_t unmap_addr, unsigned unmap_len)
700{ 706{
701 struct efx_tx_buffer *buffer; 707 struct efx_tx_buffer *buffer;
702 struct efx_nic *efx = tx_queue->efx; 708 struct efx_nic *efx = tx_queue->efx;
@@ -724,8 +730,10 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
724 fill_level = (tx_queue->insert_count 730 fill_level = (tx_queue->insert_count
725 - tx_queue->old_read_count); 731 - tx_queue->old_read_count);
726 q_space = efx->type->txd_ring_mask - 1 - fill_level; 732 q_space = efx->type->txd_ring_mask - 1 - fill_level;
727 if (unlikely(q_space-- <= 0)) 733 if (unlikely(q_space-- <= 0)) {
734 *final_buffer = NULL;
728 return 1; 735 return 1;
736 }
729 smp_mb(); 737 smp_mb();
730 --tx_queue->stopped; 738 --tx_queue->stopped;
731 } 739 }
@@ -742,7 +750,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
742 EFX_BUG_ON_PARANOID(buffer->len); 750 EFX_BUG_ON_PARANOID(buffer->len);
743 EFX_BUG_ON_PARANOID(buffer->unmap_len); 751 EFX_BUG_ON_PARANOID(buffer->unmap_len);
744 EFX_BUG_ON_PARANOID(buffer->skb); 752 EFX_BUG_ON_PARANOID(buffer->skb);
745 EFX_BUG_ON_PARANOID(buffer->continuation != 1); 753 EFX_BUG_ON_PARANOID(!buffer->continuation);
746 EFX_BUG_ON_PARANOID(buffer->tsoh); 754 EFX_BUG_ON_PARANOID(buffer->tsoh);
747 755
748 buffer->dma_addr = dma_addr; 756 buffer->dma_addr = dma_addr;
@@ -765,10 +773,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
765 773
766 EFX_BUG_ON_PARANOID(!len); 774 EFX_BUG_ON_PARANOID(!len);
767 buffer->len = len; 775 buffer->len = len;
768 buffer->skb = skb; 776 *final_buffer = buffer;
769 buffer->continuation = !end_of_packet;
770 buffer->unmap_addr = unmap_addr;
771 buffer->unmap_len = unmap_len;
772 return 0; 777 return 0;
773} 778}
774 779
@@ -780,8 +785,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
780 * a single fragment, and we know it doesn't cross a page boundary. It 785 * a single fragment, and we know it doesn't cross a page boundary. It
781 * also allows us to not worry about end-of-packet etc. 786 * also allows us to not worry about end-of-packet etc.
782 */ 787 */
783static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, 788static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
784 struct efx_tso_header *tsoh, unsigned len) 789 struct efx_tso_header *tsoh, unsigned len)
785{ 790{
786 struct efx_tx_buffer *buffer; 791 struct efx_tx_buffer *buffer;
787 792
@@ -791,7 +796,7 @@ static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
791 EFX_BUG_ON_PARANOID(buffer->len); 796 EFX_BUG_ON_PARANOID(buffer->len);
792 EFX_BUG_ON_PARANOID(buffer->unmap_len); 797 EFX_BUG_ON_PARANOID(buffer->unmap_len);
793 EFX_BUG_ON_PARANOID(buffer->skb); 798 EFX_BUG_ON_PARANOID(buffer->skb);
794 EFX_BUG_ON_PARANOID(buffer->continuation != 1); 799 EFX_BUG_ON_PARANOID(!buffer->continuation);
795 EFX_BUG_ON_PARANOID(buffer->tsoh); 800 EFX_BUG_ON_PARANOID(buffer->tsoh);
796 buffer->len = len; 801 buffer->len = len;
797 buffer->dma_addr = tsoh->dma_addr; 802 buffer->dma_addr = tsoh->dma_addr;
@@ -805,6 +810,7 @@ static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
805static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 810static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
806{ 811{
807 struct efx_tx_buffer *buffer; 812 struct efx_tx_buffer *buffer;
813 dma_addr_t unmap_addr;
808 814
809 /* Work backwards until we hit the original insert pointer value */ 815 /* Work backwards until we hit the original insert pointer value */
810 while (tx_queue->insert_count != tx_queue->write_count) { 816 while (tx_queue->insert_count != tx_queue->write_count) {
@@ -814,11 +820,18 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
814 efx_tsoh_free(tx_queue, buffer); 820 efx_tsoh_free(tx_queue, buffer);
815 EFX_BUG_ON_PARANOID(buffer->skb); 821 EFX_BUG_ON_PARANOID(buffer->skb);
816 buffer->len = 0; 822 buffer->len = 0;
817 buffer->continuation = 1; 823 buffer->continuation = true;
818 if (buffer->unmap_len) { 824 if (buffer->unmap_len) {
819 pci_unmap_page(tx_queue->efx->pci_dev, 825 unmap_addr = (buffer->dma_addr + buffer->len -
820 buffer->unmap_addr, 826 buffer->unmap_len);
821 buffer->unmap_len, PCI_DMA_TODEVICE); 827 if (buffer->unmap_single)
828 pci_unmap_single(tx_queue->efx->pci_dev,
829 unmap_addr, buffer->unmap_len,
830 PCI_DMA_TODEVICE);
831 else
832 pci_unmap_page(tx_queue->efx->pci_dev,
833 unmap_addr, buffer->unmap_len,
834 PCI_DMA_TODEVICE);
822 buffer->unmap_len = 0; 835 buffer->unmap_len = 0;
823 } 836 }
824 } 837 }
@@ -826,50 +839,57 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
826 839
827 840
828/* Parse the SKB header and initialise state. */ 841/* Parse the SKB header and initialise state. */
829static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) 842static void tso_start(struct tso_state *st, const struct sk_buff *skb)
830{ 843{
831 /* All ethernet/IP/TCP headers combined size is TCP header size 844 /* All ethernet/IP/TCP headers combined size is TCP header size
832 * plus offset of TCP header relative to start of packet. 845 * plus offset of TCP header relative to start of packet.
833 */ 846 */
834 st->p.header_length = ((tcp_hdr(skb)->doff << 2u) 847 st->header_len = ((tcp_hdr(skb)->doff << 2u)
835 + PTR_DIFF(tcp_hdr(skb), skb->data)); 848 + PTR_DIFF(tcp_hdr(skb), skb->data));
836 st->p.full_packet_size = (st->p.header_length 849 st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
837 + skb_shinfo(skb)->gso_size);
838 850
839 st->p.ipv4_id = ntohs(ip_hdr(skb)->id); 851 st->ipv4_id = ntohs(ip_hdr(skb)->id);
840 st->seqnum = ntohl(tcp_hdr(skb)->seq); 852 st->seqnum = ntohl(tcp_hdr(skb)->seq);
841 853
842 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); 854 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
843 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); 855 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
844 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); 856 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
845 857
846 st->packet_space = st->p.full_packet_size; 858 st->packet_space = st->full_packet_size;
847 st->remaining_len = skb->len - st->p.header_length; 859 st->out_len = skb->len - st->header_len;
860 st->unmap_len = 0;
861 st->unmap_single = false;
848} 862}
849 863
850 864static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
851/** 865 skb_frag_t *frag)
852 * tso_get_fragment - record fragment details and map for DMA
853 * @st: TSO state
854 * @efx: Efx NIC
855 * @data: Pointer to fragment data
856 * @len: Length of fragment
857 *
858 * Record fragment details and map for DMA. Return 0 on success, or
859 * -%ENOMEM if DMA mapping fails.
860 */
861static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
862 int len, struct page *page, int page_off)
863{ 866{
867 st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
868 frag->page_offset, frag->size,
869 PCI_DMA_TODEVICE);
870 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
871 st->unmap_single = false;
872 st->unmap_len = frag->size;
873 st->in_len = frag->size;
874 st->dma_addr = st->unmap_addr;
875 return 0;
876 }
877 return -ENOMEM;
878}
864 879
865 st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, 880static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
866 len, PCI_DMA_TODEVICE); 881 const struct sk_buff *skb)
867 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { 882{
868 st->ifc.unmap_len = len; 883 int hl = st->header_len;
869 st->ifc.len = len; 884 int len = skb_headlen(skb) - hl;
870 st->ifc.dma_addr = st->ifc.unmap_addr; 885
871 st->ifc.page = page; 886 st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
872 st->ifc.page_off = page_off; 887 len, PCI_DMA_TODEVICE);
888 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
889 st->unmap_single = true;
890 st->unmap_len = len;
891 st->in_len = len;
892 st->dma_addr = st->unmap_addr;
873 return 0; 893 return 0;
874 } 894 }
875 return -ENOMEM; 895 return -ENOMEM;
@@ -886,36 +906,45 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
886 * of fragment or end-of-packet. Return 0 on success, 1 if not enough 906 * of fragment or end-of-packet. Return 0 on success, 1 if not enough
887 * space in @tx_queue. 907 * space in @tx_queue.
888 */ 908 */
889static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, 909static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
890 const struct sk_buff *skb, 910 const struct sk_buff *skb,
891 struct tso_state *st) 911 struct tso_state *st)
892{ 912{
893 913 struct efx_tx_buffer *buffer;
894 int n, end_of_packet, rc; 914 int n, end_of_packet, rc;
895 915
896 if (st->ifc.len == 0) 916 if (st->in_len == 0)
897 return 0; 917 return 0;
898 if (st->packet_space == 0) 918 if (st->packet_space == 0)
899 return 0; 919 return 0;
900 920
901 EFX_BUG_ON_PARANOID(st->ifc.len <= 0); 921 EFX_BUG_ON_PARANOID(st->in_len <= 0);
902 EFX_BUG_ON_PARANOID(st->packet_space <= 0); 922 EFX_BUG_ON_PARANOID(st->packet_space <= 0);
903 923
904 n = min(st->ifc.len, st->packet_space); 924 n = min(st->in_len, st->packet_space);
905 925
906 st->packet_space -= n; 926 st->packet_space -= n;
907 st->remaining_len -= n; 927 st->out_len -= n;
908 st->ifc.len -= n; 928 st->in_len -= n;
909 st->ifc.page_off += n; 929
910 end_of_packet = st->remaining_len == 0 || st->packet_space == 0; 930 rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
911 931 if (likely(rc == 0)) {
912 rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, 932 if (st->out_len == 0)
913 st->remaining_len ? NULL : skb, 933 /* Transfer ownership of the skb */
914 end_of_packet, st->ifc.unmap_addr, 934 buffer->skb = skb;
915 st->ifc.len ? 0 : st->ifc.unmap_len); 935
916 936 end_of_packet = st->out_len == 0 || st->packet_space == 0;
917 st->ifc.dma_addr += n; 937 buffer->continuation = !end_of_packet;
938
939 if (st->in_len == 0) {
940 /* Transfer ownership of the pci mapping */
941 buffer->unmap_len = st->unmap_len;
942 buffer->unmap_single = st->unmap_single;
943 st->unmap_len = 0;
944 }
945 }
918 946
947 st->dma_addr += n;
919 return rc; 948 return rc;
920} 949}
921 950
@@ -929,9 +958,9 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
929 * Generate a new header and prepare for the new packet. Return 0 on 958 * Generate a new header and prepare for the new packet. Return 0 on
930 * success, or -1 if failed to alloc header. 959 * success, or -1 if failed to alloc header.
931 */ 960 */
932static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, 961static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
933 const struct sk_buff *skb, 962 const struct sk_buff *skb,
934 struct tso_state *st) 963 struct tso_state *st)
935{ 964{
936 struct efx_tso_header *tsoh; 965 struct efx_tso_header *tsoh;
937 struct iphdr *tsoh_iph; 966 struct iphdr *tsoh_iph;
@@ -940,7 +969,7 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
940 u8 *header; 969 u8 *header;
941 970
942 /* Allocate a DMA-mapped header buffer. */ 971 /* Allocate a DMA-mapped header buffer. */
943 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { 972 if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
944 if (tx_queue->tso_headers_free == NULL) { 973 if (tx_queue->tso_headers_free == NULL) {
945 if (efx_tsoh_block_alloc(tx_queue)) 974 if (efx_tsoh_block_alloc(tx_queue))
946 return -1; 975 return -1;
@@ -951,7 +980,7 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
951 tsoh->unmap_len = 0; 980 tsoh->unmap_len = 0;
952 } else { 981 } else {
953 tx_queue->tso_long_headers++; 982 tx_queue->tso_long_headers++;
954 tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length); 983 tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
955 if (unlikely(!tsoh)) 984 if (unlikely(!tsoh))
956 return -1; 985 return -1;
957 } 986 }
@@ -961,33 +990,32 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
961 tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); 990 tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
962 991
963 /* Copy and update the headers. */ 992 /* Copy and update the headers. */
964 memcpy(header, skb->data, st->p.header_length); 993 memcpy(header, skb->data, st->header_len);
965 994
966 tsoh_th->seq = htonl(st->seqnum); 995 tsoh_th->seq = htonl(st->seqnum);
967 st->seqnum += skb_shinfo(skb)->gso_size; 996 st->seqnum += skb_shinfo(skb)->gso_size;
968 if (st->remaining_len > skb_shinfo(skb)->gso_size) { 997 if (st->out_len > skb_shinfo(skb)->gso_size) {
969 /* This packet will not finish the TSO burst. */ 998 /* This packet will not finish the TSO burst. */
970 ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb); 999 ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
971 tsoh_th->fin = 0; 1000 tsoh_th->fin = 0;
972 tsoh_th->psh = 0; 1001 tsoh_th->psh = 0;
973 } else { 1002 } else {
974 /* This packet will be the last in the TSO burst. */ 1003 /* This packet will be the last in the TSO burst. */
975 ip_length = (st->p.header_length - ETH_HDR_LEN(skb) 1004 ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
976 + st->remaining_len);
977 tsoh_th->fin = tcp_hdr(skb)->fin; 1005 tsoh_th->fin = tcp_hdr(skb)->fin;
978 tsoh_th->psh = tcp_hdr(skb)->psh; 1006 tsoh_th->psh = tcp_hdr(skb)->psh;
979 } 1007 }
980 tsoh_iph->tot_len = htons(ip_length); 1008 tsoh_iph->tot_len = htons(ip_length);
981 1009
982 /* Linux leaves suitable gaps in the IP ID space for us to fill. */ 1010 /* Linux leaves suitable gaps in the IP ID space for us to fill. */
983 tsoh_iph->id = htons(st->p.ipv4_id); 1011 tsoh_iph->id = htons(st->ipv4_id);
984 st->p.ipv4_id++; 1012 st->ipv4_id++;
985 1013
986 st->packet_space = skb_shinfo(skb)->gso_size; 1014 st->packet_space = skb_shinfo(skb)->gso_size;
987 ++tx_queue->tso_packets; 1015 ++tx_queue->tso_packets;
988 1016
989 /* Form a descriptor for this header. */ 1017 /* Form a descriptor for this header. */
990 efx_tso_put_header(tx_queue, tsoh, st->p.header_length); 1018 efx_tso_put_header(tx_queue, tsoh, st->header_len);
991 1019
992 return 0; 1020 return 0;
993} 1021}
@@ -1005,11 +1033,11 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1005 * %NETDEV_TX_OK or %NETDEV_TX_BUSY. 1033 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
1006 */ 1034 */
1007static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 1035static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1008 const struct sk_buff *skb) 1036 struct sk_buff *skb)
1009{ 1037{
1038 struct efx_nic *efx = tx_queue->efx;
1010 int frag_i, rc, rc2 = NETDEV_TX_OK; 1039 int frag_i, rc, rc2 = NETDEV_TX_OK;
1011 struct tso_state state; 1040 struct tso_state state;
1012 skb_frag_t *f;
1013 1041
1014 /* Verify TSO is safe - these checks should never fail. */ 1042 /* Verify TSO is safe - these checks should never fail. */
1015 efx_tso_check_safe(skb); 1043 efx_tso_check_safe(skb);
@@ -1021,29 +1049,16 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1021 /* Assume that skb header area contains exactly the headers, and 1049 /* Assume that skb header area contains exactly the headers, and
1022 * all payload is in the frag list. 1050 * all payload is in the frag list.
1023 */ 1051 */
1024 if (skb_headlen(skb) == state.p.header_length) { 1052 if (skb_headlen(skb) == state.header_len) {
1025 /* Grab the first payload fragment. */ 1053 /* Grab the first payload fragment. */
1026 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); 1054 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1027 frag_i = 0; 1055 frag_i = 0;
1028 f = &skb_shinfo(skb)->frags[frag_i]; 1056 rc = tso_get_fragment(&state, efx,
1029 rc = tso_get_fragment(&state, tx_queue->efx, 1057 skb_shinfo(skb)->frags + frag_i);
1030 f->size, f->page, f->page_offset);
1031 if (rc) 1058 if (rc)
1032 goto mem_err; 1059 goto mem_err;
1033 } else { 1060 } else {
1034 /* It may look like this code fragment assumes that the 1061 rc = tso_get_head_fragment(&state, efx, skb);
1035 * skb->data portion does not cross a page boundary, but
1036 * that is not the case. It is guaranteed to be direct
1037 * mapped memory, and therefore is physically contiguous,
1038 * and so DMA will work fine. kmap_atomic() on this region
1039 * will just return the direct mapping, so that will work
1040 * too.
1041 */
1042 int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
1043 int hl = state.p.header_length;
1044 rc = tso_get_fragment(&state, tx_queue->efx,
1045 skb_headlen(skb) - hl,
1046 virt_to_page(skb->data), page_off + hl);
1047 if (rc) 1062 if (rc)
1048 goto mem_err; 1063 goto mem_err;
1049 frag_i = -1; 1064 frag_i = -1;
@@ -1058,13 +1073,12 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1058 goto stop; 1073 goto stop;
1059 1074
1060 /* Move onto the next fragment? */ 1075 /* Move onto the next fragment? */
1061 if (state.ifc.len == 0) { 1076 if (state.in_len == 0) {
1062 if (++frag_i >= skb_shinfo(skb)->nr_frags) 1077 if (++frag_i >= skb_shinfo(skb)->nr_frags)
1063 /* End of payload reached. */ 1078 /* End of payload reached. */
1064 break; 1079 break;
1065 f = &skb_shinfo(skb)->frags[frag_i]; 1080 rc = tso_get_fragment(&state, efx,
1066 rc = tso_get_fragment(&state, tx_queue->efx, 1081 skb_shinfo(skb)->frags + frag_i);
1067 f->size, f->page, f->page_offset);
1068 if (rc) 1082 if (rc)
1069 goto mem_err; 1083 goto mem_err;
1070 } 1084 }
@@ -1082,8 +1096,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1082 return NETDEV_TX_OK; 1096 return NETDEV_TX_OK;
1083 1097
1084 mem_err: 1098 mem_err:
1085 EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping" 1099 EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
1086 " error\n");
1087 dev_kfree_skb_any((struct sk_buff *)skb); 1100 dev_kfree_skb_any((struct sk_buff *)skb);
1088 goto unwind; 1101 goto unwind;
1089 1102
@@ -1092,9 +1105,19 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1092 1105
1093 /* Stop the queue if it wasn't stopped before. */ 1106 /* Stop the queue if it wasn't stopped before. */
1094 if (tx_queue->stopped == 1) 1107 if (tx_queue->stopped == 1)
1095 efx_stop_queue(tx_queue->efx); 1108 efx_stop_queue(efx);
1096 1109
1097 unwind: 1110 unwind:
1111 /* Free the DMA mapping we were in the process of writing out */
1112 if (state.unmap_len) {
1113 if (state.unmap_single)
1114 pci_unmap_single(efx->pci_dev, state.unmap_addr,
1115 state.unmap_len, PCI_DMA_TODEVICE);
1116 else
1117 pci_unmap_page(efx->pci_dev, state.unmap_addr,
1118 state.unmap_len, PCI_DMA_TODEVICE);
1119 }
1120
1098 efx_enqueue_unwind(tx_queue); 1121 efx_enqueue_unwind(tx_queue);
1099 return rc2; 1122 return rc2;
1100} 1123}
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
index 1526a73b4b51..5e1cc234e42f 100644
--- a/drivers/net/sfc/tx.h
+++ b/drivers/net/sfc/tx.h
@@ -15,7 +15,7 @@
15 15
16int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); 16int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
17void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); 17void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
18int efx_init_tx_queue(struct efx_tx_queue *tx_queue); 18void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
19void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); 19void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
20 20
21int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); 21int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 35ab19c27f8d..fa7b49d69288 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -20,14 +20,10 @@
20 20
21/* XAUI resets if link not detected */ 21/* XAUI resets if link not detected */
22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS 22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
23/* SNAP frames have TOBE_DISC set */
24#define EFX_WORKAROUND_5475 EFX_WORKAROUND_ALWAYS
25/* RX PCIe double split performance issue */ 23/* RX PCIe double split performance issue */
26#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS 24#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
27/* TX pkt parser problem with <= 16 byte TXes */ 25/* TX pkt parser problem with <= 16 byte TXes */
28#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS 26#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
29/* XGXS and XAUI reset sequencing in SW */
30#define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS
31/* Low rate CRC errors require XAUI reset */ 27/* Low rate CRC errors require XAUI reset */
32#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS 28#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS
33/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor 29/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index f3684ad28887..276151df3a70 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -40,7 +40,7 @@ void xfp_set_led(struct efx_nic *p, int led, int mode)
40} 40}
41 41
42struct xfp_phy_data { 42struct xfp_phy_data {
43 int tx_disabled; 43 enum efx_phy_mode phy_mode;
44}; 44};
45 45
46#define XFP_MAX_RESET_TIME 500 46#define XFP_MAX_RESET_TIME 500
@@ -93,7 +93,7 @@ static int xfp_phy_init(struct efx_nic *efx)
93 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), 93 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
94 MDIO_ID_REV(devid)); 94 MDIO_ID_REV(devid));
95 95
96 phy_data->tx_disabled = efx->tx_disabled; 96 phy_data->phy_mode = efx->phy_mode;
97 97
98 rc = xfp_reset_phy(efx); 98 rc = xfp_reset_phy(efx);
99 99
@@ -136,13 +136,14 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
136 struct xfp_phy_data *phy_data = efx->phy_data; 136 struct xfp_phy_data *phy_data = efx->phy_data;
137 137
138 /* Reset the PHY when moving from tx off to tx on */ 138 /* Reset the PHY when moving from tx off to tx on */
139 if (phy_data->tx_disabled && !efx->tx_disabled) 139 if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
140 (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
140 xfp_reset_phy(efx); 141 xfp_reset_phy(efx);
141 142
142 mdio_clause45_transmit_disable(efx); 143 mdio_clause45_transmit_disable(efx);
143 mdio_clause45_phy_reconfigure(efx); 144 mdio_clause45_phy_reconfigure(efx);
144 145
145 phy_data->tx_disabled = efx->tx_disabled; 146 phy_data->phy_mode = efx->phy_mode;
146 efx->link_up = xfp_link_ok(efx); 147 efx->link_up = xfp_link_ok(efx);
147 efx->link_options = GM_LPA_10000FULL; 148 efx->link_options = GM_LPA_10000FULL;
148} 149}
@@ -151,7 +152,7 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
151static void xfp_phy_fini(struct efx_nic *efx) 152static void xfp_phy_fini(struct efx_nic *efx)
152{ 153{
153 /* Clobber the LED if it was blinking */ 154 /* Clobber the LED if it was blinking */
154 efx->board_info.blink(efx, 0); 155 efx->board_info.blink(efx, false);
155 156
156 /* Free the context block */ 157 /* Free the context block */
157 kfree(efx->phy_data); 158 kfree(efx->phy_data);
@@ -164,7 +165,6 @@ struct efx_phy_operations falcon_xfp_phy_ops = {
164 .check_hw = xfp_phy_check_hw, 165 .check_hw = xfp_phy_check_hw,
165 .fini = xfp_phy_fini, 166 .fini = xfp_phy_fini,
166 .clear_interrupt = xfp_phy_clear_interrupt, 167 .clear_interrupt = xfp_phy_clear_interrupt,
167 .reset_xaui = efx_port_dummy_op_void,
168 .mmds = XFP_REQUIRED_DEVS, 168 .mmds = XFP_REQUIRED_DEVS,
169 .loopbacks = XFP_LOOPBACKS, 169 .loopbacks = XFP_LOOPBACKS,
170}; 170};
diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c
index ea85de918233..79e665e0853d 100644
--- a/drivers/net/skfp/pmf.c
+++ b/drivers/net/skfp/pmf.c
@@ -44,17 +44,10 @@ static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
44 int set, int local); 44 int set, int local);
45static int port_to_mib(struct s_smc *smc, int p); 45static int port_to_mib(struct s_smc *smc, int p);
46 46
47#define MOFFSS(e) ((int)&(((struct fddi_mib *)0)->e)) 47#define MOFFSS(e) offsetof(struct fddi_mib, e)
48#define MOFFSA(e) ((int) (((struct fddi_mib *)0)->e)) 48#define MOFFMS(e) offsetof(struct fddi_mib_m, e)
49 49#define MOFFAS(e) offsetof(struct fddi_mib_a, e)
50#define MOFFMS(e) ((int)&(((struct fddi_mib_m *)0)->e)) 50#define MOFFPS(e) offsetof(struct fddi_mib_p, e)
51#define MOFFMA(e) ((int) (((struct fddi_mib_m *)0)->e))
52
53#define MOFFAS(e) ((int)&(((struct fddi_mib_a *)0)->e))
54#define MOFFAA(e) ((int) (((struct fddi_mib_a *)0)->e))
55
56#define MOFFPS(e) ((int)&(((struct fddi_mib_p *)0)->e))
57#define MOFFPA(e) ((int) (((struct fddi_mib_p *)0)->e))
58 51
59 52
60#define AC_G 0x01 /* Get */ 53#define AC_G 0x01 /* Get */
@@ -87,8 +80,8 @@ static const struct s_p_tab {
87 { SMT_P100D,AC_G, MOFFSS(fddiSMTOpVersionId), "S" } , 80 { SMT_P100D,AC_G, MOFFSS(fddiSMTOpVersionId), "S" } ,
88 { SMT_P100E,AC_G, MOFFSS(fddiSMTHiVersionId), "S" } , 81 { SMT_P100E,AC_G, MOFFSS(fddiSMTHiVersionId), "S" } ,
89 { SMT_P100F,AC_G, MOFFSS(fddiSMTLoVersionId), "S" } , 82 { SMT_P100F,AC_G, MOFFSS(fddiSMTLoVersionId), "S" } ,
90 { SMT_P1010,AC_G, MOFFSA(fddiSMTManufacturerData), "D" } , 83 { SMT_P1010,AC_G, MOFFSS(fddiSMTManufacturerData), "D" } ,
91 { SMT_P1011,AC_GR, MOFFSA(fddiSMTUserData), "D" } , 84 { SMT_P1011,AC_GR, MOFFSS(fddiSMTUserData), "D" } ,
92 { SMT_P1012,AC_G, MOFFSS(fddiSMTMIBVersionId), "S" } , 85 { SMT_P1012,AC_G, MOFFSS(fddiSMTMIBVersionId), "S" } ,
93 86
94 /* StationConfigGrp */ 87 /* StationConfigGrp */
@@ -103,7 +96,7 @@ static const struct s_p_tab {
103 { SMT_P101D,AC_GR, MOFFSS(fddiSMTTT_Notify), "wS" } , 96 { SMT_P101D,AC_GR, MOFFSS(fddiSMTTT_Notify), "wS" } ,
104 { SMT_P101E,AC_GR, MOFFSS(fddiSMTStatRptPolicy), "bB" } , 97 { SMT_P101E,AC_GR, MOFFSS(fddiSMTStatRptPolicy), "bB" } ,
105 { SMT_P101F,AC_GR, MOFFSS(fddiSMTTrace_MaxExpiration),"lL" } , 98 { SMT_P101F,AC_GR, MOFFSS(fddiSMTTrace_MaxExpiration),"lL" } ,
106 { SMT_P1020,AC_G, MOFFSA(fddiSMTPORTIndexes), "II" } , 99 { SMT_P1020,AC_G, MOFFSS(fddiSMTPORTIndexes), "II" } ,
107 { SMT_P1021,AC_G, MOFFSS(fddiSMTMACIndexes), "I" } , 100 { SMT_P1021,AC_G, MOFFSS(fddiSMTMACIndexes), "I" } ,
108 { SMT_P1022,AC_G, MOFFSS(fddiSMTBypassPresent), "F" } , 101 { SMT_P1022,AC_G, MOFFSS(fddiSMTBypassPresent), "F" } ,
109 102
@@ -117,8 +110,8 @@ static const struct s_p_tab {
117 110
118 /* MIBOperationGrp */ 111 /* MIBOperationGrp */
119 { SMT_P1032,AC_GROUP } , 112 { SMT_P1032,AC_GROUP } ,
120 { SMT_P1033,AC_G, MOFFSA(fddiSMTTimeStamp),"P" } , 113 { SMT_P1033,AC_G, MOFFSS(fddiSMTTimeStamp),"P" } ,
121 { SMT_P1034,AC_G, MOFFSA(fddiSMTTransitionTimeStamp),"P" } , 114 { SMT_P1034,AC_G, MOFFSS(fddiSMTTransitionTimeStamp),"P" } ,
122 /* NOTE : SMT_P1035 is already swapped ! SMT_P_SETCOUNT */ 115 /* NOTE : SMT_P1035 is already swapped ! SMT_P_SETCOUNT */
123 { SMT_P1035,AC_G, MOFFSS(fddiSMTSetCount),"4P" } , 116 { SMT_P1035,AC_G, MOFFSS(fddiSMTSetCount),"4P" } ,
124 { SMT_P1036,AC_G, MOFFSS(fddiSMTLastSetStationId),"8" } , 117 { SMT_P1036,AC_G, MOFFSS(fddiSMTLastSetStationId),"8" } ,
@@ -129,7 +122,7 @@ static const struct s_p_tab {
129 * PRIVATE EXTENSIONS 122 * PRIVATE EXTENSIONS
130 * only accessible locally to get/set passwd 123 * only accessible locally to get/set passwd
131 */ 124 */
132 { SMT_P10F0,AC_GR, MOFFSA(fddiPRPMFPasswd), "8" } , 125 { SMT_P10F0,AC_GR, MOFFSS(fddiPRPMFPasswd), "8" } ,
133 { SMT_P10F1,AC_GR, MOFFSS(fddiPRPMFStation), "8" } , 126 { SMT_P10F1,AC_GR, MOFFSS(fddiPRPMFStation), "8" } ,
134#ifdef ESS 127#ifdef ESS
135 { SMT_P10F2,AC_GR, MOFFSS(fddiESSPayload), "lL" } , 128 { SMT_P10F2,AC_GR, MOFFSS(fddiESSPayload), "lL" } ,
@@ -245,7 +238,7 @@ static const struct s_p_tab {
245 { SMT_P400E,AC_GR, MOFFPS(fddiPORTConnectionPolicies),"bB" } , 238 { SMT_P400E,AC_GR, MOFFPS(fddiPORTConnectionPolicies),"bB" } ,
246 { SMT_P400F,AC_G, MOFFPS(fddiPORTMacIndicated), "2" } , 239 { SMT_P400F,AC_G, MOFFPS(fddiPORTMacIndicated), "2" } ,
247 { SMT_P4010,AC_G, MOFFPS(fddiPORTCurrentPath), "E" } , 240 { SMT_P4010,AC_G, MOFFPS(fddiPORTCurrentPath), "E" } ,
248 { SMT_P4011,AC_GR, MOFFPA(fddiPORTRequestedPaths), "l4" } , 241 { SMT_P4011,AC_GR, MOFFPS(fddiPORTRequestedPaths), "l4" } ,
249 { SMT_P4012,AC_G, MOFFPS(fddiPORTMACPlacement), "S" } , 242 { SMT_P4012,AC_G, MOFFPS(fddiPORTMACPlacement), "S" } ,
250 { SMT_P4013,AC_G, MOFFPS(fddiPORTAvailablePaths), "B" } , 243 { SMT_P4013,AC_G, MOFFPS(fddiPORTAvailablePaths), "B" } ,
251 { SMT_P4016,AC_G, MOFFPS(fddiPORTPMDClass), "E" } , 244 { SMT_P4016,AC_G, MOFFPS(fddiPORTPMDClass), "E" } ,
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index e24b25ca1c69..3805b9318be7 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -3732,27 +3732,63 @@ static int sky2_get_eeprom_len(struct net_device *dev)
3732 return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); 3732 return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
3733} 3733}
3734 3734
3735static u32 sky2_vpd_read(struct sky2_hw *hw, int cap, u16 offset) 3735static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
3736{ 3736{
3737 u32 val; 3737 unsigned long start = jiffies;
3738 3738
3739 sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); 3739 while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
3740 /* Can take up to 10.6 ms for write */
3741 if (time_after(jiffies, start + HZ/4)) {
3742 dev_err(&hw->pdev->dev, PFX "VPD cycle timed out");
3743 return -ETIMEDOUT;
3744 }
3745 mdelay(1);
3746 }
3740 3747
3741 do { 3748 return 0;
3742 offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR); 3749}
3743 } while (!(offset & PCI_VPD_ADDR_F)); 3750
3751static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
3752 u16 offset, size_t length)
3753{
3754 int rc = 0;
3755
3756 while (length > 0) {
3757 u32 val;
3758
3759 sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
3760 rc = sky2_vpd_wait(hw, cap, 0);
3761 if (rc)
3762 break;
3744 3763
3745 val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); 3764 val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
3746 return val; 3765
3766 memcpy(data, &val, min(sizeof(val), length));
3767 offset += sizeof(u32);
3768 data += sizeof(u32);
3769 length -= sizeof(u32);
3770 }
3771
3772 return rc;
3747} 3773}
3748 3774
3749static void sky2_vpd_write(struct sky2_hw *hw, int cap, u16 offset, u32 val) 3775static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
3776 u16 offset, unsigned int length)
3750{ 3777{
3751 sky2_pci_write16(hw, cap + PCI_VPD_DATA, val); 3778 unsigned int i;
3752 sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); 3779 int rc = 0;
3753 do { 3780
3754 offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR); 3781 for (i = 0; i < length; i += sizeof(u32)) {
3755 } while (offset & PCI_VPD_ADDR_F); 3782 u32 val = *(u32 *)(data + i);
3783
3784 sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
3785 sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
3786
3787 rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
3788 if (rc)
3789 break;
3790 }
3791 return rc;
3756} 3792}
3757 3793
3758static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 3794static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
@@ -3760,24 +3796,13 @@ static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
3760{ 3796{
3761 struct sky2_port *sky2 = netdev_priv(dev); 3797 struct sky2_port *sky2 = netdev_priv(dev);
3762 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); 3798 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
3763 int length = eeprom->len;
3764 u16 offset = eeprom->offset;
3765 3799
3766 if (!cap) 3800 if (!cap)
3767 return -EINVAL; 3801 return -EINVAL;
3768 3802
3769 eeprom->magic = SKY2_EEPROM_MAGIC; 3803 eeprom->magic = SKY2_EEPROM_MAGIC;
3770 3804
3771 while (length > 0) { 3805 return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
3772 u32 val = sky2_vpd_read(sky2->hw, cap, offset);
3773 int n = min_t(int, length, sizeof(val));
3774
3775 memcpy(data, &val, n);
3776 length -= n;
3777 data += n;
3778 offset += n;
3779 }
3780 return 0;
3781} 3806}
3782 3807
3783static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 3808static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
@@ -3785,8 +3810,6 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
3785{ 3810{
3786 struct sky2_port *sky2 = netdev_priv(dev); 3811 struct sky2_port *sky2 = netdev_priv(dev);
3787 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); 3812 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
3788 int length = eeprom->len;
3789 u16 offset = eeprom->offset;
3790 3813
3791 if (!cap) 3814 if (!cap)
3792 return -EINVAL; 3815 return -EINVAL;
@@ -3794,21 +3817,11 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
3794 if (eeprom->magic != SKY2_EEPROM_MAGIC) 3817 if (eeprom->magic != SKY2_EEPROM_MAGIC)
3795 return -EINVAL; 3818 return -EINVAL;
3796 3819
3797 while (length > 0) { 3820 /* Partial writes not supported */
3798 u32 val; 3821 if ((eeprom->offset & 3) || (eeprom->len & 3))
3799 int n = min_t(int, length, sizeof(val)); 3822 return -EINVAL;
3800
3801 if (n < sizeof(val))
3802 val = sky2_vpd_read(sky2->hw, cap, offset);
3803 memcpy(&val, data, n);
3804
3805 sky2_vpd_write(sky2->hw, cap, offset, val);
3806 3823
3807 length -= n; 3824 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
3808 data += n;
3809 offset += n;
3810 }
3811 return 0;
3812} 3825}
3813 3826
3814 3827
@@ -4178,6 +4191,69 @@ static int __devinit pci_wake_enabled(struct pci_dev *dev)
4178 return value & PCI_PM_CTRL_PME_ENABLE; 4191 return value & PCI_PM_CTRL_PME_ENABLE;
4179} 4192}
4180 4193
4194/*
4195 * Read and parse the first part of Vital Product Data
4196 */
4197#define VPD_SIZE 128
4198#define VPD_MAGIC 0x82
4199
4200static void __devinit sky2_vpd_info(struct sky2_hw *hw)
4201{
4202 int cap = pci_find_capability(hw->pdev, PCI_CAP_ID_VPD);
4203 const u8 *p;
4204 u8 *vpd_buf = NULL;
4205 u16 len;
4206 static struct vpd_tag {
4207 char tag[2];
4208 char *label;
4209 } vpd_tags[] = {
4210 { "PN", "Part Number" },
4211 { "EC", "Engineering Level" },
4212 { "MN", "Manufacturer" },
4213 };
4214
4215 if (!cap)
4216 goto out;
4217
4218 vpd_buf = kmalloc(VPD_SIZE, GFP_KERNEL);
4219 if (!vpd_buf)
4220 goto out;
4221
4222 if (sky2_vpd_read(hw, cap, vpd_buf, 0, VPD_SIZE))
4223 goto out;
4224
4225 if (vpd_buf[0] != VPD_MAGIC)
4226 goto out;
4227 len = vpd_buf[1];
4228 if (len == 0 || len > VPD_SIZE - 4)
4229 goto out;
4230 p = vpd_buf + 3;
4231 dev_info(&hw->pdev->dev, "%.*s\n", len, p);
4232 p += len;
4233
4234 while (p < vpd_buf + VPD_SIZE - 4) {
4235 int i;
4236
4237 if (!memcmp("RW", p, 2)) /* end marker */
4238 break;
4239
4240 len = p[2];
4241 if (len > (p - vpd_buf) - 4)
4242 break;
4243
4244 for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
4245 if (!memcmp(vpd_tags[i].tag, p, 2)) {
4246 printk(KERN_DEBUG " %s: %.*s\n",
4247 vpd_tags[i].label, len, p + 3);
4248 break;
4249 }
4250 }
4251 p += len + 3;
4252 }
4253out:
4254 kfree(vpd_buf);
4255}
4256
4181/* This driver supports yukon2 chipset only */ 4257/* This driver supports yukon2 chipset only */
4182static const char *sky2_name(u8 chipid, char *buf, int sz) 4258static const char *sky2_name(u8 chipid, char *buf, int sz)
4183{ 4259{
@@ -4276,13 +4352,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4276 if (err) 4352 if (err)
4277 goto err_out_iounmap; 4353 goto err_out_iounmap;
4278 4354
4279 dev_info(&pdev->dev, "v%s addr 0x%llx irq %d Yukon-2 %s rev %d\n", 4355 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
4280 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0), 4356 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
4281 pdev->irq, sky2_name(hw->chip_id, buf1, sizeof(buf1)),
4282 hw->chip_rev);
4283 4357
4284 sky2_reset(hw); 4358 sky2_reset(hw);
4285 4359
4360 sky2_vpd_info(hw);
4361
4286 dev = sky2_init_netdev(hw, 0, using_dac, wol_default); 4362 dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
4287 if (!dev) { 4363 if (!dev) {
4288 err = -ENOMEM; 4364 err = -ENOMEM;
@@ -4533,6 +4609,8 @@ static struct pci_driver sky2_driver = {
4533 4609
4534static int __init sky2_init_module(void) 4610static int __init sky2_init_module(void)
4535{ 4611{
4612 pr_info(PFX "driver version " DRV_VERSION "\n");
4613
4536 sky2_debug_init(); 4614 sky2_debug_init();
4537 return pci_register_driver(&sky2_driver); 4615 return pci_register_driver(&sky2_driver);
4538} 4616}
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index c5871624f972..02cc064c2c8b 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -183,7 +183,7 @@ static void smc911x_reset(struct net_device *dev)
183 unsigned int reg, timeout=0, resets=1; 183 unsigned int reg, timeout=0, resets=1;
184 unsigned long flags; 184 unsigned long flags;
185 185
186 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 186 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
187 187
188 /* Take out of PM setting first */ 188 /* Take out of PM setting first */
189 if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) { 189 if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
@@ -272,7 +272,7 @@ static void smc911x_enable(struct net_device *dev)
272 unsigned mask, cfg, cr; 272 unsigned mask, cfg, cr;
273 unsigned long flags; 273 unsigned long flags;
274 274
275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
276 276
277 SMC_SET_MAC_ADDR(lp, dev->dev_addr); 277 SMC_SET_MAC_ADDR(lp, dev->dev_addr);
278 278
@@ -329,7 +329,7 @@ static void smc911x_shutdown(struct net_device *dev)
329 unsigned cr; 329 unsigned cr;
330 unsigned long flags; 330 unsigned long flags;
331 331
332 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__); 332 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__);
333 333
334 /* Disable IRQ's */ 334 /* Disable IRQ's */
335 SMC_SET_INT_EN(lp, 0); 335 SMC_SET_INT_EN(lp, 0);
@@ -348,7 +348,7 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
348 struct smc911x_local *lp = netdev_priv(dev); 348 struct smc911x_local *lp = netdev_priv(dev);
349 unsigned int fifo_count, timeout, reg; 349 unsigned int fifo_count, timeout, reg;
350 350
351 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__); 351 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__);
352 fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF; 352 fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
353 if (fifo_count <= 4) { 353 if (fifo_count <= 4) {
354 /* Manually dump the packet data */ 354 /* Manually dump the packet data */
@@ -382,7 +382,7 @@ static inline void smc911x_rcv(struct net_device *dev)
382 unsigned char *data; 382 unsigned char *data;
383 383
384 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", 384 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
385 dev->name, __FUNCTION__); 385 dev->name, __func__);
386 status = SMC_GET_RX_STS_FIFO(lp); 386 status = SMC_GET_RX_STS_FIFO(lp);
387 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n", 387 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
388 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); 388 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
@@ -460,7 +460,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
460 unsigned char *buf; 460 unsigned char *buf;
461 unsigned long flags; 461 unsigned long flags;
462 462
463 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __FUNCTION__); 463 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
464 BUG_ON(lp->pending_tx_skb == NULL); 464 BUG_ON(lp->pending_tx_skb == NULL);
465 465
466 skb = lp->pending_tx_skb; 466 skb = lp->pending_tx_skb;
@@ -524,7 +524,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
524 unsigned long flags; 524 unsigned long flags;
525 525
526 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", 526 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
527 dev->name, __FUNCTION__); 527 dev->name, __func__);
528 528
529 BUG_ON(lp->pending_tx_skb != NULL); 529 BUG_ON(lp->pending_tx_skb != NULL);
530 530
@@ -596,7 +596,7 @@ static void smc911x_tx(struct net_device *dev)
596 unsigned int tx_status; 596 unsigned int tx_status;
597 597
598 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", 598 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
599 dev->name, __FUNCTION__); 599 dev->name, __func__);
600 600
601 /* Collect the TX status */ 601 /* Collect the TX status */
602 while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) { 602 while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
@@ -647,7 +647,7 @@ static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
647 SMC_GET_MII(lp, phyreg, phyaddr, phydata); 647 SMC_GET_MII(lp, phyreg, phyaddr, phydata);
648 648
649 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", 649 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
650 __FUNCTION__, phyaddr, phyreg, phydata); 650 __func__, phyaddr, phyreg, phydata);
651 return phydata; 651 return phydata;
652} 652}
653 653
@@ -661,7 +661,7 @@ static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
661 struct smc911x_local *lp = netdev_priv(dev); 661 struct smc911x_local *lp = netdev_priv(dev);
662 662
663 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 663 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
664 __FUNCTION__, phyaddr, phyreg, phydata); 664 __func__, phyaddr, phyreg, phydata);
665 665
666 SMC_SET_MII(lp, phyreg, phyaddr, phydata); 666 SMC_SET_MII(lp, phyreg, phyaddr, phydata);
667} 667}
@@ -676,7 +676,7 @@ static void smc911x_phy_detect(struct net_device *dev)
676 int phyaddr; 676 int phyaddr;
677 unsigned int cfg, id1, id2; 677 unsigned int cfg, id1, id2;
678 678
679 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 679 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
680 680
681 lp->phy_type = 0; 681 lp->phy_type = 0;
682 682
@@ -746,7 +746,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
746 int phyaddr = lp->mii.phy_id; 746 int phyaddr = lp->mii.phy_id;
747 int bmcr; 747 int bmcr;
748 748
749 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 749 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
750 750
751 /* Enter Link Disable state */ 751 /* Enter Link Disable state */
752 SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); 752 SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
@@ -793,7 +793,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
793 unsigned long flags; 793 unsigned long flags;
794 unsigned int reg; 794 unsigned int reg;
795 795
796 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); 796 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
797 797
798 spin_lock_irqsave(&lp->lock, flags); 798 spin_lock_irqsave(&lp->lock, flags);
799 reg = SMC_GET_PMT_CTRL(lp); 799 reg = SMC_GET_PMT_CTRL(lp);
@@ -852,7 +852,7 @@ static void smc911x_phy_check_media(struct net_device *dev, int init)
852 int phyaddr = lp->mii.phy_id; 852 int phyaddr = lp->mii.phy_id;
853 unsigned int bmcr, cr; 853 unsigned int bmcr, cr;
854 854
855 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 855 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
856 856
857 if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { 857 if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
858 /* duplex state has changed */ 858 /* duplex state has changed */
@@ -892,7 +892,7 @@ static void smc911x_phy_configure(struct work_struct *work)
892 int status; 892 int status;
893 unsigned long flags; 893 unsigned long flags;
894 894
895 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); 895 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
896 896
897 /* 897 /*
898 * We should not be called if phy_type is zero. 898 * We should not be called if phy_type is zero.
@@ -985,7 +985,7 @@ static void smc911x_phy_interrupt(struct net_device *dev)
985 int phyaddr = lp->mii.phy_id; 985 int phyaddr = lp->mii.phy_id;
986 int status; 986 int status;
987 987
988 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 988 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
989 989
990 if (lp->phy_type == 0) 990 if (lp->phy_type == 0)
991 return; 991 return;
@@ -1013,7 +1013,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1013 unsigned int rx_overrun=0, cr, pkts; 1013 unsigned int rx_overrun=0, cr, pkts;
1014 unsigned long flags; 1014 unsigned long flags;
1015 1015
1016 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1016 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1017 1017
1018 spin_lock_irqsave(&lp->lock, flags); 1018 spin_lock_irqsave(&lp->lock, flags);
1019 1019
@@ -1174,8 +1174,6 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1174 1174
1175 spin_unlock_irqrestore(&lp->lock, flags); 1175 spin_unlock_irqrestore(&lp->lock, flags);
1176 1176
1177 DBG(3, "%s: Interrupt done (%d loops)\n", dev->name, 8-timeout);
1178
1179 return IRQ_HANDLED; 1177 return IRQ_HANDLED;
1180} 1178}
1181 1179
@@ -1188,7 +1186,7 @@ smc911x_tx_dma_irq(int dma, void *data)
1188 struct sk_buff *skb = lp->current_tx_skb; 1186 struct sk_buff *skb = lp->current_tx_skb;
1189 unsigned long flags; 1187 unsigned long flags;
1190 1188
1191 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1189 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1192 1190
1193 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name); 1191 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
1194 /* Clear the DMA interrupt sources */ 1192 /* Clear the DMA interrupt sources */
@@ -1224,7 +1222,7 @@ smc911x_rx_dma_irq(int dma, void *data)
1224 unsigned long flags; 1222 unsigned long flags;
1225 unsigned int pkts; 1223 unsigned int pkts;
1226 1224
1227 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1225 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1228 DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name); 1226 DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
1229 /* Clear the DMA interrupt sources */ 1227 /* Clear the DMA interrupt sources */
1230 SMC_DMA_ACK_IRQ(dev, dma); 1228 SMC_DMA_ACK_IRQ(dev, dma);
@@ -1272,7 +1270,7 @@ static void smc911x_timeout(struct net_device *dev)
1272 int status, mask; 1270 int status, mask;
1273 unsigned long flags; 1271 unsigned long flags;
1274 1272
1275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1273 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1276 1274
1277 spin_lock_irqsave(&lp->lock, flags); 1275 spin_lock_irqsave(&lp->lock, flags);
1278 status = SMC_GET_INT(lp); 1276 status = SMC_GET_INT(lp);
@@ -1310,7 +1308,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1310 unsigned int mcr, update_multicast = 0; 1308 unsigned int mcr, update_multicast = 0;
1311 unsigned long flags; 1309 unsigned long flags;
1312 1310
1313 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1311 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1314 1312
1315 spin_lock_irqsave(&lp->lock, flags); 1313 spin_lock_irqsave(&lp->lock, flags);
1316 SMC_GET_MAC_CR(lp, mcr); 1314 SMC_GET_MAC_CR(lp, mcr);
@@ -1412,7 +1410,7 @@ smc911x_open(struct net_device *dev)
1412{ 1410{
1413 struct smc911x_local *lp = netdev_priv(dev); 1411 struct smc911x_local *lp = netdev_priv(dev);
1414 1412
1415 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1413 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1416 1414
1417 /* 1415 /*
1418 * Check that the address is valid. If its not, refuse 1416 * Check that the address is valid. If its not, refuse
@@ -1420,7 +1418,7 @@ smc911x_open(struct net_device *dev)
1420 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1418 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1421 */ 1419 */
1422 if (!is_valid_ether_addr(dev->dev_addr)) { 1420 if (!is_valid_ether_addr(dev->dev_addr)) {
1423 PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__); 1421 PRINTK("%s: no valid ethernet hw addr\n", __func__);
1424 return -EINVAL; 1422 return -EINVAL;
1425 } 1423 }
1426 1424
@@ -1449,7 +1447,7 @@ static int smc911x_close(struct net_device *dev)
1449{ 1447{
1450 struct smc911x_local *lp = netdev_priv(dev); 1448 struct smc911x_local *lp = netdev_priv(dev);
1451 1449
1452 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1450 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1453 1451
1454 netif_stop_queue(dev); 1452 netif_stop_queue(dev);
1455 netif_carrier_off(dev); 1453 netif_carrier_off(dev);
@@ -1483,7 +1481,7 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1483 int ret, status; 1481 int ret, status;
1484 unsigned long flags; 1482 unsigned long flags;
1485 1483
1486 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1484 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1487 cmd->maxtxpkt = 1; 1485 cmd->maxtxpkt = 1;
1488 cmd->maxrxpkt = 1; 1486 cmd->maxrxpkt = 1;
1489 1487
@@ -1621,7 +1619,7 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
1621 for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) { 1619 for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
1622 if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) { 1620 if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
1623 PRINTK("%s: %s timeout waiting for EEPROM to respond\n", 1621 PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
1624 dev->name, __FUNCTION__); 1622 dev->name, __func__);
1625 return -EFAULT; 1623 return -EFAULT;
1626 } 1624 }
1627 mdelay(1); 1625 mdelay(1);
@@ -1629,7 +1627,7 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
1629 } 1627 }
1630 if (timeout == 0) { 1628 if (timeout == 0) {
1631 PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n", 1629 PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
1632 dev->name, __FUNCTION__); 1630 dev->name, __func__);
1633 return -ETIMEDOUT; 1631 return -ETIMEDOUT;
1634 } 1632 }
1635 return 0; 1633 return 0;
@@ -1742,7 +1740,7 @@ static int __init smc911x_findirq(struct net_device *dev)
1742 int timeout = 20; 1740 int timeout = 20;
1743 unsigned long cookie; 1741 unsigned long cookie;
1744 1742
1745 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 1743 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
1746 1744
1747 cookie = probe_irq_on(); 1745 cookie = probe_irq_on();
1748 1746
@@ -1808,7 +1806,7 @@ static int __init smc911x_probe(struct net_device *dev)
1808 const char *version_string; 1806 const char *version_string;
1809 unsigned long irq_flags; 1807 unsigned long irq_flags;
1810 1808
1811 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1809 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1812 1810
1813 /* First, see if the endian word is recognized */ 1811 /* First, see if the endian word is recognized */
1814 val = SMC_GET_BYTE_TEST(lp); 1812 val = SMC_GET_BYTE_TEST(lp);
@@ -2058,7 +2056,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
2058 unsigned int *addr; 2056 unsigned int *addr;
2059 int ret; 2057 int ret;
2060 2058
2061 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2059 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2062 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2060 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2063 if (!res) { 2061 if (!res) {
2064 ret = -ENODEV; 2062 ret = -ENODEV;
@@ -2129,7 +2127,7 @@ static int smc911x_drv_remove(struct platform_device *pdev)
2129 struct smc911x_local *lp = netdev_priv(ndev); 2127 struct smc911x_local *lp = netdev_priv(ndev);
2130 struct resource *res; 2128 struct resource *res;
2131 2129
2132 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2130 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2133 platform_set_drvdata(pdev, NULL); 2131 platform_set_drvdata(pdev, NULL);
2134 2132
2135 unregister_netdev(ndev); 2133 unregister_netdev(ndev);
@@ -2159,7 +2157,7 @@ static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
2159 struct net_device *ndev = platform_get_drvdata(dev); 2157 struct net_device *ndev = platform_get_drvdata(dev);
2160 struct smc911x_local *lp = netdev_priv(ndev); 2158 struct smc911x_local *lp = netdev_priv(ndev);
2161 2159
2162 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2160 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2163 if (ndev) { 2161 if (ndev) {
2164 if (netif_running(ndev)) { 2162 if (netif_running(ndev)) {
2165 netif_device_detach(ndev); 2163 netif_device_detach(ndev);
@@ -2177,7 +2175,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
2177{ 2175{
2178 struct net_device *ndev = platform_get_drvdata(dev); 2176 struct net_device *ndev = platform_get_drvdata(dev);
2179 2177
2180 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2178 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2181 if (ndev) { 2179 if (ndev) {
2182 struct smc911x_local *lp = netdev_priv(ndev); 2180 struct smc911x_local *lp = netdev_priv(ndev);
2183 2181
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 24768c10cadb..ef5ce8845c9d 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -270,7 +270,7 @@ static void smc_reset(struct net_device *dev)
270 unsigned int ctl, cfg; 270 unsigned int ctl, cfg;
271 struct sk_buff *pending_skb; 271 struct sk_buff *pending_skb;
272 272
273 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 273 DBG(2, "%s: %s\n", dev->name, __func__);
274 274
275 /* Disable all interrupts, block TX tasklet */ 275 /* Disable all interrupts, block TX tasklet */
276 spin_lock_irq(&lp->lock); 276 spin_lock_irq(&lp->lock);
@@ -363,7 +363,7 @@ static void smc_enable(struct net_device *dev)
363 void __iomem *ioaddr = lp->base; 363 void __iomem *ioaddr = lp->base;
364 int mask; 364 int mask;
365 365
366 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 366 DBG(2, "%s: %s\n", dev->name, __func__);
367 367
368 /* see the header file for options in TCR/RCR DEFAULT */ 368 /* see the header file for options in TCR/RCR DEFAULT */
369 SMC_SELECT_BANK(lp, 0); 369 SMC_SELECT_BANK(lp, 0);
@@ -397,7 +397,7 @@ static void smc_shutdown(struct net_device *dev)
397 void __iomem *ioaddr = lp->base; 397 void __iomem *ioaddr = lp->base;
398 struct sk_buff *pending_skb; 398 struct sk_buff *pending_skb;
399 399
400 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 400 DBG(2, "%s: %s\n", CARDNAME, __func__);
401 401
402 /* no more interrupts for me */ 402 /* no more interrupts for me */
403 spin_lock_irq(&lp->lock); 403 spin_lock_irq(&lp->lock);
@@ -430,7 +430,7 @@ static inline void smc_rcv(struct net_device *dev)
430 void __iomem *ioaddr = lp->base; 430 void __iomem *ioaddr = lp->base;
431 unsigned int packet_number, status, packet_len; 431 unsigned int packet_number, status, packet_len;
432 432
433 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 433 DBG(3, "%s: %s\n", dev->name, __func__);
434 434
435 packet_number = SMC_GET_RXFIFO(lp); 435 packet_number = SMC_GET_RXFIFO(lp);
436 if (unlikely(packet_number & RXFIFO_REMPTY)) { 436 if (unlikely(packet_number & RXFIFO_REMPTY)) {
@@ -577,7 +577,7 @@ static void smc_hardware_send_pkt(unsigned long data)
577 unsigned int packet_no, len; 577 unsigned int packet_no, len;
578 unsigned char *buf; 578 unsigned char *buf;
579 579
580 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 580 DBG(3, "%s: %s\n", dev->name, __func__);
581 581
582 if (!smc_special_trylock(&lp->lock)) { 582 if (!smc_special_trylock(&lp->lock)) {
583 netif_stop_queue(dev); 583 netif_stop_queue(dev);
@@ -662,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
662 void __iomem *ioaddr = lp->base; 662 void __iomem *ioaddr = lp->base;
663 unsigned int numPages, poll_count, status; 663 unsigned int numPages, poll_count, status;
664 664
665 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 665 DBG(3, "%s: %s\n", dev->name, __func__);
666 666
667 BUG_ON(lp->pending_tx_skb != NULL); 667 BUG_ON(lp->pending_tx_skb != NULL);
668 668
@@ -734,7 +734,7 @@ static void smc_tx(struct net_device *dev)
734 void __iomem *ioaddr = lp->base; 734 void __iomem *ioaddr = lp->base;
735 unsigned int saved_packet, packet_no, tx_status, pkt_len; 735 unsigned int saved_packet, packet_no, tx_status, pkt_len;
736 736
737 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 737 DBG(3, "%s: %s\n", dev->name, __func__);
738 738
739 /* If the TX FIFO is empty then nothing to do */ 739 /* If the TX FIFO is empty then nothing to do */
740 packet_no = SMC_GET_TXFIFO(lp); 740 packet_no = SMC_GET_TXFIFO(lp);
@@ -856,7 +856,7 @@ static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
856 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); 856 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
857 857
858 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 858 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
859 __FUNCTION__, phyaddr, phyreg, phydata); 859 __func__, phyaddr, phyreg, phydata);
860 860
861 SMC_SELECT_BANK(lp, 2); 861 SMC_SELECT_BANK(lp, 2);
862 return phydata; 862 return phydata;
@@ -883,7 +883,7 @@ static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
883 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); 883 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
884 884
885 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 885 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
886 __FUNCTION__, phyaddr, phyreg, phydata); 886 __func__, phyaddr, phyreg, phydata);
887 887
888 SMC_SELECT_BANK(lp, 2); 888 SMC_SELECT_BANK(lp, 2);
889} 889}
@@ -896,7 +896,7 @@ static void smc_phy_detect(struct net_device *dev)
896 struct smc_local *lp = netdev_priv(dev); 896 struct smc_local *lp = netdev_priv(dev);
897 int phyaddr; 897 int phyaddr;
898 898
899 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 899 DBG(2, "%s: %s\n", dev->name, __func__);
900 900
901 lp->phy_type = 0; 901 lp->phy_type = 0;
902 902
@@ -935,7 +935,7 @@ static int smc_phy_fixed(struct net_device *dev)
935 int phyaddr = lp->mii.phy_id; 935 int phyaddr = lp->mii.phy_id;
936 int bmcr, cfg1; 936 int bmcr, cfg1;
937 937
938 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 938 DBG(3, "%s: %s\n", dev->name, __func__);
939 939
940 /* Enter Link Disable state */ 940 /* Enter Link Disable state */
941 cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG); 941 cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
@@ -1168,7 +1168,7 @@ static void smc_phy_interrupt(struct net_device *dev)
1168 int phyaddr = lp->mii.phy_id; 1168 int phyaddr = lp->mii.phy_id;
1169 int phy18; 1169 int phy18;
1170 1170
1171 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1171 DBG(2, "%s: %s\n", dev->name, __func__);
1172 1172
1173 if (lp->phy_type == 0) 1173 if (lp->phy_type == 0)
1174 return; 1174 return;
@@ -1236,7 +1236,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1236 int status, mask, timeout, card_stats; 1236 int status, mask, timeout, card_stats;
1237 int saved_pointer; 1237 int saved_pointer;
1238 1238
1239 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 1239 DBG(3, "%s: %s\n", dev->name, __func__);
1240 1240
1241 spin_lock(&lp->lock); 1241 spin_lock(&lp->lock);
1242 1242
@@ -1358,7 +1358,7 @@ static void smc_timeout(struct net_device *dev)
1358 void __iomem *ioaddr = lp->base; 1358 void __iomem *ioaddr = lp->base;
1359 int status, mask, eph_st, meminfo, fifo; 1359 int status, mask, eph_st, meminfo, fifo;
1360 1360
1361 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1361 DBG(2, "%s: %s\n", dev->name, __func__);
1362 1362
1363 spin_lock_irq(&lp->lock); 1363 spin_lock_irq(&lp->lock);
1364 status = SMC_GET_INT(lp); 1364 status = SMC_GET_INT(lp);
@@ -1402,7 +1402,7 @@ static void smc_set_multicast_list(struct net_device *dev)
1402 unsigned char multicast_table[8]; 1402 unsigned char multicast_table[8];
1403 int update_multicast = 0; 1403 int update_multicast = 0;
1404 1404
1405 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1405 DBG(2, "%s: %s\n", dev->name, __func__);
1406 1406
1407 if (dev->flags & IFF_PROMISC) { 1407 if (dev->flags & IFF_PROMISC) {
1408 DBG(2, "%s: RCR_PRMS\n", dev->name); 1408 DBG(2, "%s: RCR_PRMS\n", dev->name);
@@ -1505,7 +1505,7 @@ smc_open(struct net_device *dev)
1505{ 1505{
1506 struct smc_local *lp = netdev_priv(dev); 1506 struct smc_local *lp = netdev_priv(dev);
1507 1507
1508 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1508 DBG(2, "%s: %s\n", dev->name, __func__);
1509 1509
1510 /* 1510 /*
1511 * Check that the address is valid. If its not, refuse 1511 * Check that the address is valid. If its not, refuse
@@ -1513,7 +1513,7 @@ smc_open(struct net_device *dev)
1513 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1513 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1514 */ 1514 */
1515 if (!is_valid_ether_addr(dev->dev_addr)) { 1515 if (!is_valid_ether_addr(dev->dev_addr)) {
1516 PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__); 1516 PRINTK("%s: no valid ethernet hw addr\n", __func__);
1517 return -EINVAL; 1517 return -EINVAL;
1518 } 1518 }
1519 1519
@@ -1557,7 +1557,7 @@ static int smc_close(struct net_device *dev)
1557{ 1557{
1558 struct smc_local *lp = netdev_priv(dev); 1558 struct smc_local *lp = netdev_priv(dev);
1559 1559
1560 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1560 DBG(2, "%s: %s\n", dev->name, __func__);
1561 1561
1562 netif_stop_queue(dev); 1562 netif_stop_queue(dev);
1563 netif_carrier_off(dev); 1563 netif_carrier_off(dev);
@@ -1700,7 +1700,7 @@ static int __init smc_findirq(struct smc_local *lp)
1700 int timeout = 20; 1700 int timeout = 20;
1701 unsigned long cookie; 1701 unsigned long cookie;
1702 1702
1703 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 1703 DBG(2, "%s: %s\n", CARDNAME, __func__);
1704 1704
1705 cookie = probe_irq_on(); 1705 cookie = probe_irq_on();
1706 1706
@@ -1778,7 +1778,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1778 const char *version_string; 1778 const char *version_string;
1779 DECLARE_MAC_BUF(mac); 1779 DECLARE_MAC_BUF(mac);
1780 1780
1781 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 1781 DBG(2, "%s: %s\n", CARDNAME, __func__);
1782 1782
1783 /* First, see if the high byte is 0x33 */ 1783 /* First, see if the high byte is 0x33 */
1784 val = SMC_CURRENT_BANK(lp); 1784 val = SMC_CURRENT_BANK(lp);
@@ -1961,7 +1961,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1961 if (dev->dma != (unsigned char)-1) 1961 if (dev->dma != (unsigned char)-1)
1962 printk(" DMA %d", dev->dma); 1962 printk(" DMA %d", dev->dma);
1963 1963
1964 printk("%s%s\n", nowait ? " [nowait]" : "", 1964 printk("%s%s\n",
1965 lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
1965 THROTTLE_TX_PKTS ? " [throttle_tx]" : ""); 1966 THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
1966 1967
1967 if (!is_valid_ether_addr(dev->dev_addr)) { 1968 if (!is_valid_ether_addr(dev->dev_addr)) {
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 997e7f1d5c6e..edea0732f145 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -446,6 +446,8 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
446#define SMC_CAN_USE_32BIT 1 446#define SMC_CAN_USE_32BIT 1
447#define SMC_NOWAIT 1 447#define SMC_NOWAIT 1
448 448
449#define SMC_IO_SHIFT (lp->io_shift)
450
449#define SMC_inb(a, r) readb((a) + (r)) 451#define SMC_inb(a, r) readb((a) + (r))
450#define SMC_inw(a, r) readw((a) + (r)) 452#define SMC_inw(a, r) readw((a) + (r))
451#define SMC_inl(a, r) readl((a) + (r)) 453#define SMC_inl(a, r) readl((a) + (r))
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 7d5561b8241c..f860ea150395 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -409,6 +409,7 @@ static int change_mtu(struct net_device *dev, int new_mtu);
409static int eeprom_read(void __iomem *ioaddr, int location); 409static int eeprom_read(void __iomem *ioaddr, int location);
410static int mdio_read(struct net_device *dev, int phy_id, int location); 410static int mdio_read(struct net_device *dev, int phy_id, int location);
411static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 411static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
412static int mdio_wait_link(struct net_device *dev, int wait);
412static int netdev_open(struct net_device *dev); 413static int netdev_open(struct net_device *dev);
413static void check_duplex(struct net_device *dev); 414static void check_duplex(struct net_device *dev);
414static void netdev_timer(unsigned long data); 415static void netdev_timer(unsigned long data);
@@ -785,6 +786,24 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
785 return; 786 return;
786} 787}
787 788
789static int mdio_wait_link(struct net_device *dev, int wait)
790{
791 int bmsr;
792 int phy_id;
793 struct netdev_private *np;
794
795 np = netdev_priv(dev);
796 phy_id = np->phys[0];
797
798 do {
799 bmsr = mdio_read(dev, phy_id, MII_BMSR);
800 if (bmsr & 0x0004)
801 return 0;
802 mdelay(1);
803 } while (--wait > 0);
804 return -1;
805}
806
788static int netdev_open(struct net_device *dev) 807static int netdev_open(struct net_device *dev)
789{ 808{
790 struct netdev_private *np = netdev_priv(dev); 809 struct netdev_private *np = netdev_priv(dev);
@@ -1393,41 +1412,51 @@ static void netdev_error(struct net_device *dev, int intr_status)
1393 int speed; 1412 int speed;
1394 1413
1395 if (intr_status & LinkChange) { 1414 if (intr_status & LinkChange) {
1396 if (np->an_enable) { 1415 if (mdio_wait_link(dev, 10) == 0) {
1397 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE); 1416 printk(KERN_INFO "%s: Link up\n", dev->name);
1398 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA); 1417 if (np->an_enable) {
1399 mii_advertise &= mii_lpa; 1418 mii_advertise = mdio_read(dev, np->phys[0],
1400 printk (KERN_INFO "%s: Link changed: ", dev->name); 1419 MII_ADVERTISE);
1401 if (mii_advertise & ADVERTISE_100FULL) { 1420 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1402 np->speed = 100; 1421 mii_advertise &= mii_lpa;
1403 printk ("100Mbps, full duplex\n"); 1422 printk(KERN_INFO "%s: Link changed: ",
1404 } else if (mii_advertise & ADVERTISE_100HALF) { 1423 dev->name);
1405 np->speed = 100; 1424 if (mii_advertise & ADVERTISE_100FULL) {
1406 printk ("100Mbps, half duplex\n"); 1425 np->speed = 100;
1407 } else if (mii_advertise & ADVERTISE_10FULL) { 1426 printk("100Mbps, full duplex\n");
1408 np->speed = 10; 1427 } else if (mii_advertise & ADVERTISE_100HALF) {
1409 printk ("10Mbps, full duplex\n"); 1428 np->speed = 100;
1410 } else if (mii_advertise & ADVERTISE_10HALF) { 1429 printk("100Mbps, half duplex\n");
1411 np->speed = 10; 1430 } else if (mii_advertise & ADVERTISE_10FULL) {
1412 printk ("10Mbps, half duplex\n"); 1431 np->speed = 10;
1413 } else 1432 printk("10Mbps, full duplex\n");
1414 printk ("\n"); 1433 } else if (mii_advertise & ADVERTISE_10HALF) {
1434 np->speed = 10;
1435 printk("10Mbps, half duplex\n");
1436 } else
1437 printk("\n");
1415 1438
1439 } else {
1440 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1441 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1442 np->speed = speed;
1443 printk(KERN_INFO "%s: Link changed: %dMbps ,",
1444 dev->name, speed);
1445 printk("%s duplex.\n",
1446 (mii_ctl & BMCR_FULLDPLX) ?
1447 "full" : "half");
1448 }
1449 check_duplex(dev);
1450 if (np->flowctrl && np->mii_if.full_duplex) {
1451 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1452 ioaddr + MulticastFilter1+2);
1453 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1454 ioaddr + MACCtrl0);
1455 }
1456 netif_carrier_on(dev);
1416 } else { 1457 } else {
1417 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR); 1458 printk(KERN_INFO "%s: Link down\n", dev->name);
1418 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; 1459 netif_carrier_off(dev);
1419 np->speed = speed;
1420 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1421 dev->name, speed);
1422 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1423 "full" : "half");
1424 }
1425 check_duplex (dev);
1426 if (np->flowctrl && np->mii_if.full_duplex) {
1427 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1428 ioaddr + MulticastFilter1+2);
1429 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1430 ioaddr + MACCtrl0);
1431 } 1460 }
1432 } 1461 }
1433 if (intr_status & StatsMax) { 1462 if (intr_status & StatsMax) {
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 7db48f1cd949..efaf84d9757d 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -539,22 +539,22 @@ struct txd_desc {
539 539
540#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args) 540#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args)
541#define DBG2(fmt, args...) \ 541#define DBG2(fmt, args...) \
542 printk(KERN_ERR "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args) 542 printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
543 543
544#define BDX_ASSERT(x) BUG_ON(x) 544#define BDX_ASSERT(x) BUG_ON(x)
545 545
546#ifdef DEBUG 546#ifdef DEBUG
547 547
548#define ENTER do { \ 548#define ENTER do { \
549 printk(KERN_ERR "%s:%-5d: ENTER\n", __FUNCTION__, __LINE__); \ 549 printk(KERN_ERR "%s:%-5d: ENTER\n", __func__, __LINE__); \
550} while (0) 550} while (0)
551 551
552#define RET(args...) do { \ 552#define RET(args...) do { \
553 printk(KERN_ERR "%s:%-5d: RETURN\n", __FUNCTION__, __LINE__); \ 553 printk(KERN_ERR "%s:%-5d: RETURN\n", __func__, __LINE__); \
554return args; } while (0) 554return args; } while (0)
555 555
556#define DBG(fmt, args...) \ 556#define DBG(fmt, args...) \
557 printk(KERN_ERR "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args) 557 printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
558#else 558#else
559#define ENTER do { } while (0) 559#define ENTER do { } while (0)
560#define RET(args...) return args 560#define RET(args...) return args
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 71d2c5cfdad9..123920759efd 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3861,10 +3861,7 @@ static void tg3_tx(struct tg3 *tp)
3861 return; 3861 return;
3862 } 3862 }
3863 3863
3864 pci_unmap_single(tp->pdev, 3864 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
3865 pci_unmap_addr(ri, mapping),
3866 skb_headlen(skb),
3867 PCI_DMA_TODEVICE);
3868 3865
3869 ri->skb = NULL; 3866 ri->skb = NULL;
3870 3867
@@ -3874,12 +3871,6 @@ static void tg3_tx(struct tg3 *tp)
3874 ri = &tp->tx_buffers[sw_idx]; 3871 ri = &tp->tx_buffers[sw_idx];
3875 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 3872 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3876 tx_bug = 1; 3873 tx_bug = 1;
3877
3878 pci_unmap_page(tp->pdev,
3879 pci_unmap_addr(ri, mapping),
3880 skb_shinfo(skb)->frags[i].size,
3881 PCI_DMA_TODEVICE);
3882
3883 sw_idx = NEXT_TX(sw_idx); 3874 sw_idx = NEXT_TX(sw_idx);
3884 } 3875 }
3885 3876
@@ -4633,12 +4624,16 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4633 } else { 4624 } else {
4634 /* New SKB is guaranteed to be linear. */ 4625 /* New SKB is guaranteed to be linear. */
4635 entry = *start; 4626 entry = *start;
4636 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, 4627 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4637 PCI_DMA_TODEVICE); 4628 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4629
4638 /* Make sure new skb does not cross any 4G boundaries. 4630 /* Make sure new skb does not cross any 4G boundaries.
4639 * Drop the packet if it does. 4631 * Drop the packet if it does.
4640 */ 4632 */
4641 if (tg3_4g_overflow_test(new_addr, new_skb->len)) { 4633 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4634 if (!ret)
4635 skb_dma_unmap(&tp->pdev->dev, new_skb,
4636 DMA_TO_DEVICE);
4642 ret = -1; 4637 ret = -1;
4643 dev_kfree_skb(new_skb); 4638 dev_kfree_skb(new_skb);
4644 new_skb = NULL; 4639 new_skb = NULL;
@@ -4652,18 +4647,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4652 /* Now clean up the sw ring entries. */ 4647 /* Now clean up the sw ring entries. */
4653 i = 0; 4648 i = 0;
4654 while (entry != last_plus_one) { 4649 while (entry != last_plus_one) {
4655 int len;
4656
4657 if (i == 0)
4658 len = skb_headlen(skb);
4659 else
4660 len = skb_shinfo(skb)->frags[i-1].size;
4661 pci_unmap_single(tp->pdev,
4662 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4663 len, PCI_DMA_TODEVICE);
4664 if (i == 0) { 4650 if (i == 0) {
4665 tp->tx_buffers[entry].skb = new_skb; 4651 tp->tx_buffers[entry].skb = new_skb;
4666 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4667 } else { 4652 } else {
4668 tp->tx_buffers[entry].skb = NULL; 4653 tp->tx_buffers[entry].skb = NULL;
4669 } 4654 }
@@ -4671,6 +4656,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4671 i++; 4656 i++;
4672 } 4657 }
4673 4658
4659 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4674 dev_kfree_skb(skb); 4660 dev_kfree_skb(skb);
4675 4661
4676 return ret; 4662 return ret;
@@ -4705,8 +4691,9 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
4705static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 4691static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4706{ 4692{
4707 struct tg3 *tp = netdev_priv(dev); 4693 struct tg3 *tp = netdev_priv(dev);
4708 dma_addr_t mapping;
4709 u32 len, entry, base_flags, mss; 4694 u32 len, entry, base_flags, mss;
4695 struct skb_shared_info *sp;
4696 dma_addr_t mapping;
4710 4697
4711 len = skb_headlen(skb); 4698 len = skb_headlen(skb);
4712 4699
@@ -4765,11 +4752,16 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4765 (vlan_tx_tag_get(skb) << 16)); 4752 (vlan_tx_tag_get(skb) << 16));
4766#endif 4753#endif
4767 4754
4768 /* Queue skb data, a.k.a. the main skb fragment. */ 4755 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4769 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); 4756 dev_kfree_skb(skb);
4757 goto out_unlock;
4758 }
4759
4760 sp = skb_shinfo(skb);
4761
4762 mapping = sp->dma_maps[0];
4770 4763
4771 tp->tx_buffers[entry].skb = skb; 4764 tp->tx_buffers[entry].skb = skb;
4772 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4773 4765
4774 tg3_set_txd(tp, entry, mapping, len, base_flags, 4766 tg3_set_txd(tp, entry, mapping, len, base_flags,
4775 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); 4767 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -4785,13 +4777,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4785 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4777 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4786 4778
4787 len = frag->size; 4779 len = frag->size;
4788 mapping = pci_map_page(tp->pdev, 4780 mapping = sp->dma_maps[i + 1];
4789 frag->page,
4790 frag->page_offset,
4791 len, PCI_DMA_TODEVICE);
4792
4793 tp->tx_buffers[entry].skb = NULL; 4781 tp->tx_buffers[entry].skb = NULL;
4794 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4795 4782
4796 tg3_set_txd(tp, entry, mapping, len, 4783 tg3_set_txd(tp, entry, mapping, len,
4797 base_flags, (i == last) | (mss << 1)); 4784 base_flags, (i == last) | (mss << 1));
@@ -4859,9 +4846,10 @@ tg3_tso_bug_end:
4859static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) 4846static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4860{ 4847{
4861 struct tg3 *tp = netdev_priv(dev); 4848 struct tg3 *tp = netdev_priv(dev);
4862 dma_addr_t mapping;
4863 u32 len, entry, base_flags, mss; 4849 u32 len, entry, base_flags, mss;
4850 struct skb_shared_info *sp;
4864 int would_hit_hwbug; 4851 int would_hit_hwbug;
4852 dma_addr_t mapping;
4865 4853
4866 len = skb_headlen(skb); 4854 len = skb_headlen(skb);
4867 4855
@@ -4942,11 +4930,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4942 (vlan_tx_tag_get(skb) << 16)); 4930 (vlan_tx_tag_get(skb) << 16));
4943#endif 4931#endif
4944 4932
4945 /* Queue skb data, a.k.a. the main skb fragment. */ 4933 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4946 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); 4934 dev_kfree_skb(skb);
4935 goto out_unlock;
4936 }
4937
4938 sp = skb_shinfo(skb);
4939
4940 mapping = sp->dma_maps[0];
4947 4941
4948 tp->tx_buffers[entry].skb = skb; 4942 tp->tx_buffers[entry].skb = skb;
4949 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4950 4943
4951 would_hit_hwbug = 0; 4944 would_hit_hwbug = 0;
4952 4945
@@ -4969,13 +4962,9 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4969 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4962 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4970 4963
4971 len = frag->size; 4964 len = frag->size;
4972 mapping = pci_map_page(tp->pdev, 4965 mapping = sp->dma_maps[i + 1];
4973 frag->page,
4974 frag->page_offset,
4975 len, PCI_DMA_TODEVICE);
4976 4966
4977 tp->tx_buffers[entry].skb = NULL; 4967 tp->tx_buffers[entry].skb = NULL;
4978 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4979 4968
4980 if (tg3_4g_overflow_test(mapping, len)) 4969 if (tg3_4g_overflow_test(mapping, len))
4981 would_hit_hwbug = 1; 4970 would_hit_hwbug = 1;
@@ -5128,7 +5117,6 @@ static void tg3_free_rings(struct tg3 *tp)
5128 for (i = 0; i < TG3_TX_RING_SIZE; ) { 5117 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5129 struct tx_ring_info *txp; 5118 struct tx_ring_info *txp;
5130 struct sk_buff *skb; 5119 struct sk_buff *skb;
5131 int j;
5132 5120
5133 txp = &tp->tx_buffers[i]; 5121 txp = &tp->tx_buffers[i];
5134 skb = txp->skb; 5122 skb = txp->skb;
@@ -5138,22 +5126,11 @@ static void tg3_free_rings(struct tg3 *tp)
5138 continue; 5126 continue;
5139 } 5127 }
5140 5128
5141 pci_unmap_single(tp->pdev, 5129 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5142 pci_unmap_addr(txp, mapping),
5143 skb_headlen(skb),
5144 PCI_DMA_TODEVICE);
5145 txp->skb = NULL;
5146 5130
5147 i++; 5131 txp->skb = NULL;
5148 5132
5149 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { 5133 i += skb_shinfo(skb)->nr_frags + 1;
5150 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
5151 pci_unmap_page(tp->pdev,
5152 pci_unmap_addr(txp, mapping),
5153 skb_shinfo(skb)->frags[j].size,
5154 PCI_DMA_TODEVICE);
5155 i++;
5156 }
5157 5134
5158 dev_kfree_skb_any(skb); 5135 dev_kfree_skb_any(skb);
5159 } 5136 }
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index f5b8cab8d4b5..6c7b5e303dbb 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2197,7 +2197,6 @@ struct ring_info {
2197 2197
2198struct tx_ring_info { 2198struct tx_ring_info {
2199 struct sk_buff *skb; 2199 struct sk_buff *skb;
2200 DECLARE_PCI_UNMAP_ADDR(mapping)
2201 u32 prev_vlan_tag; 2200 u32 prev_vlan_tag;
2202}; 2201};
2203 2202
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 43fde99b24ac..eb1da6f0b086 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -263,7 +263,7 @@ static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
263 return; 263 return;
264 udelay(10); 264 udelay(10);
265 } 265 }
266 printk(KERN_ERR "%s function time out \n", __FUNCTION__); 266 printk(KERN_ERR "%s function time out \n", __func__);
267} 267}
268 268
269static int mii_speed(struct mii_if_info *mii) 269static int mii_speed(struct mii_if_info *mii)
@@ -1059,7 +1059,7 @@ static void tsi108_stop_ethernet(struct net_device *dev)
1059 return; 1059 return;
1060 udelay(10); 1060 udelay(10);
1061 } 1061 }
1062 printk(KERN_ERR "%s function time out \n", __FUNCTION__); 1062 printk(KERN_ERR "%s function time out \n", __func__);
1063} 1063}
1064 1064
1065static void tsi108_reset_ether(struct tsi108_prv_data * data) 1065static void tsi108_reset_ether(struct tsi108_prv_data * data)
@@ -1244,7 +1244,7 @@ static void tsi108_init_phy(struct net_device *dev)
1244 udelay(10); 1244 udelay(10);
1245 } 1245 }
1246 if (i == 0) 1246 if (i == 0)
1247 printk(KERN_ERR "%s function time out \n", __FUNCTION__); 1247 printk(KERN_ERR "%s function time out \n", __func__);
1248 1248
1249 if (data->phy_type == TSI108_PHY_BCM54XX) { 1249 if (data->phy_type == TSI108_PHY_BCM54XX) {
1250 tsi108_write_mii(data, 0x09, 0x0300); 1250 tsi108_write_mii(data, 0x09, 0x0300);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 9281d06d5aaa..f54c45049d50 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1418,7 +1418,6 @@ static int de_close (struct net_device *dev)
1418 1418
1419 de_free_rings(de); 1419 de_free_rings(de);
1420 de_adapter_sleep(de); 1420 de_adapter_sleep(de);
1421 pci_disable_device(de->pdev);
1422 return 0; 1421 return 0;
1423} 1422}
1424 1423
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 617ef41bdfea..6444cbec0bdc 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -832,7 +832,7 @@ struct de4x5_private {
832 s32 csr14; /* Saved SIA TX/RX Register */ 832 s32 csr14; /* Saved SIA TX/RX Register */
833 s32 csr15; /* Saved SIA General Register */ 833 s32 csr15; /* Saved SIA General Register */
834 int save_cnt; /* Flag if state already saved */ 834 int save_cnt; /* Flag if state already saved */
835 struct sk_buff *skb; /* Save the (re-ordered) skb's */ 835 struct sk_buff_head queue; /* Save the (re-ordered) skb's */
836 } cache; 836 } cache;
837 struct de4x5_srom srom; /* A copy of the SROM */ 837 struct de4x5_srom srom; /* A copy of the SROM */
838 int cfrv; /* Card CFRV copy */ 838 int cfrv; /* Card CFRV copy */
@@ -1128,6 +1128,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1128 printk(" which has an Ethernet PROM CRC error.\n"); 1128 printk(" which has an Ethernet PROM CRC error.\n");
1129 return -ENXIO; 1129 return -ENXIO;
1130 } else { 1130 } else {
1131 skb_queue_head_init(&lp->cache.queue);
1131 lp->cache.gepc = GEP_INIT; 1132 lp->cache.gepc = GEP_INIT;
1132 lp->asBit = GEP_SLNK; 1133 lp->asBit = GEP_SLNK;
1133 lp->asPolarity = GEP_SLNK; 1134 lp->asPolarity = GEP_SLNK;
@@ -1487,7 +1488,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1487 } 1488 }
1488 } else if (skb->len > 0) { 1489 } else if (skb->len > 0) {
1489 /* If we already have stuff queued locally, use that first */ 1490 /* If we already have stuff queued locally, use that first */
1490 if (lp->cache.skb && !lp->interrupt) { 1491 if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
1491 de4x5_put_cache(dev, skb); 1492 de4x5_put_cache(dev, skb);
1492 skb = de4x5_get_cache(dev); 1493 skb = de4x5_get_cache(dev);
1493 } 1494 }
@@ -1580,7 +1581,7 @@ de4x5_interrupt(int irq, void *dev_id)
1580 1581
1581 /* Load the TX ring with any locally stored packets */ 1582 /* Load the TX ring with any locally stored packets */
1582 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) { 1583 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
1583 while (lp->cache.skb && !netif_queue_stopped(dev) && lp->tx_enable) { 1584 while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
1584 de4x5_queue_pkt(de4x5_get_cache(dev), dev); 1585 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1585 } 1586 }
1586 lp->cache.lock = 0; 1587 lp->cache.lock = 0;
@@ -3679,11 +3680,7 @@ de4x5_free_tx_buffs(struct net_device *dev)
3679 } 3680 }
3680 3681
3681 /* Unload the locally queued packets */ 3682 /* Unload the locally queued packets */
3682 while (lp->cache.skb) { 3683 __skb_queue_purge(&lp->cache.queue);
3683 dev_kfree_skb(de4x5_get_cache(dev));
3684 }
3685
3686 return;
3687} 3684}
3688 3685
3689/* 3686/*
@@ -3781,43 +3778,24 @@ static void
3781de4x5_put_cache(struct net_device *dev, struct sk_buff *skb) 3778de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3782{ 3779{
3783 struct de4x5_private *lp = netdev_priv(dev); 3780 struct de4x5_private *lp = netdev_priv(dev);
3784 struct sk_buff *p;
3785
3786 if (lp->cache.skb) {
3787 for (p=lp->cache.skb; p->next; p=p->next);
3788 p->next = skb;
3789 } else {
3790 lp->cache.skb = skb;
3791 }
3792 skb->next = NULL;
3793 3781
3794 return; 3782 __skb_queue_tail(&lp->cache.queue, skb);
3795} 3783}
3796 3784
3797static void 3785static void
3798de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb) 3786de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3799{ 3787{
3800 struct de4x5_private *lp = netdev_priv(dev); 3788 struct de4x5_private *lp = netdev_priv(dev);
3801 struct sk_buff *p = lp->cache.skb;
3802
3803 lp->cache.skb = skb;
3804 skb->next = p;
3805 3789
3806 return; 3790 __skb_queue_head(&lp->cache.queue, skb);
3807} 3791}
3808 3792
3809static struct sk_buff * 3793static struct sk_buff *
3810de4x5_get_cache(struct net_device *dev) 3794de4x5_get_cache(struct net_device *dev)
3811{ 3795{
3812 struct de4x5_private *lp = netdev_priv(dev); 3796 struct de4x5_private *lp = netdev_priv(dev);
3813 struct sk_buff *p = lp->cache.skb;
3814 3797
3815 if (p) { 3798 return __skb_dequeue(&lp->cache.queue);
3816 lp->cache.skb = p->next;
3817 p->next = NULL;
3818 }
3819
3820 return p;
3821} 3799}
3822 3800
3823/* 3801/*
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 8f944e57fd55..c87747bb24c5 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -400,7 +400,7 @@ static struct enet_addr_container *get_enet_addr_container(void)
400 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL); 400 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
401 if (!enet_addr_cont) { 401 if (!enet_addr_cont) {
402 ugeth_err("%s: No memory for enet_addr_container object.", 402 ugeth_err("%s: No memory for enet_addr_container object.",
403 __FUNCTION__); 403 __func__);
404 return NULL; 404 return NULL;
405 } 405 }
406 406
@@ -427,7 +427,7 @@ static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
427 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 427 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
428 428
429 if (!(paddr_num < NUM_OF_PADDRS)) { 429 if (!(paddr_num < NUM_OF_PADDRS)) {
430 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__); 430 ugeth_warn("%s: Illegal paddr_num.", __func__);
431 return -EINVAL; 431 return -EINVAL;
432 } 432 }
433 433
@@ -447,7 +447,7 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
448 448
449 if (!(paddr_num < NUM_OF_PADDRS)) { 449 if (!(paddr_num < NUM_OF_PADDRS)) {
450 ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); 450 ugeth_warn("%s: Illagel paddr_num.", __func__);
451 return -EINVAL; 451 return -EINVAL;
452 } 452 }
453 453
@@ -1441,7 +1441,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1441 u32 upsmr, maccfg2, tbiBaseAddress; 1441 u32 upsmr, maccfg2, tbiBaseAddress;
1442 u16 value; 1442 u16 value;
1443 1443
1444 ugeth_vdbg("%s: IN", __FUNCTION__); 1444 ugeth_vdbg("%s: IN", __func__);
1445 1445
1446 ug_info = ugeth->ug_info; 1446 ug_info = ugeth->ug_info;
1447 ug_regs = ugeth->ug_regs; 1447 ug_regs = ugeth->ug_regs;
@@ -1504,7 +1504,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1504 if (ret_val != 0) { 1504 if (ret_val != 0) {
1505 if (netif_msg_probe(ugeth)) 1505 if (netif_msg_probe(ugeth))
1506 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", 1506 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
1507 __FUNCTION__); 1507 __func__);
1508 return ret_val; 1508 return ret_val;
1509 } 1509 }
1510 1510
@@ -1744,7 +1744,7 @@ static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1744 /* check if the UCC number is in range. */ 1744 /* check if the UCC number is in range. */
1745 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1745 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1746 if (netif_msg_probe(ugeth)) 1746 if (netif_msg_probe(ugeth))
1747 ugeth_err("%s: ucc_num out of range.", __FUNCTION__); 1747 ugeth_err("%s: ucc_num out of range.", __func__);
1748 return -EINVAL; 1748 return -EINVAL;
1749 } 1749 }
1750 1750
@@ -1773,7 +1773,7 @@ static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
1773 /* check if the UCC number is in range. */ 1773 /* check if the UCC number is in range. */
1774 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1774 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1775 if (netif_msg_probe(ugeth)) 1775 if (netif_msg_probe(ugeth))
1776 ugeth_err("%s: ucc_num out of range.", __FUNCTION__); 1776 ugeth_err("%s: ucc_num out of range.", __func__);
1777 return -EINVAL; 1777 return -EINVAL;
1778 } 1778 }
1779 1779
@@ -2062,7 +2062,7 @@ static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth
2062 ugeth_warn 2062 ugeth_warn
2063 ("%s: multicast address added to paddr will have no " 2063 ("%s: multicast address added to paddr will have no "
2064 "effect - is this what you wanted?", 2064 "effect - is this what you wanted?",
2065 __FUNCTION__); 2065 __func__);
2066 2066
2067 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */ 2067 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2068 /* store address in our database */ 2068 /* store address in our database */
@@ -2278,7 +2278,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2278 struct phy_device *phydev = ugeth->phydev; 2278 struct phy_device *phydev = ugeth->phydev;
2279 u32 tempval; 2279 u32 tempval;
2280 2280
2281 ugeth_vdbg("%s: IN", __FUNCTION__); 2281 ugeth_vdbg("%s: IN", __func__);
2282 2282
2283 /* Disable the controller */ 2283 /* Disable the controller */
2284 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2284 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
@@ -2315,7 +2315,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2315 (uf_info->bd_mem_part == MEM_PART_MURAM))) { 2315 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2316 if (netif_msg_probe(ugeth)) 2316 if (netif_msg_probe(ugeth))
2317 ugeth_err("%s: Bad memory partition value.", 2317 ugeth_err("%s: Bad memory partition value.",
2318 __FUNCTION__); 2318 __func__);
2319 return -EINVAL; 2319 return -EINVAL;
2320 } 2320 }
2321 2321
@@ -2327,7 +2327,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2327 if (netif_msg_probe(ugeth)) 2327 if (netif_msg_probe(ugeth))
2328 ugeth_err 2328 ugeth_err
2329 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", 2329 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
2330 __FUNCTION__); 2330 __func__);
2331 return -EINVAL; 2331 return -EINVAL;
2332 } 2332 }
2333 } 2333 }
@@ -2338,7 +2338,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2338 if (netif_msg_probe(ugeth)) 2338 if (netif_msg_probe(ugeth))
2339 ugeth_err 2339 ugeth_err
2340 ("%s: Tx BD ring length must be no smaller than 2.", 2340 ("%s: Tx BD ring length must be no smaller than 2.",
2341 __FUNCTION__); 2341 __func__);
2342 return -EINVAL; 2342 return -EINVAL;
2343 } 2343 }
2344 } 2344 }
@@ -2349,21 +2349,21 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2349 if (netif_msg_probe(ugeth)) 2349 if (netif_msg_probe(ugeth))
2350 ugeth_err 2350 ugeth_err
2351 ("%s: max_rx_buf_length must be non-zero multiple of 128.", 2351 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2352 __FUNCTION__); 2352 __func__);
2353 return -EINVAL; 2353 return -EINVAL;
2354 } 2354 }
2355 2355
2356 /* num Tx queues */ 2356 /* num Tx queues */
2357 if (ug_info->numQueuesTx > NUM_TX_QUEUES) { 2357 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2358 if (netif_msg_probe(ugeth)) 2358 if (netif_msg_probe(ugeth))
2359 ugeth_err("%s: number of tx queues too large.", __FUNCTION__); 2359 ugeth_err("%s: number of tx queues too large.", __func__);
2360 return -EINVAL; 2360 return -EINVAL;
2361 } 2361 }
2362 2362
2363 /* num Rx queues */ 2363 /* num Rx queues */
2364 if (ug_info->numQueuesRx > NUM_RX_QUEUES) { 2364 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2365 if (netif_msg_probe(ugeth)) 2365 if (netif_msg_probe(ugeth))
2366 ugeth_err("%s: number of rx queues too large.", __FUNCTION__); 2366 ugeth_err("%s: number of rx queues too large.", __func__);
2367 return -EINVAL; 2367 return -EINVAL;
2368 } 2368 }
2369 2369
@@ -2374,7 +2374,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2374 ugeth_err 2374 ugeth_err
2375 ("%s: VLAN priority table entry must not be" 2375 ("%s: VLAN priority table entry must not be"
2376 " larger than number of Rx queues.", 2376 " larger than number of Rx queues.",
2377 __FUNCTION__); 2377 __func__);
2378 return -EINVAL; 2378 return -EINVAL;
2379 } 2379 }
2380 } 2380 }
@@ -2386,7 +2386,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2386 ugeth_err 2386 ugeth_err
2387 ("%s: IP priority table entry must not be" 2387 ("%s: IP priority table entry must not be"
2388 " larger than number of Rx queues.", 2388 " larger than number of Rx queues.",
2389 __FUNCTION__); 2389 __func__);
2390 return -EINVAL; 2390 return -EINVAL;
2391 } 2391 }
2392 } 2392 }
@@ -2394,7 +2394,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2394 if (ug_info->cam && !ug_info->ecamptr) { 2394 if (ug_info->cam && !ug_info->ecamptr) {
2395 if (netif_msg_probe(ugeth)) 2395 if (netif_msg_probe(ugeth))
2396 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", 2396 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2397 __FUNCTION__); 2397 __func__);
2398 return -EINVAL; 2398 return -EINVAL;
2399 } 2399 }
2400 2400
@@ -2404,7 +2404,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2404 if (netif_msg_probe(ugeth)) 2404 if (netif_msg_probe(ugeth))
2405 ugeth_err("%s: Number of station addresses greater than 1 " 2405 ugeth_err("%s: Number of station addresses greater than 1 "
2406 "not allowed in extended parsing mode.", 2406 "not allowed in extended parsing mode.",
2407 __FUNCTION__); 2407 __func__);
2408 return -EINVAL; 2408 return -EINVAL;
2409 } 2409 }
2410 2410
@@ -2418,7 +2418,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2418 /* Initialize the general fast UCC block. */ 2418 /* Initialize the general fast UCC block. */
2419 if (ucc_fast_init(uf_info, &ugeth->uccf)) { 2419 if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2420 if (netif_msg_probe(ugeth)) 2420 if (netif_msg_probe(ugeth))
2421 ugeth_err("%s: Failed to init uccf.", __FUNCTION__); 2421 ugeth_err("%s: Failed to init uccf.", __func__);
2422 ucc_geth_memclean(ugeth); 2422 ucc_geth_memclean(ugeth);
2423 return -ENOMEM; 2423 return -ENOMEM;
2424 } 2424 }
@@ -2448,7 +2448,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2448 u8 __iomem *endOfRing; 2448 u8 __iomem *endOfRing;
2449 u8 numThreadsRxNumerical, numThreadsTxNumerical; 2449 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2450 2450
2451 ugeth_vdbg("%s: IN", __FUNCTION__); 2451 ugeth_vdbg("%s: IN", __func__);
2452 uccf = ugeth->uccf; 2452 uccf = ugeth->uccf;
2453 ug_info = ugeth->ug_info; 2453 ug_info = ugeth->ug_info;
2454 uf_info = &ug_info->uf_info; 2454 uf_info = &ug_info->uf_info;
@@ -2474,7 +2474,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2474 default: 2474 default:
2475 if (netif_msg_ifup(ugeth)) 2475 if (netif_msg_ifup(ugeth))
2476 ugeth_err("%s: Bad number of Rx threads value.", 2476 ugeth_err("%s: Bad number of Rx threads value.",
2477 __FUNCTION__); 2477 __func__);
2478 ucc_geth_memclean(ugeth); 2478 ucc_geth_memclean(ugeth);
2479 return -EINVAL; 2479 return -EINVAL;
2480 break; 2480 break;
@@ -2499,7 +2499,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2499 default: 2499 default:
2500 if (netif_msg_ifup(ugeth)) 2500 if (netif_msg_ifup(ugeth))
2501 ugeth_err("%s: Bad number of Tx threads value.", 2501 ugeth_err("%s: Bad number of Tx threads value.",
2502 __FUNCTION__); 2502 __func__);
2503 ucc_geth_memclean(ugeth); 2503 ucc_geth_memclean(ugeth);
2504 return -EINVAL; 2504 return -EINVAL;
2505 break; 2505 break;
@@ -2553,7 +2553,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2553 if (ret_val != 0) { 2553 if (ret_val != 0) {
2554 if (netif_msg_ifup(ugeth)) 2554 if (netif_msg_ifup(ugeth))
2555 ugeth_err("%s: IPGIFG initialization parameter too large.", 2555 ugeth_err("%s: IPGIFG initialization parameter too large.",
2556 __FUNCTION__); 2556 __func__);
2557 ucc_geth_memclean(ugeth); 2557 ucc_geth_memclean(ugeth);
2558 return ret_val; 2558 return ret_val;
2559 } 2559 }
@@ -2571,7 +2571,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2571 if (ret_val != 0) { 2571 if (ret_val != 0) {
2572 if (netif_msg_ifup(ugeth)) 2572 if (netif_msg_ifup(ugeth))
2573 ugeth_err("%s: Half Duplex initialization parameter too large.", 2573 ugeth_err("%s: Half Duplex initialization parameter too large.",
2574 __FUNCTION__); 2574 __func__);
2575 ucc_geth_memclean(ugeth); 2575 ucc_geth_memclean(ugeth);
2576 return ret_val; 2576 return ret_val;
2577 } 2577 }
@@ -2626,7 +2626,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2626 if (netif_msg_ifup(ugeth)) 2626 if (netif_msg_ifup(ugeth))
2627 ugeth_err 2627 ugeth_err
2628 ("%s: Can not allocate memory for Tx bd rings.", 2628 ("%s: Can not allocate memory for Tx bd rings.",
2629 __FUNCTION__); 2629 __func__);
2630 ucc_geth_memclean(ugeth); 2630 ucc_geth_memclean(ugeth);
2631 return -ENOMEM; 2631 return -ENOMEM;
2632 } 2632 }
@@ -2662,7 +2662,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2662 if (netif_msg_ifup(ugeth)) 2662 if (netif_msg_ifup(ugeth))
2663 ugeth_err 2663 ugeth_err
2664 ("%s: Can not allocate memory for Rx bd rings.", 2664 ("%s: Can not allocate memory for Rx bd rings.",
2665 __FUNCTION__); 2665 __func__);
2666 ucc_geth_memclean(ugeth); 2666 ucc_geth_memclean(ugeth);
2667 return -ENOMEM; 2667 return -ENOMEM;
2668 } 2668 }
@@ -2678,7 +2678,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2678 if (ugeth->tx_skbuff[j] == NULL) { 2678 if (ugeth->tx_skbuff[j] == NULL) {
2679 if (netif_msg_ifup(ugeth)) 2679 if (netif_msg_ifup(ugeth))
2680 ugeth_err("%s: Could not allocate tx_skbuff", 2680 ugeth_err("%s: Could not allocate tx_skbuff",
2681 __FUNCTION__); 2681 __func__);
2682 ucc_geth_memclean(ugeth); 2682 ucc_geth_memclean(ugeth);
2683 return -ENOMEM; 2683 return -ENOMEM;
2684 } 2684 }
@@ -2710,7 +2710,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2710 if (ugeth->rx_skbuff[j] == NULL) { 2710 if (ugeth->rx_skbuff[j] == NULL) {
2711 if (netif_msg_ifup(ugeth)) 2711 if (netif_msg_ifup(ugeth))
2712 ugeth_err("%s: Could not allocate rx_skbuff", 2712 ugeth_err("%s: Could not allocate rx_skbuff",
2713 __FUNCTION__); 2713 __func__);
2714 ucc_geth_memclean(ugeth); 2714 ucc_geth_memclean(ugeth);
2715 return -ENOMEM; 2715 return -ENOMEM;
2716 } 2716 }
@@ -2744,7 +2744,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2744 if (netif_msg_ifup(ugeth)) 2744 if (netif_msg_ifup(ugeth))
2745 ugeth_err 2745 ugeth_err
2746 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", 2746 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
2747 __FUNCTION__); 2747 __func__);
2748 ucc_geth_memclean(ugeth); 2748 ucc_geth_memclean(ugeth);
2749 return -ENOMEM; 2749 return -ENOMEM;
2750 } 2750 }
@@ -2767,7 +2767,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2767 if (netif_msg_ifup(ugeth)) 2767 if (netif_msg_ifup(ugeth))
2768 ugeth_err 2768 ugeth_err
2769 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", 2769 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
2770 __FUNCTION__); 2770 __func__);
2771 ucc_geth_memclean(ugeth); 2771 ucc_geth_memclean(ugeth);
2772 return -ENOMEM; 2772 return -ENOMEM;
2773 } 2773 }
@@ -2797,7 +2797,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2797 if (netif_msg_ifup(ugeth)) 2797 if (netif_msg_ifup(ugeth))
2798 ugeth_err 2798 ugeth_err
2799 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", 2799 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
2800 __FUNCTION__); 2800 __func__);
2801 ucc_geth_memclean(ugeth); 2801 ucc_geth_memclean(ugeth);
2802 return -ENOMEM; 2802 return -ENOMEM;
2803 } 2803 }
@@ -2841,7 +2841,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2841 if (netif_msg_ifup(ugeth)) 2841 if (netif_msg_ifup(ugeth))
2842 ugeth_err 2842 ugeth_err
2843 ("%s: Can not allocate DPRAM memory for p_scheduler.", 2843 ("%s: Can not allocate DPRAM memory for p_scheduler.",
2844 __FUNCTION__); 2844 __func__);
2845 ucc_geth_memclean(ugeth); 2845 ucc_geth_memclean(ugeth);
2846 return -ENOMEM; 2846 return -ENOMEM;
2847 } 2847 }
@@ -2892,7 +2892,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2892 ugeth_err 2892 ugeth_err
2893 ("%s: Can not allocate DPRAM memory for" 2893 ("%s: Can not allocate DPRAM memory for"
2894 " p_tx_fw_statistics_pram.", 2894 " p_tx_fw_statistics_pram.",
2895 __FUNCTION__); 2895 __func__);
2896 ucc_geth_memclean(ugeth); 2896 ucc_geth_memclean(ugeth);
2897 return -ENOMEM; 2897 return -ENOMEM;
2898 } 2898 }
@@ -2932,7 +2932,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2932 if (netif_msg_ifup(ugeth)) 2932 if (netif_msg_ifup(ugeth))
2933 ugeth_err 2933 ugeth_err
2934 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", 2934 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
2935 __FUNCTION__); 2935 __func__);
2936 ucc_geth_memclean(ugeth); 2936 ucc_geth_memclean(ugeth);
2937 return -ENOMEM; 2937 return -ENOMEM;
2938 } 2938 }
@@ -2954,7 +2954,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2954 if (netif_msg_ifup(ugeth)) 2954 if (netif_msg_ifup(ugeth))
2955 ugeth_err 2955 ugeth_err
2956 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", 2956 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
2957 __FUNCTION__); 2957 __func__);
2958 ucc_geth_memclean(ugeth); 2958 ucc_geth_memclean(ugeth);
2959 return -ENOMEM; 2959 return -ENOMEM;
2960 } 2960 }
@@ -2978,7 +2978,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2978 if (netif_msg_ifup(ugeth)) 2978 if (netif_msg_ifup(ugeth))
2979 ugeth_err 2979 ugeth_err
2980 ("%s: Can not allocate DPRAM memory for" 2980 ("%s: Can not allocate DPRAM memory for"
2981 " p_rx_fw_statistics_pram.", __FUNCTION__); 2981 " p_rx_fw_statistics_pram.", __func__);
2982 ucc_geth_memclean(ugeth); 2982 ucc_geth_memclean(ugeth);
2983 return -ENOMEM; 2983 return -ENOMEM;
2984 } 2984 }
@@ -3001,7 +3001,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3001 if (netif_msg_ifup(ugeth)) 3001 if (netif_msg_ifup(ugeth))
3002 ugeth_err 3002 ugeth_err
3003 ("%s: Can not allocate DPRAM memory for" 3003 ("%s: Can not allocate DPRAM memory for"
3004 " p_rx_irq_coalescing_tbl.", __FUNCTION__); 3004 " p_rx_irq_coalescing_tbl.", __func__);
3005 ucc_geth_memclean(ugeth); 3005 ucc_geth_memclean(ugeth);
3006 return -ENOMEM; 3006 return -ENOMEM;
3007 } 3007 }
@@ -3070,7 +3070,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3070 if (netif_msg_ifup(ugeth)) 3070 if (netif_msg_ifup(ugeth))
3071 ugeth_err 3071 ugeth_err
3072 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 3072 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3073 __FUNCTION__); 3073 __func__);
3074 ucc_geth_memclean(ugeth); 3074 ucc_geth_memclean(ugeth);
3075 return -ENOMEM; 3075 return -ENOMEM;
3076 } 3076 }
@@ -3147,7 +3147,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3147 if (!ug_info->extendedFilteringChainPointer) { 3147 if (!ug_info->extendedFilteringChainPointer) {
3148 if (netif_msg_ifup(ugeth)) 3148 if (netif_msg_ifup(ugeth))
3149 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 3149 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3150 __FUNCTION__); 3150 __func__);
3151 ucc_geth_memclean(ugeth); 3151 ucc_geth_memclean(ugeth);
3152 return -EINVAL; 3152 return -EINVAL;
3153 } 3153 }
@@ -3161,7 +3161,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3161 if (netif_msg_ifup(ugeth)) 3161 if (netif_msg_ifup(ugeth))
3162 ugeth_err 3162 ugeth_err
3163 ("%s: Can not allocate DPRAM memory for" 3163 ("%s: Can not allocate DPRAM memory for"
3164 " p_exf_glbl_param.", __FUNCTION__); 3164 " p_exf_glbl_param.", __func__);
3165 ucc_geth_memclean(ugeth); 3165 ucc_geth_memclean(ugeth);
3166 return -ENOMEM; 3166 return -ENOMEM;
3167 } 3167 }
@@ -3209,7 +3209,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3209 if (netif_msg_ifup(ugeth)) 3209 if (netif_msg_ifup(ugeth))
3210 ugeth_err 3210 ugeth_err
3211 ("%s: Can not allocate memory for" 3211 ("%s: Can not allocate memory for"
3212 " p_UccInitEnetParamShadows.", __FUNCTION__); 3212 " p_UccInitEnetParamShadows.", __func__);
3213 ucc_geth_memclean(ugeth); 3213 ucc_geth_memclean(ugeth);
3214 return -ENOMEM; 3214 return -ENOMEM;
3215 } 3215 }
@@ -3244,7 +3244,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3244 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { 3244 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3245 if (netif_msg_ifup(ugeth)) 3245 if (netif_msg_ifup(ugeth))
3246 ugeth_err("%s: Invalid largest External Lookup Key Size.", 3246 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3247 __FUNCTION__); 3247 __func__);
3248 ucc_geth_memclean(ugeth); 3248 ucc_geth_memclean(ugeth);
3249 return -EINVAL; 3249 return -EINVAL;
3250 } 3250 }
@@ -3271,7 +3271,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3271 ug_info->riscRx, 1)) != 0) { 3271 ug_info->riscRx, 1)) != 0) {
3272 if (netif_msg_ifup(ugeth)) 3272 if (netif_msg_ifup(ugeth))
3273 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3273 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3274 __FUNCTION__); 3274 __func__);
3275 ucc_geth_memclean(ugeth); 3275 ucc_geth_memclean(ugeth);
3276 return ret_val; 3276 return ret_val;
3277 } 3277 }
@@ -3287,7 +3287,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3287 ug_info->riscTx, 0)) != 0) { 3287 ug_info->riscTx, 0)) != 0) {
3288 if (netif_msg_ifup(ugeth)) 3288 if (netif_msg_ifup(ugeth))
3289 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3289 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3290 __FUNCTION__); 3290 __func__);
3291 ucc_geth_memclean(ugeth); 3291 ucc_geth_memclean(ugeth);
3292 return ret_val; 3292 return ret_val;
3293 } 3293 }
@@ -3297,7 +3297,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3297 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { 3297 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3298 if (netif_msg_ifup(ugeth)) 3298 if (netif_msg_ifup(ugeth))
3299 ugeth_err("%s: Can not fill Rx bds with buffers.", 3299 ugeth_err("%s: Can not fill Rx bds with buffers.",
3300 __FUNCTION__); 3300 __func__);
3301 ucc_geth_memclean(ugeth); 3301 ucc_geth_memclean(ugeth);
3302 return ret_val; 3302 return ret_val;
3303 } 3303 }
@@ -3309,7 +3309,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3309 if (netif_msg_ifup(ugeth)) 3309 if (netif_msg_ifup(ugeth))
3310 ugeth_err 3310 ugeth_err
3311 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 3311 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3312 __FUNCTION__); 3312 __func__);
3313 ucc_geth_memclean(ugeth); 3313 ucc_geth_memclean(ugeth);
3314 return -ENOMEM; 3314 return -ENOMEM;
3315 } 3315 }
@@ -3360,7 +3360,7 @@ static void ucc_geth_timeout(struct net_device *dev)
3360{ 3360{
3361 struct ucc_geth_private *ugeth = netdev_priv(dev); 3361 struct ucc_geth_private *ugeth = netdev_priv(dev);
3362 3362
3363 ugeth_vdbg("%s: IN", __FUNCTION__); 3363 ugeth_vdbg("%s: IN", __func__);
3364 3364
3365 dev->stats.tx_errors++; 3365 dev->stats.tx_errors++;
3366 3366
@@ -3386,7 +3386,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3386 u32 bd_status; 3386 u32 bd_status;
3387 u8 txQ = 0; 3387 u8 txQ = 0;
3388 3388
3389 ugeth_vdbg("%s: IN", __FUNCTION__); 3389 ugeth_vdbg("%s: IN", __func__);
3390 3390
3391 spin_lock_irq(&ugeth->lock); 3391 spin_lock_irq(&ugeth->lock);
3392 3392
@@ -3459,7 +3459,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3459 u8 *bdBuffer; 3459 u8 *bdBuffer;
3460 struct net_device *dev; 3460 struct net_device *dev;
3461 3461
3462 ugeth_vdbg("%s: IN", __FUNCTION__); 3462 ugeth_vdbg("%s: IN", __func__);
3463 3463
3464 dev = ugeth->dev; 3464 dev = ugeth->dev;
3465 3465
@@ -3481,7 +3481,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3481 (bd_status & R_ERRORS_FATAL)) { 3481 (bd_status & R_ERRORS_FATAL)) {
3482 if (netif_msg_rx_err(ugeth)) 3482 if (netif_msg_rx_err(ugeth))
3483 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", 3483 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
3484 __FUNCTION__, __LINE__, (u32) skb); 3484 __func__, __LINE__, (u32) skb);
3485 if (skb) 3485 if (skb)
3486 dev_kfree_skb_any(skb); 3486 dev_kfree_skb_any(skb);
3487 3487
@@ -3507,7 +3507,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3507 skb = get_new_skb(ugeth, bd); 3507 skb = get_new_skb(ugeth, bd);
3508 if (!skb) { 3508 if (!skb) {
3509 if (netif_msg_rx_err(ugeth)) 3509 if (netif_msg_rx_err(ugeth))
3510 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); 3510 ugeth_warn("%s: No Rx Data Buffer", __func__);
3511 dev->stats.rx_dropped++; 3511 dev->stats.rx_dropped++;
3512 break; 3512 break;
3513 } 3513 }
@@ -3613,7 +3613,7 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3613 register u32 tx_mask; 3613 register u32 tx_mask;
3614 u8 i; 3614 u8 i;
3615 3615
3616 ugeth_vdbg("%s: IN", __FUNCTION__); 3616 ugeth_vdbg("%s: IN", __func__);
3617 3617
3618 uccf = ugeth->uccf; 3618 uccf = ugeth->uccf;
3619 ug_info = ugeth->ug_info; 3619 ug_info = ugeth->ug_info;
@@ -3683,13 +3683,13 @@ static int ucc_geth_open(struct net_device *dev)
3683 struct ucc_geth_private *ugeth = netdev_priv(dev); 3683 struct ucc_geth_private *ugeth = netdev_priv(dev);
3684 int err; 3684 int err;
3685 3685
3686 ugeth_vdbg("%s: IN", __FUNCTION__); 3686 ugeth_vdbg("%s: IN", __func__);
3687 3687
3688 /* Test station address */ 3688 /* Test station address */
3689 if (dev->dev_addr[0] & ENET_GROUP_ADDR) { 3689 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
3690 if (netif_msg_ifup(ugeth)) 3690 if (netif_msg_ifup(ugeth))
3691 ugeth_err("%s: Multicast address used for station address" 3691 ugeth_err("%s: Multicast address used for station address"
3692 " - is this what you wanted?", __FUNCTION__); 3692 " - is this what you wanted?", __func__);
3693 return -EINVAL; 3693 return -EINVAL;
3694 } 3694 }
3695 3695
@@ -3772,7 +3772,7 @@ static int ucc_geth_close(struct net_device *dev)
3772{ 3772{
3773 struct ucc_geth_private *ugeth = netdev_priv(dev); 3773 struct ucc_geth_private *ugeth = netdev_priv(dev);
3774 3774
3775 ugeth_vdbg("%s: IN", __FUNCTION__); 3775 ugeth_vdbg("%s: IN", __func__);
3776 3776
3777 napi_disable(&ugeth->napi); 3777 napi_disable(&ugeth->napi);
3778 3778
@@ -3840,7 +3840,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3840 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, 3840 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
3841 }; 3841 };
3842 3842
3843 ugeth_vdbg("%s: IN", __FUNCTION__); 3843 ugeth_vdbg("%s: IN", __func__);
3844 3844
3845 prop = of_get_property(np, "cell-index", NULL); 3845 prop = of_get_property(np, "cell-index", NULL);
3846 if (!prop) { 3846 if (!prop) {
@@ -3857,7 +3857,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3857 if (ug_info == NULL) { 3857 if (ug_info == NULL) {
3858 if (netif_msg_probe(&debug)) 3858 if (netif_msg_probe(&debug))
3859 ugeth_err("%s: [%d] Missing additional data!", 3859 ugeth_err("%s: [%d] Missing additional data!",
3860 __FUNCTION__, ucc_num); 3860 __func__, ucc_num);
3861 return -ENODEV; 3861 return -ENODEV;
3862 } 3862 }
3863 3863
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6e42b5a8c22b..1164c52e2c0a 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -92,9 +92,6 @@
92 92
93#define HSO_NET_TX_TIMEOUT (HZ*10) 93#define HSO_NET_TX_TIMEOUT (HZ*10)
94 94
95/* Serial port defines and structs. */
96#define HSO_SERIAL_FLAG_RX_SENT 0
97
98#define HSO_SERIAL_MAGIC 0x48534f31 95#define HSO_SERIAL_MAGIC 0x48534f31
99 96
100/* Number of ttys to handle */ 97/* Number of ttys to handle */
@@ -179,6 +176,12 @@ struct hso_net {
179 unsigned long flags; 176 unsigned long flags;
180}; 177};
181 178
179enum rx_ctrl_state{
180 RX_IDLE,
181 RX_SENT,
182 RX_PENDING
183};
184
182struct hso_serial { 185struct hso_serial {
183 struct hso_device *parent; 186 struct hso_device *parent;
184 int magic; 187 int magic;
@@ -205,7 +208,7 @@ struct hso_serial {
205 struct usb_endpoint_descriptor *in_endp; 208 struct usb_endpoint_descriptor *in_endp;
206 struct usb_endpoint_descriptor *out_endp; 209 struct usb_endpoint_descriptor *out_endp;
207 210
208 unsigned long flags; 211 enum rx_ctrl_state rx_state;
209 u8 rts_state; 212 u8 rts_state;
210 u8 dtr_state; 213 u8 dtr_state;
211 unsigned tx_urb_used:1; 214 unsigned tx_urb_used:1;
@@ -216,6 +219,15 @@ struct hso_serial {
216 spinlock_t serial_lock; 219 spinlock_t serial_lock;
217 220
218 int (*write_data) (struct hso_serial *serial); 221 int (*write_data) (struct hso_serial *serial);
222 /* Hacks required to get flow control
223 * working on the serial receive buffers
224 * so as not to drop characters on the floor.
225 */
226 int curr_rx_urb_idx;
227 u16 curr_rx_urb_offset;
228 u8 rx_urb_filled[MAX_RX_URBS];
229 struct tasklet_struct unthrottle_tasklet;
230 struct work_struct retry_unthrottle_workqueue;
219}; 231};
220 232
221struct hso_device { 233struct hso_device {
@@ -271,7 +283,7 @@ struct hso_device {
271static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, 283static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
272 unsigned int set, unsigned int clear); 284 unsigned int set, unsigned int clear);
273static void ctrl_callback(struct urb *urb); 285static void ctrl_callback(struct urb *urb);
274static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial); 286static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial);
275static void hso_kick_transmit(struct hso_serial *serial); 287static void hso_kick_transmit(struct hso_serial *serial);
276/* Helper functions */ 288/* Helper functions */
277static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int, 289static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int,
@@ -287,6 +299,8 @@ static int hso_start_net_device(struct hso_device *hso_dev);
287static void hso_free_shared_int(struct hso_shared_int *shared_int); 299static void hso_free_shared_int(struct hso_shared_int *shared_int);
288static int hso_stop_net_device(struct hso_device *hso_dev); 300static int hso_stop_net_device(struct hso_device *hso_dev);
289static void hso_serial_ref_free(struct kref *ref); 301static void hso_serial_ref_free(struct kref *ref);
302static void hso_std_serial_read_bulk_callback(struct urb *urb);
303static int hso_mux_serial_read(struct hso_serial *serial);
290static void async_get_intf(struct work_struct *data); 304static void async_get_intf(struct work_struct *data);
291static void async_put_intf(struct work_struct *data); 305static void async_put_intf(struct work_struct *data);
292static int hso_put_activity(struct hso_device *hso_dev); 306static int hso_put_activity(struct hso_device *hso_dev);
@@ -458,6 +472,17 @@ static ssize_t hso_sysfs_show_porttype(struct device *dev,
458} 472}
459static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL); 473static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL);
460 474
475static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb)
476{
477 int idx;
478
479 for (idx = 0; idx < serial->num_rx_urbs; idx++)
480 if (serial->rx_urb[idx] == urb)
481 return idx;
482 dev_err(serial->parent->dev, "hso_urb_to_index failed\n");
483 return -1;
484}
485
461/* converts mux value to a port spec value */ 486/* converts mux value to a port spec value */
462static u32 hso_mux_to_port(int mux) 487static u32 hso_mux_to_port(int mux)
463{ 488{
@@ -1039,6 +1064,158 @@ static void _hso_serial_set_termios(struct tty_struct *tty,
1039 return; 1064 return;
1040} 1065}
1041 1066
1067static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb)
1068{
1069 int result;
1070#ifdef CONFIG_HSO_AUTOPM
1071 usb_mark_last_busy(urb->dev);
1072#endif
1073 /* We are done with this URB, resubmit it. Prep the USB to wait for
1074 * another frame */
1075 usb_fill_bulk_urb(urb, serial->parent->usb,
1076 usb_rcvbulkpipe(serial->parent->usb,
1077 serial->in_endp->
1078 bEndpointAddress & 0x7F),
1079 urb->transfer_buffer, serial->rx_data_length,
1080 hso_std_serial_read_bulk_callback, serial);
1081 /* Give this to the USB subsystem so it can tell us when more data
1082 * arrives. */
1083 result = usb_submit_urb(urb, GFP_ATOMIC);
1084 if (result) {
1085 dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d\n",
1086 __func__, result);
1087 }
1088}
1089
1090
1091
1092
1093static void put_rxbuf_data_and_resubmit_bulk_urb(struct hso_serial *serial)
1094{
1095 int count;
1096 struct urb *curr_urb;
1097
1098 while (serial->rx_urb_filled[serial->curr_rx_urb_idx]) {
1099 curr_urb = serial->rx_urb[serial->curr_rx_urb_idx];
1100 count = put_rxbuf_data(curr_urb, serial);
1101 if (count == -1)
1102 return;
1103 if (count == 0) {
1104 serial->curr_rx_urb_idx++;
1105 if (serial->curr_rx_urb_idx >= serial->num_rx_urbs)
1106 serial->curr_rx_urb_idx = 0;
1107 hso_resubmit_rx_bulk_urb(serial, curr_urb);
1108 }
1109 }
1110}
1111
1112static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
1113{
1114 int count = 0;
1115 struct urb *urb;
1116
1117 urb = serial->rx_urb[0];
1118 if (serial->open_count > 0) {
1119 count = put_rxbuf_data(urb, serial);
1120 if (count == -1)
1121 return;
1122 }
1123 /* Re issue a read as long as we receive data. */
1124
1125 if (count == 0 && ((urb->actual_length != 0) ||
1126 (serial->rx_state == RX_PENDING))) {
1127 serial->rx_state = RX_SENT;
1128 hso_mux_serial_read(serial);
1129 } else
1130 serial->rx_state = RX_IDLE;
1131}
1132
1133
1134/* read callback for Diag and CS port */
1135static void hso_std_serial_read_bulk_callback(struct urb *urb)
1136{
1137 struct hso_serial *serial = urb->context;
1138 int status = urb->status;
1139
1140 /* sanity check */
1141 if (!serial) {
1142 D1("serial == NULL");
1143 return;
1144 } else if (status) {
1145 log_usb_status(status, __func__);
1146 return;
1147 }
1148
1149 D4("\n--- Got serial_read_bulk callback %02x ---", status);
1150 D1("Actual length = %d\n", urb->actual_length);
1151 DUMP1(urb->transfer_buffer, urb->actual_length);
1152
1153 /* Anyone listening? */
1154 if (serial->open_count == 0)
1155 return;
1156
1157 if (status == 0) {
1158 if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
1159 u32 rest;
1160 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
1161 rest =
1162 urb->actual_length %
1163 serial->in_endp->wMaxPacketSize;
1164 if (((rest == 5) || (rest == 6))
1165 && !memcmp(((u8 *) urb->transfer_buffer) +
1166 urb->actual_length - 4, crc_check, 4)) {
1167 urb->actual_length -= 4;
1168 }
1169 }
1170 /* Valid data, handle RX data */
1171 spin_lock(&serial->serial_lock);
1172 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
1173 put_rxbuf_data_and_resubmit_bulk_urb(serial);
1174 spin_unlock(&serial->serial_lock);
1175 } else if (status == -ENOENT || status == -ECONNRESET) {
1176 /* Unlinked - check for throttled port. */
1177 D2("Port %d, successfully unlinked urb", serial->minor);
1178 spin_lock(&serial->serial_lock);
1179 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
1180 hso_resubmit_rx_bulk_urb(serial, urb);
1181 spin_unlock(&serial->serial_lock);
1182 } else {
1183 D2("Port %d, status = %d for read urb", serial->minor, status);
1184 return;
1185 }
1186}
1187
1188/*
1189 * This needs to be a tasklet otherwise we will
1190 * end up recursively calling this function.
1191 */
1192void hso_unthrottle_tasklet(struct hso_serial *serial)
1193{
1194 unsigned long flags;
1195
1196 spin_lock_irqsave(&serial->serial_lock, flags);
1197 if ((serial->parent->port_spec & HSO_INTF_MUX))
1198 put_rxbuf_data_and_resubmit_ctrl_urb(serial);
1199 else
1200 put_rxbuf_data_and_resubmit_bulk_urb(serial);
1201 spin_unlock_irqrestore(&serial->serial_lock, flags);
1202}
1203
1204static void hso_unthrottle(struct tty_struct *tty)
1205{
1206 struct hso_serial *serial = get_serial_by_tty(tty);
1207
1208 tasklet_hi_schedule(&serial->unthrottle_tasklet);
1209}
1210
1211void hso_unthrottle_workfunc(struct work_struct *work)
1212{
1213 struct hso_serial *serial =
1214 container_of(work, struct hso_serial,
1215 retry_unthrottle_workqueue);
1216 hso_unthrottle_tasklet(serial);
1217}
1218
1042/* open the requested serial port */ 1219/* open the requested serial port */
1043static int hso_serial_open(struct tty_struct *tty, struct file *filp) 1220static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1044{ 1221{
@@ -1064,13 +1241,18 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1064 tty->driver_data = serial; 1241 tty->driver_data = serial;
1065 serial->tty = tty; 1242 serial->tty = tty;
1066 1243
1067 /* check for port allready opened, if not set the termios */ 1244 /* check for port already opened, if not set the termios */
1068 serial->open_count++; 1245 serial->open_count++;
1069 if (serial->open_count == 1) { 1246 if (serial->open_count == 1) {
1070 tty->low_latency = 1; 1247 tty->low_latency = 1;
1071 serial->flags = 0; 1248 serial->rx_state = RX_IDLE;
1072 /* Force default termio settings */ 1249 /* Force default termio settings */
1073 _hso_serial_set_termios(tty, NULL); 1250 _hso_serial_set_termios(tty, NULL);
1251 tasklet_init(&serial->unthrottle_tasklet,
1252 (void (*)(unsigned long))hso_unthrottle_tasklet,
1253 (unsigned long)serial);
1254 INIT_WORK(&serial->retry_unthrottle_workqueue,
1255 hso_unthrottle_workfunc);
1074 result = hso_start_serial_device(serial->parent, GFP_KERNEL); 1256 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
1075 if (result) { 1257 if (result) {
1076 hso_stop_serial_device(serial->parent); 1258 hso_stop_serial_device(serial->parent);
@@ -1117,9 +1299,13 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1117 } 1299 }
1118 if (!usb_gone) 1300 if (!usb_gone)
1119 hso_stop_serial_device(serial->parent); 1301 hso_stop_serial_device(serial->parent);
1302 tasklet_kill(&serial->unthrottle_tasklet);
1303 cancel_work_sync(&serial->retry_unthrottle_workqueue);
1120 } 1304 }
1305
1121 if (!usb_gone) 1306 if (!usb_gone)
1122 usb_autopm_put_interface(serial->parent->interface); 1307 usb_autopm_put_interface(serial->parent->interface);
1308
1123 mutex_unlock(&serial->parent->mutex); 1309 mutex_unlock(&serial->parent->mutex);
1124} 1310}
1125 1311
@@ -1422,15 +1608,21 @@ static void intr_callback(struct urb *urb)
1422 (1 << i)); 1608 (1 << i));
1423 if (serial != NULL) { 1609 if (serial != NULL) {
1424 D1("Pending read interrupt on port %d\n", i); 1610 D1("Pending read interrupt on port %d\n", i);
1425 if (!test_and_set_bit(HSO_SERIAL_FLAG_RX_SENT, 1611 spin_lock(&serial->serial_lock);
1426 &serial->flags)) { 1612 if (serial->rx_state == RX_IDLE) {
1427 /* Setup and send a ctrl req read on 1613 /* Setup and send a ctrl req read on
1428 * port i */ 1614 * port i */
1429 hso_mux_serial_read(serial); 1615 if (!serial->rx_urb_filled[0]) {
1616 serial->rx_state = RX_SENT;
1617 hso_mux_serial_read(serial);
1618 } else
1619 serial->rx_state = RX_PENDING;
1620
1430 } else { 1621 } else {
1431 D1("Already pending a read on " 1622 D1("Already pending a read on "
1432 "port %d\n", i); 1623 "port %d\n", i);
1433 } 1624 }
1625 spin_unlock(&serial->serial_lock);
1434 } 1626 }
1435 } 1627 }
1436 } 1628 }
@@ -1532,16 +1724,10 @@ static void ctrl_callback(struct urb *urb)
1532 if (req->bRequestType == 1724 if (req->bRequestType ==
1533 (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) { 1725 (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) {
1534 /* response to a read command */ 1726 /* response to a read command */
1535 if (serial->open_count > 0) { 1727 serial->rx_urb_filled[0] = 1;
1536 /* handle RX data the normal way */ 1728 spin_lock(&serial->serial_lock);
1537 put_rxbuf_data(urb, serial); 1729 put_rxbuf_data_and_resubmit_ctrl_urb(serial);
1538 } 1730 spin_unlock(&serial->serial_lock);
1539
1540 /* Re issue a read as long as we receive data. */
1541 if (urb->actual_length != 0)
1542 hso_mux_serial_read(serial);
1543 else
1544 clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags);
1545 } else { 1731 } else {
1546 hso_put_activity(serial->parent); 1732 hso_put_activity(serial->parent);
1547 if (serial->tty) 1733 if (serial->tty)
@@ -1552,91 +1738,42 @@ static void ctrl_callback(struct urb *urb)
1552} 1738}
1553 1739
1554/* handle RX data for serial port */ 1740/* handle RX data for serial port */
1555static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial) 1741static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
1556{ 1742{
1557 struct tty_struct *tty = serial->tty; 1743 struct tty_struct *tty = serial->tty;
1558 1744 int write_length_remaining = 0;
1745 int curr_write_len;
1559 /* Sanity check */ 1746 /* Sanity check */
1560 if (urb == NULL || serial == NULL) { 1747 if (urb == NULL || serial == NULL) {
1561 D1("serial = NULL"); 1748 D1("serial = NULL");
1562 return; 1749 return -2;
1563 } 1750 }
1564 1751
1565 /* Push data to tty */ 1752 /* Push data to tty */
1566 if (tty && urb->actual_length) { 1753 if (tty) {
1754 write_length_remaining = urb->actual_length -
1755 serial->curr_rx_urb_offset;
1567 D1("data to push to tty"); 1756 D1("data to push to tty");
1568 tty_insert_flip_string(tty, urb->transfer_buffer, 1757 while (write_length_remaining) {
1569 urb->actual_length); 1758 if (test_bit(TTY_THROTTLED, &tty->flags))
1570 tty_flip_buffer_push(tty); 1759 return -1;
1571 } 1760 curr_write_len = tty_insert_flip_string
1572} 1761 (tty, urb->transfer_buffer +
1573 1762 serial->curr_rx_urb_offset,
1574/* read callback for Diag and CS port */ 1763 write_length_remaining);
1575static void hso_std_serial_read_bulk_callback(struct urb *urb) 1764 serial->curr_rx_urb_offset += curr_write_len;
1576{ 1765 write_length_remaining -= curr_write_len;
1577 struct hso_serial *serial = urb->context; 1766 tty_flip_buffer_push(tty);
1578 int result;
1579 int status = urb->status;
1580
1581 /* sanity check */
1582 if (!serial) {
1583 D1("serial == NULL");
1584 return;
1585 } else if (status) {
1586 log_usb_status(status, __func__);
1587 return;
1588 }
1589
1590 D4("\n--- Got serial_read_bulk callback %02x ---", status);
1591 D1("Actual length = %d\n", urb->actual_length);
1592 DUMP1(urb->transfer_buffer, urb->actual_length);
1593
1594 /* Anyone listening? */
1595 if (serial->open_count == 0)
1596 return;
1597
1598 if (status == 0) {
1599 if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
1600 u32 rest;
1601 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
1602 rest =
1603 urb->actual_length %
1604 serial->in_endp->wMaxPacketSize;
1605 if (((rest == 5) || (rest == 6))
1606 && !memcmp(((u8 *) urb->transfer_buffer) +
1607 urb->actual_length - 4, crc_check, 4)) {
1608 urb->actual_length -= 4;
1609 }
1610 } 1767 }
1611 /* Valid data, handle RX data */
1612 put_rxbuf_data(urb, serial);
1613 } else if (status == -ENOENT || status == -ECONNRESET) {
1614 /* Unlinked - check for throttled port. */
1615 D2("Port %d, successfully unlinked urb", serial->minor);
1616 } else {
1617 D2("Port %d, status = %d for read urb", serial->minor, status);
1618 return;
1619 } 1768 }
1620 1769 if (write_length_remaining == 0) {
1621 usb_mark_last_busy(urb->dev); 1770 serial->curr_rx_urb_offset = 0;
1622 1771 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
1623 /* We are done with this URB, resubmit it. Prep the USB to wait for
1624 * another frame */
1625 usb_fill_bulk_urb(urb, serial->parent->usb,
1626 usb_rcvbulkpipe(serial->parent->usb,
1627 serial->in_endp->
1628 bEndpointAddress & 0x7F),
1629 urb->transfer_buffer, serial->rx_data_length,
1630 hso_std_serial_read_bulk_callback, serial);
1631 /* Give this to the USB subsystem so it can tell us when more data
1632 * arrives. */
1633 result = usb_submit_urb(urb, GFP_ATOMIC);
1634 if (result) {
1635 dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d",
1636 __func__, result);
1637 } 1772 }
1773 return write_length_remaining;
1638} 1774}
1639 1775
1776
1640/* Base driver functions */ 1777/* Base driver functions */
1641 1778
1642static void hso_log_port(struct hso_device *hso_dev) 1779static void hso_log_port(struct hso_device *hso_dev)
@@ -1794,9 +1931,13 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
1794 return -ENODEV; 1931 return -ENODEV;
1795 1932
1796 for (i = 0; i < serial->num_rx_urbs; i++) { 1933 for (i = 0; i < serial->num_rx_urbs; i++) {
1797 if (serial->rx_urb[i]) 1934 if (serial->rx_urb[i]) {
1798 usb_kill_urb(serial->rx_urb[i]); 1935 usb_kill_urb(serial->rx_urb[i]);
1936 serial->rx_urb_filled[i] = 0;
1937 }
1799 } 1938 }
1939 serial->curr_rx_urb_idx = 0;
1940 serial->curr_rx_urb_offset = 0;
1800 1941
1801 if (serial->tx_urb) 1942 if (serial->tx_urb)
1802 usb_kill_urb(serial->tx_urb); 1943 usb_kill_urb(serial->tx_urb);
@@ -2211,14 +2352,14 @@ static struct hso_device *hso_create_bulk_serial_device(
2211 USB_DIR_IN); 2352 USB_DIR_IN);
2212 if (!serial->in_endp) { 2353 if (!serial->in_endp) {
2213 dev_err(&interface->dev, "Failed to find BULK IN ep\n"); 2354 dev_err(&interface->dev, "Failed to find BULK IN ep\n");
2214 goto exit; 2355 goto exit2;
2215 } 2356 }
2216 2357
2217 if (! 2358 if (!
2218 (serial->out_endp = 2359 (serial->out_endp =
2219 hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) { 2360 hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) {
2220 dev_err(&interface->dev, "Failed to find BULK IN ep\n"); 2361 dev_err(&interface->dev, "Failed to find BULK IN ep\n");
2221 goto exit; 2362 goto exit2;
2222 } 2363 }
2223 2364
2224 serial->write_data = hso_std_serial_write_data; 2365 serial->write_data = hso_std_serial_write_data;
@@ -2231,9 +2372,10 @@ static struct hso_device *hso_create_bulk_serial_device(
2231 2372
2232 /* done, return it */ 2373 /* done, return it */
2233 return hso_dev; 2374 return hso_dev;
2375
2376exit2:
2377 hso_serial_common_free(serial);
2234exit: 2378exit:
2235 if (hso_dev && serial)
2236 hso_serial_common_free(serial);
2237 kfree(serial); 2379 kfree(serial);
2238 hso_free_device(hso_dev); 2380 hso_free_device(hso_dev);
2239 return NULL; 2381 return NULL;
@@ -2740,6 +2882,7 @@ static const struct tty_operations hso_serial_ops = {
2740 .chars_in_buffer = hso_serial_chars_in_buffer, 2882 .chars_in_buffer = hso_serial_chars_in_buffer,
2741 .tiocmget = hso_serial_tiocmget, 2883 .tiocmget = hso_serial_tiocmget,
2742 .tiocmset = hso_serial_tiocmset, 2884 .tiocmset = hso_serial_tiocmset,
2885 .unthrottle = hso_unthrottle
2743}; 2886};
2744 2887
2745static struct usb_driver hso_driver = { 2888static struct usb_driver hso_driver = {
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index ca9d00c1194e..b5143509e8be 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -118,7 +118,7 @@ static void mcs7830_async_cmd_callback(struct urb *urb)
118 118
119 if (urb->status < 0) 119 if (urb->status < 0)
120 printk(KERN_DEBUG "%s() failed with %d\n", 120 printk(KERN_DEBUG "%s() failed with %d\n",
121 __FUNCTION__, urb->status); 121 __func__, urb->status);
122 122
123 kfree(req); 123 kfree(req);
124 usb_free_urb(urb); 124 usb_free_urb(urb);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index a84ba487c713..38b90e7a7ed3 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -117,9 +117,9 @@ static void ctrl_callback(struct urb *urb)
117 case -ENOENT: 117 case -ENOENT:
118 break; 118 break;
119 default: 119 default:
120 if (netif_msg_drv(pegasus)) 120 if (netif_msg_drv(pegasus) && printk_ratelimit())
121 dev_dbg(&pegasus->intf->dev, "%s, status %d\n", 121 dev_dbg(&pegasus->intf->dev, "%s, status %d\n",
122 __FUNCTION__, urb->status); 122 __func__, urb->status);
123 } 123 }
124 pegasus->flags &= ~ETH_REGS_CHANGED; 124 pegasus->flags &= ~ETH_REGS_CHANGED;
125 wake_up(&pegasus->ctrl_wait); 125 wake_up(&pegasus->ctrl_wait);
@@ -136,7 +136,7 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
136 if (!buffer) { 136 if (!buffer) {
137 if (netif_msg_drv(pegasus)) 137 if (netif_msg_drv(pegasus))
138 dev_warn(&pegasus->intf->dev, "out of memory in %s\n", 138 dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
139 __FUNCTION__); 139 __func__);
140 return -ENOMEM; 140 return -ENOMEM;
141 } 141 }
142 add_wait_queue(&pegasus->ctrl_wait, &wait); 142 add_wait_queue(&pegasus->ctrl_wait, &wait);
@@ -166,7 +166,7 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
166 set_current_state(TASK_RUNNING); 166 set_current_state(TASK_RUNNING);
167 if (ret == -ENODEV) 167 if (ret == -ENODEV)
168 netif_device_detach(pegasus->net); 168 netif_device_detach(pegasus->net);
169 if (netif_msg_drv(pegasus)) 169 if (netif_msg_drv(pegasus) && printk_ratelimit())
170 dev_err(&pegasus->intf->dev, "%s, status %d\n", 170 dev_err(&pegasus->intf->dev, "%s, status %d\n",
171 __FUNCTION__, ret); 171 __FUNCTION__, ret);
172 goto out; 172 goto out;
@@ -224,7 +224,7 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
224 netif_device_detach(pegasus->net); 224 netif_device_detach(pegasus->net);
225 if (netif_msg_drv(pegasus)) 225 if (netif_msg_drv(pegasus))
226 dev_err(&pegasus->intf->dev, "%s, status %d\n", 226 dev_err(&pegasus->intf->dev, "%s, status %d\n",
227 __FUNCTION__, ret); 227 __func__, ret);
228 goto out; 228 goto out;
229 } 229 }
230 230
@@ -246,7 +246,7 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
246 if (!tmp) { 246 if (!tmp) {
247 if (netif_msg_drv(pegasus)) 247 if (netif_msg_drv(pegasus))
248 dev_warn(&pegasus->intf->dev, "out of memory in %s\n", 248 dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
249 __FUNCTION__); 249 __func__);
250 return -ENOMEM; 250 return -ENOMEM;
251 } 251 }
252 memcpy(tmp, &data, 1); 252 memcpy(tmp, &data, 1);
@@ -275,9 +275,9 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
275 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) { 275 if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
276 if (ret == -ENODEV) 276 if (ret == -ENODEV)
277 netif_device_detach(pegasus->net); 277 netif_device_detach(pegasus->net);
278 if (netif_msg_drv(pegasus)) 278 if (netif_msg_drv(pegasus) && printk_ratelimit())
279 dev_err(&pegasus->intf->dev, "%s, status %d\n", 279 dev_err(&pegasus->intf->dev, "%s, status %d\n",
280 __FUNCTION__, ret); 280 __func__, ret);
281 goto out; 281 goto out;
282 } 282 }
283 283
@@ -310,7 +310,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
310 netif_device_detach(pegasus->net); 310 netif_device_detach(pegasus->net);
311 if (netif_msg_drv(pegasus)) 311 if (netif_msg_drv(pegasus))
312 dev_err(&pegasus->intf->dev, "%s, status %d\n", 312 dev_err(&pegasus->intf->dev, "%s, status %d\n",
313 __FUNCTION__, ret); 313 __func__, ret);
314 } 314 }
315 315
316 return ret; 316 return ret;
@@ -341,7 +341,7 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
341 } 341 }
342fail: 342fail:
343 if (netif_msg_drv(pegasus)) 343 if (netif_msg_drv(pegasus))
344 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 344 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
345 345
346 return ret; 346 return ret;
347} 347}
@@ -378,7 +378,7 @@ static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd)
378 378
379fail: 379fail:
380 if (netif_msg_drv(pegasus)) 380 if (netif_msg_drv(pegasus))
381 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 381 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
382 return -ETIMEDOUT; 382 return -ETIMEDOUT;
383} 383}
384 384
@@ -415,7 +415,7 @@ static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
415 415
416fail: 416fail:
417 if (netif_msg_drv(pegasus)) 417 if (netif_msg_drv(pegasus))
418 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 418 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
419 return -ETIMEDOUT; 419 return -ETIMEDOUT;
420} 420}
421 421
@@ -463,7 +463,7 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
463 return ret; 463 return ret;
464fail: 464fail:
465 if (netif_msg_drv(pegasus)) 465 if (netif_msg_drv(pegasus))
466 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 466 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
467 return -ETIMEDOUT; 467 return -ETIMEDOUT;
468} 468}
469#endif /* PEGASUS_WRITE_EEPROM */ 469#endif /* PEGASUS_WRITE_EEPROM */
@@ -1209,8 +1209,7 @@ static void pegasus_set_multicast(struct net_device *net)
1209 pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS; 1209 pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
1210 if (netif_msg_link(pegasus)) 1210 if (netif_msg_link(pegasus))
1211 pr_info("%s: Promiscuous mode enabled.\n", net->name); 1211 pr_info("%s: Promiscuous mode enabled.\n", net->name);
1212 } else if (net->mc_count || 1212 } else if (net->mc_count || (net->flags & IFF_ALLMULTI)) {
1213 (net->flags & IFF_ALLMULTI)) {
1214 pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST; 1213 pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
1215 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; 1214 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
1216 if (netif_msg_link(pegasus)) 1215 if (netif_msg_link(pegasus))
@@ -1220,6 +1219,8 @@ static void pegasus_set_multicast(struct net_device *net)
1220 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; 1219 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
1221 } 1220 }
1222 1221
1222 pegasus->ctrl_urb->status = 0;
1223
1223 pegasus->flags |= ETH_REGS_CHANGE; 1224 pegasus->flags |= ETH_REGS_CHANGE;
1224 ctrl_callback(pegasus->ctrl_urb); 1225 ctrl_callback(pegasus->ctrl_urb);
1225} 1226}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8463efb9e0b1..02d25c743994 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -512,14 +512,13 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
512 int count = 0; 512 int count = 0;
513 513
514 spin_lock_irqsave (&q->lock, flags); 514 spin_lock_irqsave (&q->lock, flags);
515 for (skb = q->next; skb != (struct sk_buff *) q; skb = skbnext) { 515 skb_queue_walk_safe(q, skb, skbnext) {
516 struct skb_data *entry; 516 struct skb_data *entry;
517 struct urb *urb; 517 struct urb *urb;
518 int retval; 518 int retval;
519 519
520 entry = (struct skb_data *) skb->cb; 520 entry = (struct skb_data *) skb->cb;
521 urb = entry->urb; 521 urb = entry->urb;
522 skbnext = skb->next;
523 522
524 // during some PM-driven resume scenarios, 523 // during some PM-driven resume scenarios,
525 // these (async) unlinks complete immediately 524 // these (async) unlinks complete immediately
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 96dff04334b8..5b7870080c56 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -914,7 +914,7 @@ static void alloc_rbufs(struct net_device *dev)
914 914
915 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 915 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
916 for (i = 0; i < RX_RING_SIZE; i++) { 916 for (i = 0; i < RX_RING_SIZE; i++) {
917 struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz); 917 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
918 rp->rx_skbuff[i] = skb; 918 rp->rx_skbuff[i] = skb;
919 if (skb == NULL) 919 if (skb == NULL)
920 break; 920 break;
@@ -1473,8 +1473,8 @@ static int rhine_rx(struct net_device *dev, int limit)
1473 /* Check if the packet is long enough to accept without 1473 /* Check if the packet is long enough to accept without
1474 copying to a minimally-sized skbuff. */ 1474 copying to a minimally-sized skbuff. */
1475 if (pkt_len < rx_copybreak && 1475 if (pkt_len < rx_copybreak &&
1476 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1476 (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
1477 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1477 skb_reserve(skb, NET_IP_ALIGN); /* 16 byte align the IP header */
1478 pci_dma_sync_single_for_cpu(rp->pdev, 1478 pci_dma_sync_single_for_cpu(rp->pdev,
1479 rp->rx_skbuff_dma[entry], 1479 rp->rx_skbuff_dma[entry],
1480 rp->rx_buf_sz, 1480 rp->rx_buf_sz,
@@ -1518,7 +1518,7 @@ static int rhine_rx(struct net_device *dev, int limit)
1518 struct sk_buff *skb; 1518 struct sk_buff *skb;
1519 entry = rp->dirty_rx % RX_RING_SIZE; 1519 entry = rp->dirty_rx % RX_RING_SIZE;
1520 if (rp->rx_skbuff[entry] == NULL) { 1520 if (rp->rx_skbuff[entry] == NULL) {
1521 skb = dev_alloc_skb(rp->rx_buf_sz); 1521 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1522 rp->rx_skbuff[entry] = skb; 1522 rp->rx_skbuff[entry] = skb;
1523 if (skb == NULL) 1523 if (skb == NULL)
1524 break; /* Better luck next round. */ 1524 break; /* Better luck next round. */
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 1b95b04c9257..29a33090d3d4 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1381,7 +1381,7 @@ enum velocity_msg_level {
1381#define ASSERT(x) { \ 1381#define ASSERT(x) { \
1382 if (!(x)) { \ 1382 if (!(x)) { \
1383 printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\ 1383 printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\
1384 __FUNCTION__, __LINE__);\ 1384 __func__, __LINE__);\
1385 BUG(); \ 1385 BUG(); \
1386 }\ 1386 }\
1387} 1387}
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index d14e6678deed..a5ddc6c8963e 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -407,7 +407,7 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
407 if (cfm->version != CFM_VERSION) { 407 if (cfm->version != CFM_VERSION) {
408 printk(KERN_ERR "%s:%s: firmware format %u rejected! " 408 printk(KERN_ERR "%s:%s: firmware format %u rejected! "
409 "Expecting %u.\n", 409 "Expecting %u.\n",
410 modname, __FUNCTION__, cfm->version, CFM_VERSION); 410 modname, __func__, cfm->version, CFM_VERSION);
411 return -EINVAL; 411 return -EINVAL;
412 } 412 }
413 413
@@ -420,7 +420,7 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
420*/ 420*/
421 if (cksum != cfm->checksum) { 421 if (cksum != cfm->checksum) {
422 printk(KERN_ERR "%s:%s: firmware corrupted!\n", 422 printk(KERN_ERR "%s:%s: firmware corrupted!\n",
423 modname, __FUNCTION__); 423 modname, __func__);
424 printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n", 424 printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n",
425 len - (int)sizeof(struct cycx_firmware) - 1, 425 len - (int)sizeof(struct cycx_firmware) - 1,
426 cfm->info.codesize); 426 cfm->info.codesize);
@@ -432,7 +432,7 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
432 /* If everything is ok, set reset, data and code pointers */ 432 /* If everything is ok, set reset, data and code pointers */
433 img_hdr = (struct cycx_fw_header *)&cfm->image; 433 img_hdr = (struct cycx_fw_header *)&cfm->image;
434#ifdef FIRMWARE_DEBUG 434#ifdef FIRMWARE_DEBUG
435 printk(KERN_INFO "%s:%s: image sizes\n", __FUNCTION__, modname); 435 printk(KERN_INFO "%s:%s: image sizes\n", __func__, modname);
436 printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size); 436 printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size);
437 printk(KERN_INFO " data=%lu\n", img_hdr->data_size); 437 printk(KERN_INFO " data=%lu\n", img_hdr->data_size);
438 printk(KERN_INFO " code=%lu\n", img_hdr->code_size); 438 printk(KERN_INFO " code=%lu\n", img_hdr->code_size);
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index d3b28b01b9f9..5a7303dc0965 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -874,7 +874,7 @@ static void cycx_x25_irq_connect(struct cycx_device *card,
874 nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1); 874 nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
875 875
876 dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n", 876 dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n",
877 __FUNCTION__, lcn, loc, rem); 877 __func__, lcn, loc, rem);
878 878
879 dev = cycx_x25_get_dev_by_dte_addr(wandev, rem); 879 dev = cycx_x25_get_dev_by_dte_addr(wandev, rem);
880 if (!dev) { 880 if (!dev) {
@@ -902,7 +902,7 @@ static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
902 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); 902 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
903 cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key)); 903 cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
904 dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n", 904 dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n",
905 card->devname, __FUNCTION__, lcn, key); 905 card->devname, __func__, lcn, key);
906 906
907 dev = cycx_x25_get_dev_by_lcn(wandev, -key); 907 dev = cycx_x25_get_dev_by_lcn(wandev, -key);
908 if (!dev) { 908 if (!dev) {
@@ -929,7 +929,7 @@ static void cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
929 929
930 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); 930 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
931 dprintk(1, KERN_INFO "%s: %s:lcn=%d\n", 931 dprintk(1, KERN_INFO "%s: %s:lcn=%d\n",
932 card->devname, __FUNCTION__, lcn); 932 card->devname, __func__, lcn);
933 dev = cycx_x25_get_dev_by_lcn(wandev, lcn); 933 dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
934 if (!dev) { 934 if (!dev) {
935 /* Invalid channel, discard packet */ 935 /* Invalid channel, discard packet */
@@ -950,7 +950,7 @@ static void cycx_x25_irq_disconnect(struct cycx_device *card,
950 u8 lcn; 950 u8 lcn;
951 951
952 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); 952 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
953 dprintk(1, KERN_INFO "%s:lcn=%d\n", __FUNCTION__, lcn); 953 dprintk(1, KERN_INFO "%s:lcn=%d\n", __func__, lcn);
954 954
955 dev = cycx_x25_get_dev_by_lcn(wandev, lcn); 955 dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
956 if (dev) { 956 if (dev) {
@@ -1381,7 +1381,7 @@ static void cycx_x25_chan_timer(unsigned long d)
1381 cycx_x25_chan_disconnect(dev); 1381 cycx_x25_chan_disconnect(dev);
1382 else 1382 else
1383 printk(KERN_ERR "%s: %s for svc (%s) not connected!\n", 1383 printk(KERN_ERR "%s: %s for svc (%s) not connected!\n",
1384 chan->card->devname, __FUNCTION__, dev->name); 1384 chan->card->devname, __func__, dev->name);
1385} 1385}
1386 1386
1387/* Set logical channel state. */ 1387/* Set logical channel state. */
@@ -1485,7 +1485,7 @@ static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
1485 unsigned char *ptr; 1485 unsigned char *ptr;
1486 1486
1487 if ((skb = dev_alloc_skb(1)) == NULL) { 1487 if ((skb = dev_alloc_skb(1)) == NULL) {
1488 printk(KERN_ERR "%s: out of memory\n", __FUNCTION__); 1488 printk(KERN_ERR "%s: out of memory\n", __func__);
1489 return; 1489 return;
1490 } 1490 }
1491 1491
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index f5d55ad02267..5f1ccb2b08b1 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -647,7 +647,7 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
647 647
648 skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE]; 648 skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
649 if (!skb) { 649 if (!skb) {
650 printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __FUNCTION__); 650 printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
651 goto refill; 651 goto refill;
652 } 652 }
653 pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2)); 653 pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 8b7e5d2e2ac9..cbcbf6f0414c 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -163,15 +163,17 @@ static void x25_close(struct net_device *dev)
163 163
164static int x25_rx(struct sk_buff *skb) 164static int x25_rx(struct sk_buff *skb)
165{ 165{
166 struct net_device *dev = skb->dev;
167
166 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 168 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
167 skb->dev->stats.rx_dropped++; 169 dev->stats.rx_dropped++;
168 return NET_RX_DROP; 170 return NET_RX_DROP;
169 } 171 }
170 172
171 if (lapb_data_received(skb->dev, skb) == LAPB_OK) 173 if (lapb_data_received(dev, skb) == LAPB_OK)
172 return NET_RX_SUCCESS; 174 return NET_RX_SUCCESS;
173 175
174 skb->dev->stats.rx_errors++; 176 dev->stats.rx_errors++;
175 dev_kfree_skb_any(skb); 177 dev_kfree_skb_any(skb);
176 return NET_RX_DROP; 178 return NET_RX_DROP;
177} 179}
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4518d0aa2480..4917a94943bd 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -548,7 +548,7 @@ static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
548{ 548{
549 st_cpc_tty_area *cpc_tty; 549 st_cpc_tty_area *cpc_tty;
550 550
551 CPC_TTY_DBG("%s: set:%x clear:%x\n", __FUNCTION__, set, clear); 551 CPC_TTY_DBG("%s: set:%x clear:%x\n", __func__, set, clear);
552 552
553 if (!tty || !tty->driver_data ) { 553 if (!tty || !tty->driver_data ) {
554 CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n"); 554 CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n");
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 6596cd0742b9..f972fef87c98 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -856,7 +856,7 @@ prepare_to_send( struct sk_buff *skb, struct net_device *dev )
856 len = SBNI_MIN_LEN; 856 len = SBNI_MIN_LEN;
857 857
858 nl->tx_buf_p = skb; 858 nl->tx_buf_p = skb;
859 nl->tx_frameno = (len + nl->maxframe - 1) / nl->maxframe; 859 nl->tx_frameno = DIV_ROUND_UP(len, nl->maxframe);
860 nl->framelen = len < nl->maxframe ? len : nl->maxframe; 860 nl->framelen = len < nl->maxframe ? len : nl->maxframe;
861 861
862 outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 ); 862 outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 9931b5ab59cd..45bdf0b339bb 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -300,6 +300,19 @@ config LIBERTAS_DEBUG
300 ---help--- 300 ---help---
301 Debugging support. 301 Debugging support.
302 302
303config LIBERTAS_THINFIRM
304 tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware"
305 depends on WLAN_80211 && MAC80211
306 select FW_LOADER
307 ---help---
308 A library for Marvell Libertas 8xxx devices using thinfirm.
309
310config LIBERTAS_THINFIRM_USB
311 tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
312 depends on LIBERTAS_THINFIRM && USB
313 ---help---
314 A driver for Marvell Libertas 8388 USB devices using thinfirm.
315
303config AIRO 316config AIRO
304 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 317 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
305 depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN) 318 depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN)
@@ -322,6 +335,9 @@ config HERMES
322 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)" 335 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
323 depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211 336 depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211
324 select WIRELESS_EXT 337 select WIRELESS_EXT
338 select FW_LOADER
339 select CRYPTO
340 select CRYPTO_MICHAEL_MIC
325 ---help--- 341 ---help---
326 A driver for 802.11b wireless cards based on the "Hermes" or 342 A driver for 802.11b wireless cards based on the "Hermes" or
327 Intersil HFA384x (Prism 2) MAC controller. This includes the vast 343 Intersil HFA384x (Prism 2) MAC controller. This includes the vast
@@ -411,7 +427,6 @@ config PCMCIA_HERMES
411config PCMCIA_SPECTRUM 427config PCMCIA_SPECTRUM
412 tristate "Symbol Spectrum24 Trilogy PCMCIA card support" 428 tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
413 depends on PCMCIA && HERMES 429 depends on PCMCIA && HERMES
414 select FW_LOADER
415 ---help--- 430 ---help---
416 431
417 This is a driver for 802.11b cards using RAM-loadable Symbol 432 This is a driver for 802.11b cards using RAM-loadable Symbol
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 59aa89ec6e81..59d2d805f60b 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_WAVELAN) += wavelan.o
16obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o 16obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o
17obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o 17obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o
18 18
19obj-$(CONFIG_HERMES) += orinoco.o hermes.o 19obj-$(CONFIG_HERMES) += orinoco.o hermes.o hermes_dld.o
20obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o 20obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o
21obj-$(CONFIG_APPLE_AIRPORT) += airport.o 21obj-$(CONFIG_APPLE_AIRPORT) += airport.o
22obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o 22obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o
@@ -48,6 +48,8 @@ obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o
48obj-$(CONFIG_USB_ZD1201) += zd1201.o 48obj-$(CONFIG_USB_ZD1201) += zd1201.o
49obj-$(CONFIG_LIBERTAS) += libertas/ 49obj-$(CONFIG_LIBERTAS) += libertas/
50 50
51obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/
52
51rtl8180-objs := rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o 53rtl8180-objs := rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o
52rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o 54rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o
53 55
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 3333d4596b8d..b2c050b68890 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -765,11 +765,11 @@ static void adm8211_update_mode(struct ieee80211_hw *dev)
765 765
766 priv->soft_rx_crc = 0; 766 priv->soft_rx_crc = 0;
767 switch (priv->mode) { 767 switch (priv->mode) {
768 case IEEE80211_IF_TYPE_STA: 768 case NL80211_IFTYPE_STATION:
769 priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA); 769 priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA);
770 priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR; 770 priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR;
771 break; 771 break;
772 case IEEE80211_IF_TYPE_IBSS: 772 case NL80211_IFTYPE_ADHOC:
773 priv->nar &= ~ADM8211_NAR_PR; 773 priv->nar &= ~ADM8211_NAR_PR;
774 priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR; 774 priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR;
775 775
@@ -777,7 +777,7 @@ static void adm8211_update_mode(struct ieee80211_hw *dev)
777 if (priv->pdev->revision >= ADM8211_REV_BA) 777 if (priv->pdev->revision >= ADM8211_REV_BA)
778 priv->soft_rx_crc = 1; 778 priv->soft_rx_crc = 1;
779 break; 779 break;
780 case IEEE80211_IF_TYPE_MNTR: 780 case NL80211_IFTYPE_MONITOR:
781 priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST); 781 priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST);
782 priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR; 782 priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR;
783 break; 783 break;
@@ -1410,11 +1410,11 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
1410 struct ieee80211_if_init_conf *conf) 1410 struct ieee80211_if_init_conf *conf)
1411{ 1411{
1412 struct adm8211_priv *priv = dev->priv; 1412 struct adm8211_priv *priv = dev->priv;
1413 if (priv->mode != IEEE80211_IF_TYPE_MNTR) 1413 if (priv->mode != NL80211_IFTYPE_MONITOR)
1414 return -EOPNOTSUPP; 1414 return -EOPNOTSUPP;
1415 1415
1416 switch (conf->type) { 1416 switch (conf->type) {
1417 case IEEE80211_IF_TYPE_STA: 1417 case NL80211_IFTYPE_STATION:
1418 priv->mode = conf->type; 1418 priv->mode = conf->type;
1419 break; 1419 break;
1420 default: 1420 default:
@@ -1437,7 +1437,7 @@ static void adm8211_remove_interface(struct ieee80211_hw *dev,
1437 struct ieee80211_if_init_conf *conf) 1437 struct ieee80211_if_init_conf *conf)
1438{ 1438{
1439 struct adm8211_priv *priv = dev->priv; 1439 struct adm8211_priv *priv = dev->priv;
1440 priv->mode = IEEE80211_IF_TYPE_MNTR; 1440 priv->mode = NL80211_IFTYPE_MONITOR;
1441} 1441}
1442 1442
1443static int adm8211_init_rings(struct ieee80211_hw *dev) 1443static int adm8211_init_rings(struct ieee80211_hw *dev)
@@ -1556,7 +1556,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
1556 ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE | 1556 ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE |
1557 ADM8211_IER_RCIE | ADM8211_IER_TCIE | 1557 ADM8211_IER_RCIE | ADM8211_IER_TCIE |
1558 ADM8211_IER_TDUIE | ADM8211_IER_GPTIE); 1558 ADM8211_IER_TDUIE | ADM8211_IER_GPTIE);
1559 priv->mode = IEEE80211_IF_TYPE_MNTR; 1559 priv->mode = NL80211_IFTYPE_MONITOR;
1560 adm8211_update_mode(dev); 1560 adm8211_update_mode(dev);
1561 ADM8211_CSR_WRITE(RDR, 0); 1561 ADM8211_CSR_WRITE(RDR, 0);
1562 1562
@@ -1571,7 +1571,7 @@ static void adm8211_stop(struct ieee80211_hw *dev)
1571{ 1571{
1572 struct adm8211_priv *priv = dev->priv; 1572 struct adm8211_priv *priv = dev->priv;
1573 1573
1574 priv->mode = IEEE80211_IF_TYPE_INVALID; 1574 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1575 priv->nar = 0; 1575 priv->nar = 0;
1576 ADM8211_CSR_WRITE(NAR, 0); 1576 ADM8211_CSR_WRITE(NAR, 0);
1577 ADM8211_CSR_WRITE(IER, 0); 1577 ADM8211_CSR_WRITE(IER, 0);
@@ -1884,6 +1884,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1884 dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr); 1884 dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
1885 /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */ 1885 /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
1886 dev->flags = IEEE80211_HW_SIGNAL_UNSPEC; 1886 dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
1887 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1887 1888
1888 dev->channel_change_time = 1000; 1889 dev->channel_change_time = 1000;
1889 dev->max_signal = 100; /* FIXME: find better value */ 1890 dev->max_signal = 100; /* FIXME: find better value */
@@ -1895,7 +1896,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1895 priv->tx_power = 0x40; 1896 priv->tx_power = 0x40;
1896 priv->lpf_cutoff = 0xFF; 1897 priv->lpf_cutoff = 0xFF;
1897 priv->lnags_threshold = 0xFF; 1898 priv->lnags_threshold = 0xFF;
1898 priv->mode = IEEE80211_IF_TYPE_INVALID; 1899 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1899 1900
1900 /* Power-on issue. EEPROM won't read correctly without */ 1901 /* Power-on issue. EEPROM won't read correctly without */
1901 if (pdev->revision >= ADM8211_REV_BA) { 1902 if (pdev->revision >= ADM8211_REV_BA) {
@@ -1985,7 +1986,7 @@ static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state)
1985 struct ieee80211_hw *dev = pci_get_drvdata(pdev); 1986 struct ieee80211_hw *dev = pci_get_drvdata(pdev);
1986 struct adm8211_priv *priv = dev->priv; 1987 struct adm8211_priv *priv = dev->priv;
1987 1988
1988 if (priv->mode != IEEE80211_IF_TYPE_INVALID) { 1989 if (priv->mode != NL80211_IFTYPE_UNSPECIFIED) {
1989 ieee80211_stop_queues(dev); 1990 ieee80211_stop_queues(dev);
1990 adm8211_stop(dev); 1991 adm8211_stop(dev);
1991 } 1992 }
@@ -2003,7 +2004,7 @@ static int adm8211_resume(struct pci_dev *pdev)
2003 pci_set_power_state(pdev, PCI_D0); 2004 pci_set_power_state(pdev, PCI_D0);
2004 pci_restore_state(pdev); 2005 pci_restore_state(pdev);
2005 2006
2006 if (priv->mode != IEEE80211_IF_TYPE_INVALID) { 2007 if (priv->mode != NL80211_IFTYPE_UNSPECIFIED) {
2007 adm8211_start(dev); 2008 adm8211_start(dev);
2008 ieee80211_wake_queues(dev); 2009 ieee80211_wake_queues(dev);
2009 } 2010 }
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index b5cd850a4a59..ae58a12befd3 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1915,7 +1915,7 @@ static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) {
1915 struct airo_info *ai = dev->priv; 1915 struct airo_info *ai = dev->priv;
1916 1916
1917 if (!skb) { 1917 if (!skb) {
1918 airo_print_err(dev->name, "%s: skb == NULL!",__FUNCTION__); 1918 airo_print_err(dev->name, "%s: skb == NULL!",__func__);
1919 return 0; 1919 return 0;
1920 } 1920 }
1921 npacks = skb_queue_len (&ai->txq); 1921 npacks = skb_queue_len (&ai->txq);
@@ -1964,7 +1964,7 @@ static int mpi_send_packet (struct net_device *dev)
1964 if ((skb = skb_dequeue(&ai->txq)) == NULL) { 1964 if ((skb = skb_dequeue(&ai->txq)) == NULL) {
1965 airo_print_err(dev->name, 1965 airo_print_err(dev->name,
1966 "%s: Dequeue'd zero in send_packet()", 1966 "%s: Dequeue'd zero in send_packet()",
1967 __FUNCTION__); 1967 __func__);
1968 return 0; 1968 return 0;
1969 } 1969 }
1970 1970
@@ -2115,7 +2115,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
2115 u32 *fids = priv->fids; 2115 u32 *fids = priv->fids;
2116 2116
2117 if ( skb == NULL ) { 2117 if ( skb == NULL ) {
2118 airo_print_err(dev->name, "%s: skb == NULL!", __FUNCTION__); 2118 airo_print_err(dev->name, "%s: skb == NULL!", __func__);
2119 return 0; 2119 return 0;
2120 } 2120 }
2121 2121
@@ -2186,7 +2186,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
2186 } 2186 }
2187 2187
2188 if ( skb == NULL ) { 2188 if ( skb == NULL ) {
2189 airo_print_err(dev->name, "%s: skb == NULL!", __FUNCTION__); 2189 airo_print_err(dev->name, "%s: skb == NULL!", __func__);
2190 return 0; 2190 return 0;
2191 } 2191 }
2192 2192
@@ -4127,7 +4127,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4127 if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid)) 4127 if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid))
4128 airo_print_err(ai->dev->name, 4128 airo_print_err(ai->dev->name,
4129 "%s: MAC should be disabled (rid=%04x)", 4129 "%s: MAC should be disabled (rid=%04x)",
4130 __FUNCTION__, rid); 4130 __func__, rid);
4131 memset(&cmd, 0, sizeof(cmd)); 4131 memset(&cmd, 0, sizeof(cmd));
4132 memset(&rsp, 0, sizeof(rsp)); 4132 memset(&rsp, 0, sizeof(rsp));
4133 4133
@@ -4142,7 +4142,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4142 &ai->config_desc.rid_desc, sizeof(Rid)); 4142 &ai->config_desc.rid_desc, sizeof(Rid));
4143 4143
4144 if (len < 4 || len > 2047) { 4144 if (len < 4 || len > 2047) {
4145 airo_print_err(ai->dev->name, "%s: len=%d", __FUNCTION__, len); 4145 airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
4146 rc = -1; 4146 rc = -1;
4147 } else { 4147 } else {
4148 memcpy((char *)ai->config_desc.virtual_host_addr, 4148 memcpy((char *)ai->config_desc.virtual_host_addr,
@@ -4151,9 +4151,9 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4151 rc = issuecommand(ai, &cmd, &rsp); 4151 rc = issuecommand(ai, &cmd, &rsp);
4152 if ((rc & 0xff00) != 0) { 4152 if ((rc & 0xff00) != 0) {
4153 airo_print_err(ai->dev->name, "%s: Write rid Error %d", 4153 airo_print_err(ai->dev->name, "%s: Write rid Error %d",
4154 __FUNCTION__, rc); 4154 __func__, rc);
4155 airo_print_err(ai->dev->name, "%s: Cmd=%04x", 4155 airo_print_err(ai->dev->name, "%s: Cmd=%04x",
4156 __FUNCTION__, cmd.cmd); 4156 __func__, cmd.cmd);
4157 } 4157 }
4158 4158
4159 if ((rsp.status & 0x7f00)) 4159 if ((rsp.status & 0x7f00))
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 6f7eb9f59223..ce03a2e865fa 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -180,7 +180,8 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
180 } 180 }
181 181
182 /* Allocate space for private device-specific data */ 182 /* Allocate space for private device-specific data */
183 dev = alloc_orinocodev(sizeof(*card), airport_hard_reset); 183 dev = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev,
184 airport_hard_reset, NULL);
184 if (! dev) { 185 if (! dev) {
185 printk(KERN_ERR PFX "Cannot allocate network device\n"); 186 printk(KERN_ERR PFX "Cannot allocate network device\n");
186 return -ENODEV; 187 return -ENODEV;
diff --git a/drivers/net/wireless/ath5k/Makefile b/drivers/net/wireless/ath5k/Makefile
index 564ecd0c5d4b..719cfaef7085 100644
--- a/drivers/net/wireless/ath5k/Makefile
+++ b/drivers/net/wireless/ath5k/Makefile
@@ -1,6 +1,14 @@
1ath5k-y += base.o 1ath5k-y += caps.o
2ath5k-y += hw.o
3ath5k-y += initvals.o 2ath5k-y += initvals.o
3ath5k-y += eeprom.o
4ath5k-y += gpio.o
5ath5k-y += desc.o
6ath5k-y += dma.o
7ath5k-y += qcu.o
8ath5k-y += pcu.o
4ath5k-y += phy.o 9ath5k-y += phy.o
10ath5k-y += reset.o
11ath5k-y += attach.o
12ath5k-y += base.o
5ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o 13ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
6obj-$(CONFIG_ATH5K) += ath5k.o 14obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index 9102eea3c8bf..20018869051d 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -18,18 +18,23 @@
18#ifndef _ATH5K_H 18#ifndef _ATH5K_H
19#define _ATH5K_H 19#define _ATH5K_H
20 20
21/* Set this to 1 to disable regulatory domain restrictions for channel tests. 21/* TODO: Clean up channel debuging -doesn't work anyway- and start
22 * WARNING: This is for debuging only and has side effects (eg. scan takes too 22 * working on reg. control code using all available eeprom information
23 * long and results timeouts). It's also illegal to tune to some of the 23 * -rev. engineering needed- */
24 * supported frequencies in some countries, so use this at your own risk,
25 * you've been warned. */
26#define CHAN_DEBUG 0 24#define CHAN_DEBUG 0
27 25
28#include <linux/io.h> 26#include <linux/io.h>
29#include <linux/types.h> 27#include <linux/types.h>
30#include <net/mac80211.h> 28#include <net/mac80211.h>
31 29
32#include "hw.h" 30/* RX/TX descriptor hw structs
31 * TODO: Driver part should only see sw structs */
32#include "desc.h"
33
34/* EEPROM structs/offsets
35 * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities)
36 * and clean up common bits, then introduce set/get functions in eeprom.c */
37#include "eeprom.h"
33 38
34/* PCI IDs */ 39/* PCI IDs */
35#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */ 40#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */
@@ -87,7 +92,92 @@
87 ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__) 92 ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__)
88 93
89/* 94/*
95 * AR5K REGISTER ACCESS
96 */
97
98/* Some macros to read/write fields */
99
100/* First shift, then mask */
101#define AR5K_REG_SM(_val, _flags) \
102 (((_val) << _flags##_S) & (_flags))
103
104/* First mask, then shift */
105#define AR5K_REG_MS(_val, _flags) \
106 (((_val) & (_flags)) >> _flags##_S)
107
108/* Some registers can hold multiple values of interest. For this
109 * reason when we want to write to these registers we must first
110 * retrieve the values which we do not want to clear (lets call this
111 * old_data) and then set the register with this and our new_value:
112 * ( old_data | new_value) */
113#define AR5K_REG_WRITE_BITS(ah, _reg, _flags, _val) \
114 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & ~(_flags)) | \
115 (((_val) << _flags##_S) & (_flags)), _reg)
116
117#define AR5K_REG_MASKED_BITS(ah, _reg, _flags, _mask) \
118 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & \
119 (_mask)) | (_flags), _reg)
120
121#define AR5K_REG_ENABLE_BITS(ah, _reg, _flags) \
122 ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) | (_flags), _reg)
123
124#define AR5K_REG_DISABLE_BITS(ah, _reg, _flags) \
125 ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) & ~(_flags), _reg)
126
127/* Access to PHY registers */
128#define AR5K_PHY_READ(ah, _reg) \
129 ath5k_hw_reg_read(ah, (ah)->ah_phy + ((_reg) << 2))
130
131#define AR5K_PHY_WRITE(ah, _reg, _val) \
132 ath5k_hw_reg_write(ah, _val, (ah)->ah_phy + ((_reg) << 2))
133
134/* Access QCU registers per queue */
135#define AR5K_REG_READ_Q(ah, _reg, _queue) \
136 (ath5k_hw_reg_read(ah, _reg) & (1 << _queue)) \
137
138#define AR5K_REG_WRITE_Q(ah, _reg, _queue) \
139 ath5k_hw_reg_write(ah, (1 << _queue), _reg)
140
141#define AR5K_Q_ENABLE_BITS(_reg, _queue) do { \
142 _reg |= 1 << _queue; \
143} while (0)
144
145#define AR5K_Q_DISABLE_BITS(_reg, _queue) do { \
146 _reg &= ~(1 << _queue); \
147} while (0)
148
149/* Used while writing initvals */
150#define AR5K_REG_WAIT(_i) do { \
151 if (_i % 64) \
152 udelay(1); \
153} while (0)
154
155/* Register dumps are done per operation mode */
156#define AR5K_INI_RFGAIN_5GHZ 0
157#define AR5K_INI_RFGAIN_2GHZ 1
158
159/* TODO: Clean this up */
160#define AR5K_INI_VAL_11A 0
161#define AR5K_INI_VAL_11A_TURBO 1
162#define AR5K_INI_VAL_11B 2
163#define AR5K_INI_VAL_11G 3
164#define AR5K_INI_VAL_11G_TURBO 4
165#define AR5K_INI_VAL_XR 0
166#define AR5K_INI_VAL_MAX 5
167
168#define AR5K_RF5111_INI_RF_MAX_BANKS AR5K_MAX_RF_BANKS
169#define AR5K_RF5112_INI_RF_MAX_BANKS AR5K_MAX_RF_BANKS
170
171/* Used for BSSID etc manipulation */
172#define AR5K_LOW_ID(_a)( \
173(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24 \
174)
175
176#define AR5K_HIGH_ID(_a) ((_a)[4] | (_a)[5] << 8)
177
178/*
90 * Some tuneable values (these should be changeable by the user) 179 * Some tuneable values (these should be changeable by the user)
180 * TODO: Make use of them and add more options OR use debug/configfs
91 */ 181 */
92#define AR5K_TUNE_DMA_BEACON_RESP 2 182#define AR5K_TUNE_DMA_BEACON_RESP 2
93#define AR5K_TUNE_SW_BEACON_RESP 10 183#define AR5K_TUNE_SW_BEACON_RESP 10
@@ -98,13 +188,13 @@
98#define AR5K_TUNE_REGISTER_TIMEOUT 20000 188#define AR5K_TUNE_REGISTER_TIMEOUT 20000
99/* Register for RSSI threshold has a mask of 0xff, so 255 seems to 189/* Register for RSSI threshold has a mask of 0xff, so 255 seems to
100 * be the max value. */ 190 * be the max value. */
101#define AR5K_TUNE_RSSI_THRES 129 191#define AR5K_TUNE_RSSI_THRES 129
102/* This must be set when setting the RSSI threshold otherwise it can 192/* This must be set when setting the RSSI threshold otherwise it can
103 * prevent a reset. If AR5K_RSSI_THR is read after writing to it 193 * prevent a reset. If AR5K_RSSI_THR is read after writing to it
104 * the BMISS_THRES will be seen as 0, seems harware doesn't keep 194 * the BMISS_THRES will be seen as 0, seems harware doesn't keep
105 * track of it. Max value depends on harware. For AR5210 this is just 7. 195 * track of it. Max value depends on harware. For AR5210 this is just 7.
106 * For AR5211+ this seems to be up to 255. */ 196 * For AR5211+ this seems to be up to 255. */
107#define AR5K_TUNE_BMISS_THRES 7 197#define AR5K_TUNE_BMISS_THRES 7
108#define AR5K_TUNE_REGISTER_DWELL_TIME 20000 198#define AR5K_TUNE_REGISTER_DWELL_TIME 20000
109#define AR5K_TUNE_BEACON_INTERVAL 100 199#define AR5K_TUNE_BEACON_INTERVAL 100
110#define AR5K_TUNE_AIFS 2 200#define AR5K_TUNE_AIFS 2
@@ -123,6 +213,55 @@
123#define AR5K_TUNE_ANT_DIVERSITY true 213#define AR5K_TUNE_ANT_DIVERSITY true
124#define AR5K_TUNE_HWTXTRIES 4 214#define AR5K_TUNE_HWTXTRIES 4
125 215
216#define AR5K_INIT_CARR_SENSE_EN 1
217
218/*Swap RX/TX Descriptor for big endian archs*/
219#if defined(__BIG_ENDIAN)
220#define AR5K_INIT_CFG ( \
221 AR5K_CFG_SWTD | AR5K_CFG_SWRD \
222)
223#else
224#define AR5K_INIT_CFG 0x00000000
225#endif
226
227/* Initial values */
228#define AR5K_INIT_TX_LATENCY 502
229#define AR5K_INIT_USEC 39
230#define AR5K_INIT_USEC_TURBO 79
231#define AR5K_INIT_USEC_32 31
232#define AR5K_INIT_SLOT_TIME 396
233#define AR5K_INIT_SLOT_TIME_TURBO 480
234#define AR5K_INIT_ACK_CTS_TIMEOUT 1024
235#define AR5K_INIT_ACK_CTS_TIMEOUT_TURBO 0x08000800
236#define AR5K_INIT_PROG_IFS 920
237#define AR5K_INIT_PROG_IFS_TURBO 960
238#define AR5K_INIT_EIFS 3440
239#define AR5K_INIT_EIFS_TURBO 6880
240#define AR5K_INIT_SIFS 560
241#define AR5K_INIT_SIFS_TURBO 480
242#define AR5K_INIT_SH_RETRY 10
243#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY
244#define AR5K_INIT_SSH_RETRY 32
245#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY
246#define AR5K_INIT_TX_RETRY 10
247
248#define AR5K_INIT_TRANSMIT_LATENCY ( \
249 (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \
250 (AR5K_INIT_USEC) \
251)
252#define AR5K_INIT_TRANSMIT_LATENCY_TURBO ( \
253 (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \
254 (AR5K_INIT_USEC_TURBO) \
255)
256#define AR5K_INIT_PROTO_TIME_CNTRL ( \
257 (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS << 12) | \
258 (AR5K_INIT_PROG_IFS) \
259)
260#define AR5K_INIT_PROTO_TIME_CNTRL_TURBO ( \
261 (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS_TURBO << 12) | \
262 (AR5K_INIT_PROG_IFS_TURBO) \
263)
264
126/* token to use for aifs, cwmin, cwmax in MadWiFi */ 265/* token to use for aifs, cwmin, cwmax in MadWiFi */
127#define AR5K_TXQ_USEDEFAULT ((u32) -1) 266#define AR5K_TXQ_USEDEFAULT ((u32) -1)
128 267
@@ -196,7 +335,6 @@ struct ath5k_srev_name {
196#define AR5K_SREV_RAD_5133 0xc0 /* MIMO found on 5418 */ 335#define AR5K_SREV_RAD_5133 0xc0 /* MIMO found on 5418 */
197 336
198/* IEEE defs */ 337/* IEEE defs */
199
200#define IEEE80211_MAX_LEN 2500 338#define IEEE80211_MAX_LEN 2500
201 339
202/* TODO add support to mac80211 for vendor-specific rates and modes */ 340/* TODO add support to mac80211 for vendor-specific rates and modes */
@@ -268,21 +406,13 @@ enum ath5k_driver_mode {
268 AR5K_MODE_MAX = 5 406 AR5K_MODE_MAX = 5
269}; 407};
270 408
271/* adding this flag to rate_code enables short preamble, see ar5212_reg.h */
272#define AR5K_SET_SHORT_PREAMBLE 0x04
273
274#define HAS_SHPREAMBLE(_ix) \
275 (rt->rates[_ix].modulation == IEEE80211_RATE_SHORT_PREAMBLE)
276#define SHPREAMBLE_FLAG(_ix) \
277 (HAS_SHPREAMBLE(_ix) ? AR5K_SET_SHORT_PREAMBLE : 0)
278
279 409
280/****************\ 410/****************\
281 TX DEFINITIONS 411 TX DEFINITIONS
282\****************/ 412\****************/
283 413
284/* 414/*
285 * TX Status 415 * TX Status descriptor
286 */ 416 */
287struct ath5k_tx_status { 417struct ath5k_tx_status {
288 u16 ts_seqnum; 418 u16 ts_seqnum;
@@ -354,7 +484,6 @@ enum ath5k_tx_queue_id {
354 AR5K_TX_QUEUE_ID_XR_DATA = 9, 484 AR5K_TX_QUEUE_ID_XR_DATA = 9,
355}; 485};
356 486
357
358/* 487/*
359 * Flags to set hw queue's parameters... 488 * Flags to set hw queue's parameters...
360 */ 489 */
@@ -387,7 +516,8 @@ struct ath5k_txq_info {
387 516
388/* 517/*
389 * Transmit packet types. 518 * Transmit packet types.
390 * These are not fully used inside OpenHAL yet 519 * used on tx control descriptor
520 * TODO: Use them inside base.c corectly
391 */ 521 */
392enum ath5k_pkt_type { 522enum ath5k_pkt_type {
393 AR5K_PKT_TYPE_NORMAL = 0, 523 AR5K_PKT_TYPE_NORMAL = 0,
@@ -430,7 +560,7 @@ enum ath5k_dmasize {
430\****************/ 560\****************/
431 561
432/* 562/*
433 * RX Status 563 * RX Status descriptor
434 */ 564 */
435struct ath5k_rx_status { 565struct ath5k_rx_status {
436 u16 rs_datalen; 566 u16 rs_datalen;
@@ -494,34 +624,59 @@ struct ath5k_beacon_state {
494#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10) 624#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
495 625
496 626
627/*******************************\
628 GAIN OPTIMIZATION DEFINITIONS
629\*******************************/
630
631enum ath5k_rfgain {
632 AR5K_RFGAIN_INACTIVE = 0,
633 AR5K_RFGAIN_READ_REQUESTED,
634 AR5K_RFGAIN_NEED_CHANGE,
635};
636
637#define AR5K_GAIN_CRN_FIX_BITS_5111 4
638#define AR5K_GAIN_CRN_FIX_BITS_5112 7
639#define AR5K_GAIN_CRN_MAX_FIX_BITS AR5K_GAIN_CRN_FIX_BITS_5112
640#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN 15
641#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN 20
642#define AR5K_GAIN_CCK_PROBE_CORR 5
643#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA 15
644#define AR5K_GAIN_STEP_COUNT 10
645#define AR5K_GAIN_PARAM_TX_CLIP 0
646#define AR5K_GAIN_PARAM_PD_90 1
647#define AR5K_GAIN_PARAM_PD_84 2
648#define AR5K_GAIN_PARAM_GAIN_SEL 3
649#define AR5K_GAIN_PARAM_MIX_ORN 0
650#define AR5K_GAIN_PARAM_PD_138 1
651#define AR5K_GAIN_PARAM_PD_137 2
652#define AR5K_GAIN_PARAM_PD_136 3
653#define AR5K_GAIN_PARAM_PD_132 4
654#define AR5K_GAIN_PARAM_PD_131 5
655#define AR5K_GAIN_PARAM_PD_130 6
656#define AR5K_GAIN_CHECK_ADJUST(_g) \
657 ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
658
659struct ath5k_gain_opt_step {
660 s16 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
661 s32 gos_gain;
662};
663
664struct ath5k_gain {
665 u32 g_step_idx;
666 u32 g_current;
667 u32 g_target;
668 u32 g_low;
669 u32 g_high;
670 u32 g_f_corr;
671 u32 g_active;
672 const struct ath5k_gain_opt_step *g_step;
673};
674
675
497/********************\ 676/********************\
498 COMMON DEFINITIONS 677 COMMON DEFINITIONS
499\********************/ 678\********************/
500 679
501/*
502 * Atheros hardware descriptor
503 * This is read and written to by the hardware
504 */
505struct ath5k_desc {
506 u32 ds_link; /* physical address of the next descriptor */
507 u32 ds_data; /* physical address of data buffer (skb) */
508
509 union {
510 struct ath5k_hw_5210_tx_desc ds_tx5210;
511 struct ath5k_hw_5212_tx_desc ds_tx5212;
512 struct ath5k_hw_all_rx_desc ds_rx;
513 } ud;
514} __packed;
515
516#define AR5K_RXDESC_INTREQ 0x0020
517
518#define AR5K_TXDESC_CLRDMASK 0x0001
519#define AR5K_TXDESC_NOACK 0x0002 /*[5211+]*/
520#define AR5K_TXDESC_RTSENA 0x0004
521#define AR5K_TXDESC_CTSENA 0x0008
522#define AR5K_TXDESC_INTREQ 0x0010
523#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/
524
525#define AR5K_SLOT_TIME_9 396 680#define AR5K_SLOT_TIME_9 396
526#define AR5K_SLOT_TIME_20 880 681#define AR5K_SLOT_TIME_20 880
527#define AR5K_SLOT_TIME_MAX 0xffff 682#define AR5K_SLOT_TIME_MAX 0xffff
@@ -553,167 +708,79 @@ struct ath5k_desc {
553#define CHANNEL_MODES CHANNEL_ALL 708#define CHANNEL_MODES CHANNEL_ALL
554 709
555/* 710/*
556 * Used internaly in OpenHAL (ar5211.c/ar5212.c 711 * Used internaly for reset_tx_queue).
557 * for reset_tx_queue). Also see struct struct ieee80211_channel. 712 * Also see struct struct ieee80211_channel.
558 */ 713 */
559#define IS_CHAN_XR(_c) ((_c.hw_value & CHANNEL_XR) != 0) 714#define IS_CHAN_XR(_c) ((_c.hw_value & CHANNEL_XR) != 0)
560#define IS_CHAN_B(_c) ((_c.hw_value & CHANNEL_B) != 0) 715#define IS_CHAN_B(_c) ((_c.hw_value & CHANNEL_B) != 0)
561 716
562/* 717/*
563 * The following structure will be used to map 2GHz channels to 718 * The following structure is used to map 2GHz channels to
564 * 5GHz Atheros channels. 719 * 5GHz Atheros channels.
720 * TODO: Clean up
565 */ 721 */
566struct ath5k_athchan_2ghz { 722struct ath5k_athchan_2ghz {
567 u32 a2_flags; 723 u32 a2_flags;
568 u16 a2_athchan; 724 u16 a2_athchan;
569}; 725};
570 726
571/*
572 * Rate definitions
573 * TODO: Clean them up or move them on mac80211 -most of these infos are
574 * used by the rate control algorytm on MadWiFi.
575 */
576 727
577/* Max number of rates on the rate table and what it seems 728/******************\
578 * Atheros hardware supports */ 729 RATE DEFINITIONS
579#define AR5K_MAX_RATES 32 730\******************/
580 731
581/** 732/**
582 * struct ath5k_rate - rate structure 733 * Seems the ar5xxx harware supports up to 32 rates, indexed by 1-32.
583 * @valid: is this a valid rate for rate control (remove)
584 * @modulation: respective mac80211 modulation
585 * @rate_kbps: rate in kbit/s
586 * @rate_code: hardware rate value, used in &struct ath5k_desc, on RX on
587 * &struct ath5k_rx_status.rs_rate and on TX on
588 * &struct ath5k_tx_status.ts_rate. Seems the ar5xxx harware supports
589 * up to 32 rates, indexed by 1-32. This means we really only need
590 * 6 bits for the rate_code.
591 * @dot11_rate: respective IEEE-802.11 rate value
592 * @control_rate: index of rate assumed to be used to send control frames.
593 * This can be used to set override the value on the rate duration
594 * registers. This is only useful if we can override in the harware at
595 * what rate we want to send control frames at. Note that IEEE-802.11
596 * Ch. 9.6 (after IEEE 802.11g changes) defines the rate at which we
597 * should send ACK/CTS, if we change this value we can be breaking
598 * the spec.
599 * 734 *
600 * This structure is used to get the RX rate or set the TX rate on the 735 * The rate code is used to get the RX rate or set the TX rate on the
601 * hardware descriptors. It is also used for internal modulation control 736 * hardware descriptors. It is also used for internal modulation control
602 * and settings. 737 * and settings.
603 * 738 *
604 * On RX after the &struct ath5k_desc is parsed by the appropriate 739 * This is the hardware rate map we are aware of:
605 * ah_proc_rx_desc() the respective hardware rate value is set in
606 * &struct ath5k_rx_status.rs_rate. On TX the desired rate is set in
607 * &struct ath5k_tx_status.ts_rate which is later used to setup the
608 * &struct ath5k_desc correctly. This is the hardware rate map we are
609 * aware of:
610 * 740 *
611 * rate_code 1 2 3 4 5 6 7 8 741 * rate_code 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08
612 * rate_kbps 3000 1000 ? ? ? 2000 500 48000 742 * rate_kbps 3000 1000 ? ? ? 2000 500 48000
613 * 743 *
614 * rate_code 9 10 11 12 13 14 15 16 744 * rate_code 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x10
615 * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ? 745 * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ?
616 * 746 *
617 * rate_code 17 18 19 20 21 22 23 24 747 * rate_code 17 18 19 20 21 22 23 24
618 * rate_kbps ? ? ? ? ? ? ? 11000 748 * rate_kbps ? ? ? ? ? ? ? 11000
619 * 749 *
620 * rate_code 25 26 27 28 29 30 31 32 750 * rate_code 25 26 27 28 29 30 31 32
621 * rate_kbps 5500 2000 1000 ? ? ? ? ? 751 * rate_kbps 5500 2000 1000 11000S 5500S 2000S ? ?
622 * 752 *
753 * "S" indicates CCK rates with short preamble.
754 *
755 * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
756 * lowest 4 bits, so they are the same as below with a 0xF mask.
757 * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
758 * We handle this in ath5k_setup_bands().
623 */ 759 */
624struct ath5k_rate { 760#define AR5K_MAX_RATES 32
625 u8 valid;
626 u32 modulation;
627 u16 rate_kbps;
628 u8 rate_code;
629 u8 dot11_rate;
630 u8 control_rate;
631};
632
633/* XXX: GRR all this stuff to get leds blinking ??? (check out setcurmode) */
634struct ath5k_rate_table {
635 u16 rate_count;
636 u8 rate_code_to_index[AR5K_MAX_RATES]; /* Back-mapping */
637 struct ath5k_rate rates[AR5K_MAX_RATES];
638};
639
640/*
641 * Rate tables...
642 * TODO: CLEAN THIS !!!
643 */
644#define AR5K_RATES_11A { 8, { \
645 255, 255, 255, 255, 255, 255, 255, 255, 6, 4, 2, 0, \
646 7, 5, 3, 1, 255, 255, 255, 255, 255, 255, 255, 255, \
647 255, 255, 255, 255, 255, 255, 255, 255 }, { \
648 { 1, 0, 6000, 11, 140, 0 }, \
649 { 1, 0, 9000, 15, 18, 0 }, \
650 { 1, 0, 12000, 10, 152, 2 }, \
651 { 1, 0, 18000, 14, 36, 2 }, \
652 { 1, 0, 24000, 9, 176, 4 }, \
653 { 1, 0, 36000, 13, 72, 4 }, \
654 { 1, 0, 48000, 8, 96, 4 }, \
655 { 1, 0, 54000, 12, 108, 4 } } \
656}
657
658#define AR5K_RATES_11B { 4, { \
659 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \
660 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \
661 3, 2, 1, 0, 255, 255, 255, 255 }, { \
662 { 1, 0, 1000, 27, 130, 0 }, \
663 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 2000, 26, 132, 1 }, \
664 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 5500, 25, 139, 1 }, \
665 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 11000, 24, 150, 1 } } \
666}
667
668#define AR5K_RATES_11G { 12, { \
669 255, 255, 255, 255, 255, 255, 255, 255, 10, 8, 6, 4, \
670 11, 9, 7, 5, 255, 255, 255, 255, 255, 255, 255, 255, \
671 3, 2, 1, 0, 255, 255, 255, 255 }, { \
672 { 1, 0, 1000, 27, 2, 0 }, \
673 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 2000, 26, 4, 1 }, \
674 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 5500, 25, 11, 1 }, \
675 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 11000, 24, 22, 1 }, \
676 { 0, 0, 6000, 11, 12, 4 }, \
677 { 0, 0, 9000, 15, 18, 4 }, \
678 { 1, 0, 12000, 10, 24, 6 }, \
679 { 1, 0, 18000, 14, 36, 6 }, \
680 { 1, 0, 24000, 9, 48, 8 }, \
681 { 1, 0, 36000, 13, 72, 8 }, \
682 { 1, 0, 48000, 8, 96, 8 }, \
683 { 1, 0, 54000, 12, 108, 8 } } \
684}
685
686#define AR5K_RATES_TURBO { 8, { \
687 255, 255, 255, 255, 255, 255, 255, 255, 6, 4, 2, 0, \
688 7, 5, 3, 1, 255, 255, 255, 255, 255, 255, 255, 255, \
689 255, 255, 255, 255, 255, 255, 255, 255 }, { \
690 { 1, MODULATION_TURBO, 6000, 11, 140, 0 }, \
691 { 1, MODULATION_TURBO, 9000, 15, 18, 0 }, \
692 { 1, MODULATION_TURBO, 12000, 10, 152, 2 }, \
693 { 1, MODULATION_TURBO, 18000, 14, 36, 2 }, \
694 { 1, MODULATION_TURBO, 24000, 9, 176, 4 }, \
695 { 1, MODULATION_TURBO, 36000, 13, 72, 4 }, \
696 { 1, MODULATION_TURBO, 48000, 8, 96, 4 }, \
697 { 1, MODULATION_TURBO, 54000, 12, 108, 4 } } \
698}
699 761
700#define AR5K_RATES_XR { 12, { \ 762/* B */
701 255, 3, 1, 255, 255, 255, 2, 0, 10, 8, 6, 4, \ 763#define ATH5K_RATE_CODE_1M 0x1B
702 11, 9, 7, 5, 255, 255, 255, 255, 255, 255, 255, 255, \ 764#define ATH5K_RATE_CODE_2M 0x1A
703 255, 255, 255, 255, 255, 255, 255, 255 }, { \ 765#define ATH5K_RATE_CODE_5_5M 0x19
704 { 1, MODULATION_XR, 500, 7, 129, 0 }, \ 766#define ATH5K_RATE_CODE_11M 0x18
705 { 1, MODULATION_XR, 1000, 2, 139, 1 }, \ 767/* A and G */
706 { 1, MODULATION_XR, 2000, 6, 150, 2 }, \ 768#define ATH5K_RATE_CODE_6M 0x0B
707 { 1, MODULATION_XR, 3000, 1, 150, 3 }, \ 769#define ATH5K_RATE_CODE_9M 0x0F
708 { 1, 0, 6000, 11, 140, 4 }, \ 770#define ATH5K_RATE_CODE_12M 0x0A
709 { 1, 0, 9000, 15, 18, 4 }, \ 771#define ATH5K_RATE_CODE_18M 0x0E
710 { 1, 0, 12000, 10, 152, 6 }, \ 772#define ATH5K_RATE_CODE_24M 0x09
711 { 1, 0, 18000, 14, 36, 6 }, \ 773#define ATH5K_RATE_CODE_36M 0x0D
712 { 1, 0, 24000, 9, 176, 8 }, \ 774#define ATH5K_RATE_CODE_48M 0x08
713 { 1, 0, 36000, 13, 72, 8 }, \ 775#define ATH5K_RATE_CODE_54M 0x0C
714 { 1, 0, 48000, 8, 96, 8 }, \ 776/* XR */
715 { 1, 0, 54000, 12, 108, 8 } } \ 777#define ATH5K_RATE_CODE_XR_500K 0x07
716} 778#define ATH5K_RATE_CODE_XR_1M 0x02
779#define ATH5K_RATE_CODE_XR_2M 0x06
780#define ATH5K_RATE_CODE_XR_3M 0x01
781
782/* adding this flag to rate_code enables short preamble */
783#define AR5K_SET_SHORT_PREAMBLE 0x04
717 784
718/* 785/*
719 * Crypto definitions 786 * Crypto definitions
@@ -735,7 +802,6 @@ struct ath5k_rate_table {
735 return (false); \ 802 return (false); \
736} while (0) 803} while (0)
737 804
738
739enum ath5k_ant_setting { 805enum ath5k_ant_setting {
740 AR5K_ANT_VARIABLE = 0, /* variable by programming */ 806 AR5K_ANT_VARIABLE = 0, /* variable by programming */
741 AR5K_ANT_FIXED_A = 1, /* fixed to 11a frequencies */ 807 AR5K_ANT_FIXED_A = 1, /* fixed to 11a frequencies */
@@ -846,7 +912,8 @@ enum ath5k_power_mode {
846 912
847/* 913/*
848 * These match net80211 definitions (not used in 914 * These match net80211 definitions (not used in
849 * d80211). 915 * mac80211).
916 * TODO: Clean this up
850 */ 917 */
851#define AR5K_LED_INIT 0 /*IEEE80211_S_INIT*/ 918#define AR5K_LED_INIT 0 /*IEEE80211_S_INIT*/
852#define AR5K_LED_SCAN 1 /*IEEE80211_S_SCAN*/ 919#define AR5K_LED_SCAN 1 /*IEEE80211_S_SCAN*/
@@ -862,7 +929,8 @@ enum ath5k_power_mode {
862/* 929/*
863 * Chipset capabilities -see ath5k_hw_get_capability- 930 * Chipset capabilities -see ath5k_hw_get_capability-
864 * get_capability function is not yet fully implemented 931 * get_capability function is not yet fully implemented
865 * in OpenHAL so most of these don't work yet... 932 * in ath5k so most of these don't work yet...
933 * TODO: Implement these & merge with _TUNE_ stuff above
866 */ 934 */
867enum ath5k_capability_type { 935enum ath5k_capability_type {
868 AR5K_CAP_REG_DMN = 0, /* Used to get current reg. domain id */ 936 AR5K_CAP_REG_DMN = 0, /* Used to get current reg. domain id */
@@ -931,6 +999,7 @@ struct ath5k_capabilities {
931#define AR5K_MAX_GPIO 10 999#define AR5K_MAX_GPIO 10
932#define AR5K_MAX_RF_BANKS 8 1000#define AR5K_MAX_RF_BANKS 8
933 1001
1002/* TODO: Clean up and merge with ath5k_softc */
934struct ath5k_hw { 1003struct ath5k_hw {
935 u32 ah_magic; 1004 u32 ah_magic;
936 1005
@@ -939,7 +1008,7 @@ struct ath5k_hw {
939 1008
940 enum ath5k_int ah_imr; 1009 enum ath5k_int ah_imr;
941 1010
942 enum ieee80211_if_types ah_op_mode; 1011 enum nl80211_iftype ah_op_mode;
943 enum ath5k_power_mode ah_power_mode; 1012 enum ath5k_power_mode ah_power_mode;
944 struct ieee80211_channel ah_current_channel; 1013 struct ieee80211_channel ah_current_channel;
945 bool ah_turbo; 1014 bool ah_turbo;
@@ -1023,11 +1092,13 @@ struct ath5k_hw {
1023 /* 1092 /*
1024 * Function pointers 1093 * Function pointers
1025 */ 1094 */
1095 int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
1096 u32 size, unsigned int flags);
1026 int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1097 int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1027 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int, 1098 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
1028 unsigned int, unsigned int, unsigned int, unsigned int, 1099 unsigned int, unsigned int, unsigned int, unsigned int,
1029 unsigned int, unsigned int, unsigned int); 1100 unsigned int, unsigned int, unsigned int);
1030 int (*ah_setup_xtx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1101 int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1031 unsigned int, unsigned int, unsigned int, unsigned int, 1102 unsigned int, unsigned int, unsigned int, unsigned int,
1032 unsigned int, unsigned int); 1103 unsigned int, unsigned int);
1033 int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1104 int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
@@ -1040,33 +1111,38 @@ struct ath5k_hw {
1040 * Prototypes 1111 * Prototypes
1041 */ 1112 */
1042 1113
1043/* General Functions */
1044extern int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, bool is_set);
1045/* Attach/Detach Functions */ 1114/* Attach/Detach Functions */
1046extern struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version); 1115extern struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version);
1047extern const struct ath5k_rate_table *ath5k_hw_get_rate_table(struct ath5k_hw *ah, unsigned int mode);
1048extern void ath5k_hw_detach(struct ath5k_hw *ah); 1116extern void ath5k_hw_detach(struct ath5k_hw *ah);
1117
1049/* Reset Functions */ 1118/* Reset Functions */
1050extern int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode, struct ieee80211_channel *channel, bool change_channel); 1119extern int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
1120extern int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel);
1051/* Power management functions */ 1121/* Power management functions */
1052extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration); 1122extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration);
1123
1053/* DMA Related Functions */ 1124/* DMA Related Functions */
1054extern void ath5k_hw_start_rx(struct ath5k_hw *ah); 1125extern void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
1055extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah); 1126extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
1056extern u32 ath5k_hw_get_rx_buf(struct ath5k_hw *ah); 1127extern u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
1057extern void ath5k_hw_put_rx_buf(struct ath5k_hw *ah, u32 phys_addr); 1128extern void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
1058extern int ath5k_hw_tx_start(struct ath5k_hw *ah, unsigned int queue); 1129extern int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
1059extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue); 1130extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
1060extern u32 ath5k_hw_get_tx_buf(struct ath5k_hw *ah, unsigned int queue); 1131extern u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
1061extern int ath5k_hw_put_tx_buf(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr); 1132extern int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
1133 u32 phys_addr);
1062extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase); 1134extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
1063/* Interrupt handling */ 1135/* Interrupt handling */
1064extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah); 1136extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
1065extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask); 1137extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
1066extern enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask); 1138extern enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum
1139ath5k_int new_mask);
1067extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats); 1140extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats);
1141
1068/* EEPROM access functions */ 1142/* EEPROM access functions */
1069extern int ath5k_hw_set_regdomain(struct ath5k_hw *ah, u16 regdomain); 1143extern int ath5k_eeprom_init(struct ath5k_hw *ah);
1144extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
1145
1070/* Protocol Control Unit Functions */ 1146/* Protocol Control Unit Functions */
1071extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); 1147extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
1072/* BSSID Functions */ 1148/* BSSID Functions */
@@ -1076,14 +1152,14 @@ extern void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc
1076extern int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask); 1152extern int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
1077/* Receive start/stop functions */ 1153/* Receive start/stop functions */
1078extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah); 1154extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
1079extern void ath5k_hw_stop_pcu_recv(struct ath5k_hw *ah); 1155extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
1080/* RX Filter functions */ 1156/* RX Filter functions */
1081extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1); 1157extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
1082extern int ath5k_hw_set_mcast_filterindex(struct ath5k_hw *ah, u32 index); 1158extern int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
1083extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index); 1159extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
1084extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah); 1160extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
1085extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter); 1161extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
1086/* Beacon related functions */ 1162/* Beacon control functions */
1087extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah); 1163extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah);
1088extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah); 1164extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
1089extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah); 1165extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
@@ -1105,61 +1181,129 @@ extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
1105extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry); 1181extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
1106extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac); 1182extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac);
1107extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac); 1183extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
1184
1108/* Queue Control Unit, DFS Control Unit Functions */ 1185/* Queue Control Unit, DFS Control Unit Functions */
1109extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, struct ath5k_txq_info *queue_info);
1110extern int ath5k_hw_setup_tx_queueprops(struct ath5k_hw *ah, int queue, const struct ath5k_txq_info *queue_info);
1111extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info); 1186extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info);
1187extern int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
1188 const struct ath5k_txq_info *queue_info);
1189extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
1190 enum ath5k_tx_queue queue_type,
1191 struct ath5k_txq_info *queue_info);
1192extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
1112extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1193extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1113extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1194extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1114extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
1115extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
1116extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah); 1195extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah);
1196extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
1197
1117/* Hardware Descriptor Functions */ 1198/* Hardware Descriptor Functions */
1118extern int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, u32 size, unsigned int flags); 1199extern int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
1200
1119/* GPIO Functions */ 1201/* GPIO Functions */
1120extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state); 1202extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
1121extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
1122extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio); 1203extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
1204extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
1123extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio); 1205extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
1124extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val); 1206extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
1125extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level); 1207extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level);
1208
1126/* Misc functions */ 1209/* Misc functions */
1210int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
1127extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result); 1211extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
1128 1212extern int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
1213extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
1129 1214
1130/* Initial register settings functions */ 1215/* Initial register settings functions */
1131extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel); 1216extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
1217
1132/* Initialize RF */ 1218/* Initialize RF */
1133extern int ath5k_hw_rfregs(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int mode); 1219extern int ath5k_hw_rfregs(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int mode);
1134extern int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq); 1220extern int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq);
1135extern enum ath5k_rfgain ath5k_hw_get_rf_gain(struct ath5k_hw *ah); 1221extern enum ath5k_rfgain ath5k_hw_get_rf_gain(struct ath5k_hw *ah);
1136extern int ath5k_hw_set_rfgain_opt(struct ath5k_hw *ah); 1222extern int ath5k_hw_set_rfgain_opt(struct ath5k_hw *ah);
1137
1138
1139/* PHY/RF channel functions */ 1223/* PHY/RF channel functions */
1140extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags); 1224extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
1141extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1225extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1142/* PHY calibration */ 1226/* PHY calibration */
1143extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1227extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1144extern int ath5k_hw_phy_disable(struct ath5k_hw *ah); 1228extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
1145/* Misc PHY functions */ 1229/* Misc PHY functions */
1146extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan); 1230extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
1147extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant); 1231extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant);
1148extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah); 1232extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
1149extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq); 1233extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
1150/* TX power setup */ 1234/* TX power setup */
1151extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int txpower); 1235extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int txpower);
1152extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power); 1236extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power);
1153 1237
1238/*
1239 * Functions used internaly
1240 */
1154 1241
1242/*
1243 * Translate usec to hw clock units
1244 */
1245static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
1246{
1247 return turbo ? (usec * 80) : (usec * 40);
1248}
1249
1250/*
1251 * Translate hw clock units to usec
1252 */
1253static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
1254{
1255 return turbo ? (clock / 80) : (clock / 40);
1256}
1257
1258/*
1259 * Read from a register
1260 */
1155static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) 1261static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
1156{ 1262{
1157 return ioread32(ah->ah_iobase + reg); 1263 return ioread32(ah->ah_iobase + reg);
1158} 1264}
1159 1265
1266/*
1267 * Write to a register
1268 */
1160static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg) 1269static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
1161{ 1270{
1162 iowrite32(val, ah->ah_iobase + reg); 1271 iowrite32(val, ah->ah_iobase + reg);
1163} 1272}
1164 1273
1274#if defined(_ATH5K_RESET) || defined(_ATH5K_PHY)
1275/*
1276 * Check if a register write has been completed
1277 */
1278static int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag,
1279 u32 val, bool is_set)
1280{
1281 int i;
1282 u32 data;
1283
1284 for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
1285 data = ath5k_hw_reg_read(ah, reg);
1286 if (is_set && (data & flag))
1287 break;
1288 else if ((data & flag) == val)
1289 break;
1290 udelay(15);
1291 }
1292
1293 return (i <= 0) ? -EAGAIN : 0;
1294}
1295#endif
1296
1297static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
1298{
1299 u32 retval = 0, bit, i;
1300
1301 for (i = 0; i < bits; i++) {
1302 bit = (val >> i) & 1;
1303 retval = (retval << 1) | bit;
1304 }
1305
1306 return retval;
1307}
1308
1165#endif 1309#endif
diff --git a/drivers/net/wireless/ath5k/attach.c b/drivers/net/wireless/ath5k/attach.c
new file mode 100644
index 000000000000..153c4111fabe
--- /dev/null
+++ b/drivers/net/wireless/ath5k/attach.c
@@ -0,0 +1,315 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/*************************************\
20* Attach/Detach Functions and helpers *
21\*************************************/
22
23#include <linux/pci.h>
24#include "ath5k.h"
25#include "reg.h"
26#include "debug.h"
27#include "base.h"
28
29/**
30 * ath5k_hw_post - Power On Self Test helper function
31 *
32 * @ah: The &struct ath5k_hw
33 */
34static int ath5k_hw_post(struct ath5k_hw *ah)
35{
36
37 int i, c;
38 u16 cur_reg;
39 u16 regs[2] = {AR5K_STA_ID0, AR5K_PHY(8)};
40 u32 var_pattern;
41 u32 static_pattern[4] = {
42 0x55555555, 0xaaaaaaaa,
43 0x66666666, 0x99999999
44 };
45 u32 init_val;
46 u32 cur_val;
47
48 for (c = 0; c < 2; c++) {
49
50 cur_reg = regs[c];
51
52 /* Save previous value */
53 init_val = ath5k_hw_reg_read(ah, cur_reg);
54
55 for (i = 0; i < 256; i++) {
56 var_pattern = i << 16 | i;
57 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
58 cur_val = ath5k_hw_reg_read(ah, cur_reg);
59
60 if (cur_val != var_pattern) {
61 ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
62 return -EAGAIN;
63 }
64
65 /* Found on ndiswrapper dumps */
66 var_pattern = 0x0039080f;
67 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
68 }
69
70 for (i = 0; i < 4; i++) {
71 var_pattern = static_pattern[i];
72 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
73 cur_val = ath5k_hw_reg_read(ah, cur_reg);
74
75 if (cur_val != var_pattern) {
76 ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
77 return -EAGAIN;
78 }
79
80 /* Found on ndiswrapper dumps */
81 var_pattern = 0x003b080f;
82 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
83 }
84
85 /* Restore previous value */
86 ath5k_hw_reg_write(ah, init_val, cur_reg);
87
88 }
89
90 return 0;
91
92}
93
94/**
95 * ath5k_hw_attach - Check if hw is supported and init the needed structs
96 *
97 * @sc: The &struct ath5k_softc we got from the driver's attach function
98 * @mac_version: The mac version id (check out ath5k.h) based on pci id
99 *
100 * Check if the device is supported, perform a POST and initialize the needed
101 * structs. Returns -ENOMEM if we don't have memory for the needed structs,
102 * -ENODEV if the device is not supported or prints an error msg if something
103 * else went wrong.
104 */
105struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
106{
107 struct ath5k_hw *ah;
108 struct pci_dev *pdev = sc->pdev;
109 u8 mac[ETH_ALEN];
110 int ret;
111 u32 srev;
112
113 /*If we passed the test malloc a ath5k_hw struct*/
114 ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
115 if (ah == NULL) {
116 ret = -ENOMEM;
117 ATH5K_ERR(sc, "out of memory\n");
118 goto err;
119 }
120
121 ah->ah_sc = sc;
122 ah->ah_iobase = sc->iobase;
123
124 /*
125 * HW information
126 */
127 ah->ah_op_mode = NL80211_IFTYPE_STATION;
128 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
129 ah->ah_turbo = false;
130 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
131 ah->ah_imr = 0;
132 ah->ah_atim_window = 0;
133 ah->ah_aifs = AR5K_TUNE_AIFS;
134 ah->ah_cw_min = AR5K_TUNE_CWMIN;
135 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
136 ah->ah_software_retry = false;
137 ah->ah_ant_diversity = AR5K_TUNE_ANT_DIVERSITY;
138
139 /*
140 * Set the mac revision based on the pci id
141 */
142 ah->ah_version = mac_version;
143
144 /*Fill the ath5k_hw struct with the needed functions*/
145 ret = ath5k_hw_init_desc_functions(ah);
146 if (ret)
147 goto err_free;
148
149 /* Bring device out of sleep and reset it's units */
150 ret = ath5k_hw_nic_wakeup(ah, CHANNEL_B, true);
151 if (ret)
152 goto err_free;
153
154 /* Get MAC, PHY and RADIO revisions */
155 srev = ath5k_hw_reg_read(ah, AR5K_SREV);
156 ah->ah_mac_srev = srev;
157 ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
158 ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
159 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
160 0xffffffff;
161 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
162 CHANNEL_5GHZ);
163
164 if (ah->ah_version == AR5K_AR5210)
165 ah->ah_radio_2ghz_revision = 0;
166 else
167 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
168 CHANNEL_2GHZ);
169
170 /* Return on unsuported chips (unsupported eeprom etc) */
171 if ((srev >= AR5K_SREV_VER_AR5416) &&
172 (srev < AR5K_SREV_VER_AR2425)) {
173 ATH5K_ERR(sc, "Device not yet supported.\n");
174 ret = -ENODEV;
175 goto err_free;
176 } else if (srev == AR5K_SREV_VER_AR2425) {
177 ATH5K_WARN(sc, "Support for RF2425 is under development.\n");
178 }
179
180 /* Identify single chip solutions */
181 if (((srev <= AR5K_SREV_VER_AR5414) &&
182 (srev >= AR5K_SREV_VER_AR2413)) ||
183 (srev == AR5K_SREV_VER_AR2425)) {
184 ah->ah_single_chip = true;
185 } else {
186 ah->ah_single_chip = false;
187 }
188
189 /* Single chip radio */
190 if (ah->ah_radio_2ghz_revision == ah->ah_radio_5ghz_revision)
191 ah->ah_radio_2ghz_revision = 0;
192
193 /* Identify the radio chip*/
194 if (ah->ah_version == AR5K_AR5210) {
195 ah->ah_radio = AR5K_RF5110;
196 /*
197 * Register returns 0x0/0x04 for radio revision
198 * so ath5k_hw_radio_revision doesn't parse the value
199 * correctly. For now we are based on mac's srev to
200 * identify RF2425 radio.
201 */
202 } else if (srev == AR5K_SREV_VER_AR2425) {
203 ah->ah_radio = AR5K_RF2425;
204 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
205 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) {
206 ah->ah_radio = AR5K_RF5111;
207 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
208 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC0) {
209 ah->ah_radio = AR5K_RF5112;
210 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
211 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) {
212 ah->ah_radio = AR5K_RF2413;
213 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
214 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC2) {
215 ah->ah_radio = AR5K_RF5413;
216 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
217 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5133) {
218 /* AR5424 */
219 if (srev >= AR5K_SREV_VER_AR5424) {
220 ah->ah_radio = AR5K_RF5413;
221 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
222 /* AR2424 */
223 } else {
224 ah->ah_radio = AR5K_RF2413; /* For testing */
225 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
226 }
227 }
228 ah->ah_phy = AR5K_PHY(0);
229
230 /*
231 * Write PCI-E power save settings
232 */
233 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
234 ath5k_hw_reg_write(ah, 0x9248fc00, 0x4080);
235 ath5k_hw_reg_write(ah, 0x24924924, 0x4080);
236 ath5k_hw_reg_write(ah, 0x28000039, 0x4080);
237 ath5k_hw_reg_write(ah, 0x53160824, 0x4080);
238 ath5k_hw_reg_write(ah, 0xe5980579, 0x4080);
239 ath5k_hw_reg_write(ah, 0x001defff, 0x4080);
240 ath5k_hw_reg_write(ah, 0x1aaabe40, 0x4080);
241 ath5k_hw_reg_write(ah, 0xbe105554, 0x4080);
242 ath5k_hw_reg_write(ah, 0x000e3007, 0x4080);
243 ath5k_hw_reg_write(ah, 0x00000000, 0x4084);
244 }
245
246 /*
247 * POST
248 */
249 ret = ath5k_hw_post(ah);
250 if (ret)
251 goto err_free;
252
253 /* Write AR5K_PCICFG_UNK on 2112B and later chips */
254 if (ah->ah_radio_5ghz_revision > AR5K_SREV_RAD_2112B ||
255 srev > AR5K_SREV_VER_AR2413) {
256 ath5k_hw_reg_write(ah, AR5K_PCICFG_UNK, AR5K_PCICFG);
257 }
258
259 /*
260 * Get card capabilities, values, ...
261 */
262 ret = ath5k_eeprom_init(ah);
263 if (ret) {
264 ATH5K_ERR(sc, "unable to init EEPROM\n");
265 goto err_free;
266 }
267
268 /* Get misc capabilities */
269 ret = ath5k_hw_set_capabilities(ah);
270 if (ret) {
271 ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n",
272 sc->pdev->device);
273 goto err_free;
274 }
275
276 /* Get MAC address */
277 ret = ath5k_eeprom_read_mac(ah, mac);
278 if (ret) {
279 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
280 sc->pdev->device);
281 goto err_free;
282 }
283
284 ath5k_hw_set_lladdr(ah, mac);
285 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
286 memset(ah->ah_bssid, 0xff, ETH_ALEN);
287 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
288 ath5k_hw_set_opmode(ah);
289
290 ath5k_hw_set_rfgain_opt(ah);
291
292 return ah;
293err_free:
294 kfree(ah);
295err:
296 return ERR_PTR(ret);
297}
298
299/**
300 * ath5k_hw_detach - Free the &struct ath5k_hw and its RF banks buffer
301 *
302 * @ah: The &struct ath5k_hw to free; must not be dereferenced afterwards
303 */
304void ath5k_hw_detach(struct ath5k_hw *ah)
305{
306	ATH5K_TRACE(ah->ah_sc);
307
308	__set_bit(ATH_STAT_INVALID, ah->ah_sc->status); /* mark softc state invalid so other paths stop touching hw */
309
310	if (ah->ah_rf_banks != NULL) /* NOTE(review): NULL check is redundant — kfree(NULL) is a no-op */
311		kfree(ah->ah_rf_banks);
312
313	/* assume interrupts are down */
314	kfree(ah);
315}
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 0676c6d84383..e09ed2ce6753 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -132,6 +132,48 @@ static struct ath5k_srev_name srev_names[] = {
132 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, 132 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
133}; 133};
134 134
135static struct ieee80211_rate ath5k_rates[] = {
136 { .bitrate = 10,
137 .hw_value = ATH5K_RATE_CODE_1M, },
138 { .bitrate = 20,
139 .hw_value = ATH5K_RATE_CODE_2M,
140 .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
141 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
142 { .bitrate = 55,
143 .hw_value = ATH5K_RATE_CODE_5_5M,
144 .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
145 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
146 { .bitrate = 110,
147 .hw_value = ATH5K_RATE_CODE_11M,
148 .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
149 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
150 { .bitrate = 60,
151 .hw_value = ATH5K_RATE_CODE_6M,
152 .flags = 0 },
153 { .bitrate = 90,
154 .hw_value = ATH5K_RATE_CODE_9M,
155 .flags = 0 },
156 { .bitrate = 120,
157 .hw_value = ATH5K_RATE_CODE_12M,
158 .flags = 0 },
159 { .bitrate = 180,
160 .hw_value = ATH5K_RATE_CODE_18M,
161 .flags = 0 },
162 { .bitrate = 240,
163 .hw_value = ATH5K_RATE_CODE_24M,
164 .flags = 0 },
165 { .bitrate = 360,
166 .hw_value = ATH5K_RATE_CODE_36M,
167 .flags = 0 },
168 { .bitrate = 480,
169 .hw_value = ATH5K_RATE_CODE_48M,
170 .flags = 0 },
171 { .bitrate = 540,
172 .hw_value = ATH5K_RATE_CODE_54M,
173 .flags = 0 },
174 /* XR missing */
175};
176
135/* 177/*
136 * Prototypes - PCI stack related functions 178 * Prototypes - PCI stack related functions
137 */ 179 */
@@ -162,7 +204,8 @@ static struct pci_driver ath5k_pci_driver = {
162 * Prototypes - MAC 802.11 stack related functions 204 * Prototypes - MAC 802.11 stack related functions
163 */ 205 */
164static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 206static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
165static int ath5k_reset(struct ieee80211_hw *hw); 207static int ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel);
208static int ath5k_reset_wake(struct ath5k_softc *sc);
166static int ath5k_start(struct ieee80211_hw *hw); 209static int ath5k_start(struct ieee80211_hw *hw);
167static void ath5k_stop(struct ieee80211_hw *hw); 210static void ath5k_stop(struct ieee80211_hw *hw);
168static int ath5k_add_interface(struct ieee80211_hw *hw, 211static int ath5k_add_interface(struct ieee80211_hw *hw,
@@ -218,20 +261,16 @@ static void ath5k_detach(struct pci_dev *pdev,
218 struct ieee80211_hw *hw); 261 struct ieee80211_hw *hw);
219/* Channel/mode setup */ 262/* Channel/mode setup */
220static inline short ath5k_ieee2mhz(short chan); 263static inline short ath5k_ieee2mhz(short chan);
221static unsigned int ath5k_copy_rates(struct ieee80211_rate *rates,
222 const struct ath5k_rate_table *rt,
223 unsigned int max);
224static unsigned int ath5k_copy_channels(struct ath5k_hw *ah, 264static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
225 struct ieee80211_channel *channels, 265 struct ieee80211_channel *channels,
226 unsigned int mode, 266 unsigned int mode,
227 unsigned int max); 267 unsigned int max);
228static int ath5k_getchannels(struct ieee80211_hw *hw); 268static int ath5k_setup_bands(struct ieee80211_hw *hw);
229static int ath5k_chan_set(struct ath5k_softc *sc, 269static int ath5k_chan_set(struct ath5k_softc *sc,
230 struct ieee80211_channel *chan); 270 struct ieee80211_channel *chan);
231static void ath5k_setcurmode(struct ath5k_softc *sc, 271static void ath5k_setcurmode(struct ath5k_softc *sc,
232 unsigned int mode); 272 unsigned int mode);
233static void ath5k_mode_setup(struct ath5k_softc *sc); 273static void ath5k_mode_setup(struct ath5k_softc *sc);
234static void ath5k_set_total_hw_rates(struct ath5k_softc *sc);
235 274
236/* Descriptor setup */ 275/* Descriptor setup */
237static int ath5k_desc_alloc(struct ath5k_softc *sc, 276static int ath5k_desc_alloc(struct ath5k_softc *sc,
@@ -446,6 +485,12 @@ ath5k_pci_probe(struct pci_dev *pdev,
446 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 485 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
447 IEEE80211_HW_SIGNAL_DBM | 486 IEEE80211_HW_SIGNAL_DBM |
448 IEEE80211_HW_NOISE_DBM; 487 IEEE80211_HW_NOISE_DBM;
488
489 hw->wiphy->interface_modes =
490 BIT(NL80211_IFTYPE_STATION) |
491 BIT(NL80211_IFTYPE_ADHOC) |
492 BIT(NL80211_IFTYPE_MESH_POINT);
493
449 hw->extra_tx_headroom = 2; 494 hw->extra_tx_headroom = 2;
450 hw->channel_change_time = 5000; 495 hw->channel_change_time = 5000;
451 sc = hw->priv; 496 sc = hw->priv;
@@ -462,7 +507,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
462 507
463 sc->iobase = mem; /* So we can unmap it on detach */ 508 sc->iobase = mem; /* So we can unmap it on detach */
464 sc->cachelsz = csz * sizeof(u32); /* convert to bytes */ 509 sc->cachelsz = csz * sizeof(u32); /* convert to bytes */
465 sc->opmode = IEEE80211_IF_TYPE_STA; 510 sc->opmode = NL80211_IFTYPE_STATION;
466 mutex_init(&sc->lock); 511 mutex_init(&sc->lock);
467 spin_lock_init(&sc->rxbuflock); 512 spin_lock_init(&sc->rxbuflock);
468 spin_lock_init(&sc->txbuflock); 513 spin_lock_init(&sc->txbuflock);
@@ -646,7 +691,6 @@ err_no_irq:
646#endif /* CONFIG_PM */ 691#endif /* CONFIG_PM */
647 692
648 693
649
650/***********************\ 694/***********************\
651* Driver Initialization * 695* Driver Initialization *
652\***********************/ 696\***********************/
@@ -669,7 +713,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
669 * return false w/o doing anything. MAC's that do 713 * return false w/o doing anything. MAC's that do
670 * support it will return true w/o doing anything. 714 * support it will return true w/o doing anything.
671 */ 715 */
672 ret = ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); 716 ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
673 if (ret < 0) 717 if (ret < 0)
674 goto err; 718 goto err;
675 if (ret > 0) 719 if (ret > 0)
@@ -688,15 +732,12 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
688 * on settings like the phy mode and regulatory 732 * on settings like the phy mode and regulatory
689 * domain restrictions. 733 * domain restrictions.
690 */ 734 */
691 ret = ath5k_getchannels(hw); 735 ret = ath5k_setup_bands(hw);
692 if (ret) { 736 if (ret) {
693 ATH5K_ERR(sc, "can't get channels\n"); 737 ATH5K_ERR(sc, "can't get channels\n");
694 goto err; 738 goto err;
695 } 739 }
696 740
697 /* Set *_rates so we can map hw rate index */
698 ath5k_set_total_hw_rates(sc);
699
700 /* NB: setup here so ath5k_rate_update is happy */ 741 /* NB: setup here so ath5k_rate_update is happy */
701 if (test_bit(AR5K_MODE_11A, ah->ah_modes)) 742 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
702 ath5k_setcurmode(sc, AR5K_MODE_11A); 743 ath5k_setcurmode(sc, AR5K_MODE_11A);
@@ -813,27 +854,6 @@ ath5k_ieee2mhz(short chan)
813} 854}
814 855
815static unsigned int 856static unsigned int
816ath5k_copy_rates(struct ieee80211_rate *rates,
817 const struct ath5k_rate_table *rt,
818 unsigned int max)
819{
820 unsigned int i, count;
821
822 if (rt == NULL)
823 return 0;
824
825 for (i = 0, count = 0; i < rt->rate_count && max > 0; i++) {
826 rates[count].bitrate = rt->rates[i].rate_kbps / 100;
827 rates[count].hw_value = rt->rates[i].rate_code;
828 rates[count].flags = rt->rates[i].modulation;
829 count++;
830 max--;
831 }
832
833 return count;
834}
835
836static unsigned int
837ath5k_copy_channels(struct ath5k_hw *ah, 857ath5k_copy_channels(struct ath5k_hw *ah,
838 struct ieee80211_channel *channels, 858 struct ieee80211_channel *channels,
839 unsigned int mode, 859 unsigned int mode,
@@ -895,74 +915,97 @@ ath5k_copy_channels(struct ath5k_hw *ah,
895 return count; 915 return count;
896} 916}
897 917
918static void
919ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
920{
921 u8 i;
922
923 for (i = 0; i < AR5K_MAX_RATES; i++)
924 sc->rate_idx[b->band][i] = -1;
925
926 for (i = 0; i < b->n_bitrates; i++) {
927 sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
928 if (b->bitrates[i].hw_value_short)
929 sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
930 }
931}
932
898static int 933static int
899ath5k_getchannels(struct ieee80211_hw *hw) 934ath5k_setup_bands(struct ieee80211_hw *hw)
900{ 935{
901 struct ath5k_softc *sc = hw->priv; 936 struct ath5k_softc *sc = hw->priv;
902 struct ath5k_hw *ah = sc->ah; 937 struct ath5k_hw *ah = sc->ah;
903 struct ieee80211_supported_band *sbands = sc->sbands; 938 struct ieee80211_supported_band *sband;
904 const struct ath5k_rate_table *hw_rates; 939 int max_c, count_c = 0;
905 unsigned int max_r, max_c, count_r, count_c; 940 int i;
906 int mode2g = AR5K_MODE_11G;
907 941
908 BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS); 942 BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
909
910 max_r = ARRAY_SIZE(sc->rates);
911 max_c = ARRAY_SIZE(sc->channels); 943 max_c = ARRAY_SIZE(sc->channels);
912 count_r = count_c = 0;
913 944
914 /* 2GHz band */ 945 /* 2GHz band */
915 if (!test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) { 946 sband = &sc->sbands[IEEE80211_BAND_2GHZ];
916 mode2g = AR5K_MODE_11B; 947 sband->band = IEEE80211_BAND_2GHZ;
917 if (!test_bit(AR5K_MODE_11B, 948 sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];
918 sc->ah->ah_capabilities.cap_mode))
919 mode2g = -1;
920 }
921 949
922 if (mode2g > 0) { 950 if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
923 struct ieee80211_supported_band *sband = 951 /* G mode */
924 &sbands[IEEE80211_BAND_2GHZ]; 952 memcpy(sband->bitrates, &ath5k_rates[0],
953 sizeof(struct ieee80211_rate) * 12);
954 sband->n_bitrates = 12;
925 955
926 sband->bitrates = sc->rates;
927 sband->channels = sc->channels; 956 sband->channels = sc->channels;
928
929 sband->band = IEEE80211_BAND_2GHZ;
930 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 957 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
931 mode2g, max_c); 958 AR5K_MODE_11G, max_c);
932
933 hw_rates = ath5k_hw_get_rate_table(ah, mode2g);
934 sband->n_bitrates = ath5k_copy_rates(sband->bitrates,
935 hw_rates, max_r);
936 959
960 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
937 count_c = sband->n_channels; 961 count_c = sband->n_channels;
938 count_r = sband->n_bitrates; 962 max_c -= count_c;
963 } else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
964 /* B mode */
965 memcpy(sband->bitrates, &ath5k_rates[0],
966 sizeof(struct ieee80211_rate) * 4);
967 sband->n_bitrates = 4;
968
969 /* 5211 only supports B rates and uses 4bit rate codes
970 * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
971 * fix them up here:
972 */
973 if (ah->ah_version == AR5K_AR5211) {
974 for (i = 0; i < 4; i++) {
975 sband->bitrates[i].hw_value =
976 sband->bitrates[i].hw_value & 0xF;
977 sband->bitrates[i].hw_value_short =
978 sband->bitrates[i].hw_value_short & 0xF;
979 }
980 }
939 981
940 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 982 sband->channels = sc->channels;
983 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
984 AR5K_MODE_11B, max_c);
941 985
942 max_r -= count_r; 986 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
987 count_c = sband->n_channels;
943 max_c -= count_c; 988 max_c -= count_c;
944
945 } 989 }
990 ath5k_setup_rate_idx(sc, sband);
946 991
947 /* 5GHz band */ 992 /* 5GHz band, A mode */
948
949 if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) { 993 if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
950 struct ieee80211_supported_band *sband = 994 sband = &sc->sbands[IEEE80211_BAND_5GHZ];
951 &sbands[IEEE80211_BAND_5GHZ]; 995 sband->band = IEEE80211_BAND_5GHZ;
996 sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];
952 997
953 sband->bitrates = &sc->rates[count_r]; 998 memcpy(sband->bitrates, &ath5k_rates[4],
954 sband->channels = &sc->channels[count_c]; 999 sizeof(struct ieee80211_rate) * 8);
1000 sband->n_bitrates = 8;
955 1001
956 sband->band = IEEE80211_BAND_5GHZ; 1002 sband->channels = &sc->channels[count_c];
957 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 1003 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
958 AR5K_MODE_11A, max_c); 1004 AR5K_MODE_11A, max_c);
959 1005
960 hw_rates = ath5k_hw_get_rate_table(ah, AR5K_MODE_11A);
961 sband->n_bitrates = ath5k_copy_rates(sband->bitrates,
962 hw_rates, max_r);
963
964 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; 1006 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
965 } 1007 }
1008 ath5k_setup_rate_idx(sc, sband);
966 1009
967 ath5k_debug_dump_bands(sc); 1010 ath5k_debug_dump_bands(sc);
968 1011
@@ -978,9 +1021,6 @@ ath5k_getchannels(struct ieee80211_hw *hw)
978static int 1021static int
979ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) 1022ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
980{ 1023{
981 struct ath5k_hw *ah = sc->ah;
982 int ret;
983
984 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n", 1024 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n",
985 sc->curchan->center_freq, chan->center_freq); 1025 sc->curchan->center_freq, chan->center_freq);
986 1026
@@ -996,41 +1036,7 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
996 * hardware at the new frequency, and then re-enable 1036 * hardware at the new frequency, and then re-enable
997 * the relevant bits of the h/w. 1037 * the relevant bits of the h/w.
998 */ 1038 */
999 ath5k_hw_set_intr(ah, 0); /* disable interrupts */ 1039 return ath5k_reset(sc, true, true);
1000 ath5k_txq_cleanup(sc); /* clear pending tx frames */
1001 ath5k_rx_stop(sc); /* turn off frame recv */
1002 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true);
1003 if (ret) {
1004 ATH5K_ERR(sc, "%s: unable to reset channel "
1005 "(%u Mhz)\n", __func__, chan->center_freq);
1006 return ret;
1007 }
1008
1009 ath5k_hw_set_txpower_limit(sc->ah, 0);
1010
1011 /*
1012 * Re-enable rx framework.
1013 */
1014 ret = ath5k_rx_start(sc);
1015 if (ret) {
1016 ATH5K_ERR(sc, "%s: unable to restart recv logic\n",
1017 __func__);
1018 return ret;
1019 }
1020
1021 /*
1022 * Change channels and update the h/w rate map
1023 * if we're switching; e.g. 11a to 11b/g.
1024 *
1025 * XXX needed?
1026 */
1027/* ath5k_chan_change(sc, chan); */
1028
1029 ath5k_beacon_config(sc);
1030 /*
1031 * Re-enable interrupts.
1032 */
1033 ath5k_hw_set_intr(ah, sc->imask);
1034 } 1040 }
1035 1041
1036 return 0; 1042 return 0;
@@ -1068,75 +1074,13 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1068 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); 1074 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
1069} 1075}
1070 1076
1071/*
1072 * Match the hw provided rate index (through descriptors)
1073 * to an index for sc->curband->bitrates, so it can be used
1074 * by the stack.
1075 *
1076 * This one is a little bit tricky but i think i'm right
1077 * about this...
1078 *
1079 * We have 4 rate tables in the following order:
1080 * XR (4 rates)
1081 * 802.11a (8 rates)
1082 * 802.11b (4 rates)
1083 * 802.11g (12 rates)
1084 * that make the hw rate table.
1085 *
1086 * Lets take a 5211 for example that supports a and b modes only.
1087 * First comes the 802.11a table and then 802.11b (total 12 rates).
1088 * When hw returns eg. 11 it points to the last 802.11b rate (11Mbit),
1089 * if it returns 2 it points to the second 802.11a rate etc.
1090 *
1091 * Same goes for 5212 who has xr/a/b/g support (total 28 rates).
1092 * First comes the XR table, then 802.11a, 802.11b and 802.11g.
1093 * When hw returns eg. 27 it points to the last 802.11g rate (54Mbits) etc
1094 */
1095static void
1096ath5k_set_total_hw_rates(struct ath5k_softc *sc) {
1097
1098 struct ath5k_hw *ah = sc->ah;
1099
1100 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
1101 sc->a_rates = 8;
1102
1103 if (test_bit(AR5K_MODE_11B, ah->ah_modes))
1104 sc->b_rates = 4;
1105
1106 if (test_bit(AR5K_MODE_11G, ah->ah_modes))
1107 sc->g_rates = 12;
1108
1109 /* XXX: Need to see what what happens when
1110 xr disable bits in eeprom are set */
1111 if (ah->ah_version >= AR5K_AR5212)
1112 sc->xr_rates = 4;
1113
1114}
1115
1116static inline int 1077static inline int
1117ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) { 1078ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
1118 1079{
1119 int mac80211_rix; 1080 WARN_ON(hw_rix < 0 || hw_rix > AR5K_MAX_RATES);
1120 1081 return sc->rate_idx[sc->curband->band][hw_rix];
1121 if(sc->curband->band == IEEE80211_BAND_2GHZ) {
1122 /* We setup a g ratetable for both b/g modes */
1123 mac80211_rix =
1124 hw_rix - sc->b_rates - sc->a_rates - sc->xr_rates;
1125 } else {
1126 mac80211_rix = hw_rix - sc->xr_rates;
1127 }
1128
1129 /* Something went wrong, fallback to basic rate for this band */
1130 if ((mac80211_rix >= sc->curband->n_bitrates) ||
1131 (mac80211_rix <= 0 ))
1132 mac80211_rix = 1;
1133
1134 return mac80211_rix;
1135} 1082}
1136 1083
1137
1138
1139
1140/***************\ 1084/***************\
1141* Buffers setup * 1085* Buffers setup *
1142\***************/ 1086\***************/
@@ -1199,7 +1143,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1199 ds = bf->desc; 1143 ds = bf->desc;
1200 ds->ds_link = bf->daddr; /* link to self */ 1144 ds->ds_link = bf->daddr; /* link to self */
1201 ds->ds_data = bf->skbaddr; 1145 ds->ds_data = bf->skbaddr;
1202 ath5k_hw_setup_rx_desc(ah, ds, 1146 ah->ah_setup_rx_desc(ah, ds,
1203 skb_tailroom(skb), /* buffer size */ 1147 skb_tailroom(skb), /* buffer size */
1204 0); 1148 0);
1205 1149
@@ -1250,12 +1194,12 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1250 list_add_tail(&bf->list, &txq->q); 1194 list_add_tail(&bf->list, &txq->q);
1251 sc->tx_stats[txq->qnum].len++; 1195 sc->tx_stats[txq->qnum].len++;
1252 if (txq->link == NULL) /* is this first packet? */ 1196 if (txq->link == NULL) /* is this first packet? */
1253 ath5k_hw_put_tx_buf(ah, txq->qnum, bf->daddr); 1197 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
1254 else /* no, so only link it */ 1198 else /* no, so only link it */
1255 *txq->link = bf->daddr; 1199 *txq->link = bf->daddr;
1256 1200
1257 txq->link = &ds->ds_link; 1201 txq->link = &ds->ds_link;
1258 ath5k_hw_tx_start(ah, txq->qnum); 1202 ath5k_hw_start_tx_dma(ah, txq->qnum);
1259 mmiowb(); 1203 mmiowb();
1260 spin_unlock_bh(&txq->lock); 1204 spin_unlock_bh(&txq->lock);
1261 1205
@@ -1433,7 +1377,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1433 ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi); 1377 ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
1434 if (ret) 1378 if (ret)
1435 return ret; 1379 return ret;
1436 if (sc->opmode == IEEE80211_IF_TYPE_AP) { 1380 if (sc->opmode == NL80211_IFTYPE_AP ||
1381 sc->opmode == NL80211_IFTYPE_MESH_POINT) {
1437 /* 1382 /*
1438 * Always burst out beacon and CAB traffic 1383 * Always burst out beacon and CAB traffic
1439 * (aifs = cwmin = cwmax = 0) 1384 * (aifs = cwmin = cwmax = 0)
@@ -1441,7 +1386,7 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1441 qi.tqi_aifs = 0; 1386 qi.tqi_aifs = 0;
1442 qi.tqi_cw_min = 0; 1387 qi.tqi_cw_min = 0;
1443 qi.tqi_cw_max = 0; 1388 qi.tqi_cw_max = 0;
1444 } else if (sc->opmode == IEEE80211_IF_TYPE_IBSS) { 1389 } else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
1445 /* 1390 /*
1446 * Adhoc mode; backoff between 0 and (2 * cw_min). 1391 * Adhoc mode; backoff between 0 and (2 * cw_min).
1447 */ 1392 */
@@ -1454,7 +1399,7 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1454 "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n", 1399 "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
1455 qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max); 1400 qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
1456 1401
1457 ret = ath5k_hw_setup_tx_queueprops(ah, sc->bhalq, &qi); 1402 ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
1458 if (ret) { 1403 if (ret) {
1459 ATH5K_ERR(sc, "%s: unable to update parameters for beacon " 1404 ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
1460 "hardware queue!\n", __func__); 1405 "hardware queue!\n", __func__);
@@ -1503,14 +1448,14 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
1503 /* don't touch the hardware if marked invalid */ 1448 /* don't touch the hardware if marked invalid */
1504 ath5k_hw_stop_tx_dma(ah, sc->bhalq); 1449 ath5k_hw_stop_tx_dma(ah, sc->bhalq);
1505 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n", 1450 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n",
1506 ath5k_hw_get_tx_buf(ah, sc->bhalq)); 1451 ath5k_hw_get_txdp(ah, sc->bhalq));
1507 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) 1452 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1508 if (sc->txqs[i].setup) { 1453 if (sc->txqs[i].setup) {
1509 ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum); 1454 ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum);
1510 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, " 1455 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, "
1511 "link %p\n", 1456 "link %p\n",
1512 sc->txqs[i].qnum, 1457 sc->txqs[i].qnum,
1513 ath5k_hw_get_tx_buf(ah, 1458 ath5k_hw_get_txdp(ah,
1514 sc->txqs[i].qnum), 1459 sc->txqs[i].qnum),
1515 sc->txqs[i].link); 1460 sc->txqs[i].link);
1516 } 1461 }
@@ -1570,8 +1515,8 @@ ath5k_rx_start(struct ath5k_softc *sc)
1570 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); 1515 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
1571 spin_unlock_bh(&sc->rxbuflock); 1516 spin_unlock_bh(&sc->rxbuflock);
1572 1517
1573 ath5k_hw_put_rx_buf(ah, bf->daddr); 1518 ath5k_hw_set_rxdp(ah, bf->daddr);
1574 ath5k_hw_start_rx(ah); /* enable recv descriptors */ 1519 ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
1575 ath5k_mode_setup(sc); /* set filters, etc. */ 1520 ath5k_mode_setup(sc); /* set filters, etc. */
1576 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 1521 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
1577 1522
@@ -1588,7 +1533,7 @@ ath5k_rx_stop(struct ath5k_softc *sc)
1588{ 1533{
1589 struct ath5k_hw *ah = sc->ah; 1534 struct ath5k_hw *ah = sc->ah;
1590 1535
1591 ath5k_hw_stop_pcu_recv(ah); /* disable PCU */ 1536 ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
1592 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ 1537 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
1593 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ 1538 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */
1594 1539
@@ -1602,7 +1547,7 @@ ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
1602 struct sk_buff *skb, struct ath5k_rx_status *rs) 1547 struct sk_buff *skb, struct ath5k_rx_status *rs)
1603{ 1548{
1604 struct ieee80211_hdr *hdr = (void *)skb->data; 1549 struct ieee80211_hdr *hdr = (void *)skb->data;
1605 unsigned int keyix, hlen = ieee80211_get_hdrlen_from_skb(skb); 1550 unsigned int keyix, hlen;
1606 1551
1607 if (!(rs->rs_status & AR5K_RXERR_DECRYPT) && 1552 if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
1608 rs->rs_keyix != AR5K_RXKEYIX_INVALID) 1553 rs->rs_keyix != AR5K_RXKEYIX_INVALID)
@@ -1611,6 +1556,7 @@ ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
1611 /* Apparently when a default key is used to decrypt the packet 1556 /* Apparently when a default key is used to decrypt the packet
1612 the hw does not set the index used to decrypt. In such cases 1557 the hw does not set the index used to decrypt. In such cases
1613 get the index from the packet. */ 1558 get the index from the packet. */
1559 hlen = ieee80211_hdrlen(hdr->frame_control);
1614 if (ieee80211_has_protected(hdr->frame_control) && 1560 if (ieee80211_has_protected(hdr->frame_control) &&
1615 !(rs->rs_status & AR5K_RXERR_DECRYPT) && 1561 !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
1616 skb->len >= hlen + 4) { 1562 skb->len >= hlen + 4) {
@@ -1768,7 +1714,7 @@ ath5k_tasklet_rx(unsigned long data)
1768 /* let crypto-error packets fall through in MNTR */ 1714 /* let crypto-error packets fall through in MNTR */
1769 if ((rs.rs_status & 1715 if ((rs.rs_status &
1770 ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || 1716 ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
1771 sc->opmode != IEEE80211_IF_TYPE_MNTR) 1717 sc->opmode != NL80211_IFTYPE_MONITOR)
1772 goto next; 1718 goto next;
1773 } 1719 }
1774accept: 1720accept:
@@ -1824,10 +1770,14 @@ accept:
1824 rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); 1770 rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
1825 rxs.flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); 1771 rxs.flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
1826 1772
1773 if (rxs.rate_idx >= 0 && rs.rs_rate ==
1774 sc->curband->bitrates[rxs.rate_idx].hw_value_short)
1775 rxs.flag |= RX_FLAG_SHORTPRE;
1776
1827 ath5k_debug_dump_skb(sc, skb, "RX ", 0); 1777 ath5k_debug_dump_skb(sc, skb, "RX ", 0);
1828 1778
1829 /* check beacons in IBSS mode */ 1779 /* check beacons in IBSS mode */
1830 if (sc->opmode == IEEE80211_IF_TYPE_IBSS) 1780 if (sc->opmode == NL80211_IFTYPE_ADHOC)
1831 ath5k_check_ibss_tsf(sc, skb, &rxs); 1781 ath5k_check_ibss_tsf(sc, skb, &rxs);
1832 1782
1833 __ieee80211_rx(sc->hw, skb, &rxs); 1783 __ieee80211_rx(sc->hw, skb, &rxs);
@@ -1942,7 +1892,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1942 ds = bf->desc; 1892 ds = bf->desc;
1943 1893
1944 flags = AR5K_TXDESC_NOACK; 1894 flags = AR5K_TXDESC_NOACK;
1945 if (sc->opmode == IEEE80211_IF_TYPE_IBSS && ath5k_hw_hasveol(ah)) { 1895 if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
1946 ds->ds_link = bf->daddr; /* self-linked */ 1896 ds->ds_link = bf->daddr; /* self-linked */
1947 flags |= AR5K_TXDESC_VEOL; 1897 flags |= AR5K_TXDESC_VEOL;
1948 /* 1898 /*
@@ -1991,8 +1941,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
1991 1941
1992 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n"); 1942 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");
1993 1943
1994 if (unlikely(bf->skb == NULL || sc->opmode == IEEE80211_IF_TYPE_STA || 1944 if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
1995 sc->opmode == IEEE80211_IF_TYPE_MNTR)) { 1945 sc->opmode == NL80211_IFTYPE_MONITOR)) {
1996 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); 1946 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
1997 return; 1947 return;
1998 } 1948 }
@@ -2032,8 +1982,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2032 /* NB: hw still stops DMA, so proceed */ 1982 /* NB: hw still stops DMA, so proceed */
2033 } 1983 }
2034 1984
2035 ath5k_hw_put_tx_buf(ah, sc->bhalq, bf->daddr); 1985 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
2036 ath5k_hw_tx_start(ah, sc->bhalq); 1986 ath5k_hw_start_tx_dma(ah, sc->bhalq);
2037 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n", 1987 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
2038 sc->bhalq, (unsigned long long)bf->daddr, bf->desc); 1988 sc->bhalq, (unsigned long long)bf->daddr, bf->desc);
2039 1989
@@ -2162,13 +2112,13 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2162{ 2112{
2163 struct ath5k_hw *ah = sc->ah; 2113 struct ath5k_hw *ah = sc->ah;
2164 2114
2165 ath5k_hw_set_intr(ah, 0); 2115 ath5k_hw_set_imr(ah, 0);
2166 sc->bmisscount = 0; 2116 sc->bmisscount = 0;
2167 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); 2117 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2168 2118
2169 if (sc->opmode == IEEE80211_IF_TYPE_STA) { 2119 if (sc->opmode == NL80211_IFTYPE_STATION) {
2170 sc->imask |= AR5K_INT_BMISS; 2120 sc->imask |= AR5K_INT_BMISS;
2171 } else if (sc->opmode == IEEE80211_IF_TYPE_IBSS) { 2121 } else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
2172 /* 2122 /*
2173 * In IBSS mode we use a self-linked tx descriptor and let the 2123 * In IBSS mode we use a self-linked tx descriptor and let the
2174 * hardware send the beacons automatically. We have to load it 2124 * hardware send the beacons automatically. We have to load it
@@ -2188,7 +2138,7 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2188 } 2138 }
2189 /* TODO else AP */ 2139 /* TODO else AP */
2190 2140
2191 ath5k_hw_set_intr(ah, sc->imask); 2141 ath5k_hw_set_imr(ah, sc->imask);
2192} 2142}
2193 2143
2194 2144
@@ -2220,36 +2170,13 @@ ath5k_init(struct ath5k_softc *sc)
2220 */ 2170 */
2221 sc->curchan = sc->hw->conf.channel; 2171 sc->curchan = sc->hw->conf.channel;
2222 sc->curband = &sc->sbands[sc->curchan->band]; 2172 sc->curband = &sc->sbands[sc->curchan->band];
2223 ret = ath5k_hw_reset(sc->ah, sc->opmode, sc->curchan, false);
2224 if (ret) {
2225 ATH5K_ERR(sc, "unable to reset hardware: %d\n", ret);
2226 goto done;
2227 }
2228 /*
2229 * This is needed only to setup initial state
2230 * but it's best done after a reset.
2231 */
2232 ath5k_hw_set_txpower_limit(sc->ah, 0);
2233
2234 /*
2235 * Setup the hardware after reset: the key cache
2236 * is filled as needed and the receive engine is
2237 * set going. Frame transmit is handled entirely
2238 * in the frame output path; there's nothing to do
2239 * here except setup the interrupt mask.
2240 */
2241 ret = ath5k_rx_start(sc);
2242 if (ret)
2243 goto done;
2244
2245 /*
2246 * Enable interrupts.
2247 */
2248 sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL | 2173 sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL |
2249 AR5K_INT_RXORN | AR5K_INT_FATAL | AR5K_INT_GLOBAL | 2174 AR5K_INT_RXORN | AR5K_INT_FATAL | AR5K_INT_GLOBAL |
2250 AR5K_INT_MIB; 2175 AR5K_INT_MIB;
2176 ret = ath5k_reset(sc, false, false);
2177 if (ret)
2178 goto done;
2251 2179
2252 ath5k_hw_set_intr(sc->ah, sc->imask);
2253 /* Set ack to be sent at low bit-rates */ 2180 /* Set ack to be sent at low bit-rates */
2254 ath5k_hw_set_ack_bitrate_high(sc->ah, false); 2181 ath5k_hw_set_ack_bitrate_high(sc->ah, false);
2255 2182
@@ -2290,7 +2217,7 @@ ath5k_stop_locked(struct ath5k_softc *sc)
2290 2217
2291 if (!test_bit(ATH_STAT_INVALID, sc->status)) { 2218 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2292 ath5k_led_off(sc); 2219 ath5k_led_off(sc);
2293 ath5k_hw_set_intr(ah, 0); 2220 ath5k_hw_set_imr(ah, 0);
2294 synchronize_irq(sc->pdev->irq); 2221 synchronize_irq(sc->pdev->irq);
2295 } 2222 }
2296 ath5k_txq_cleanup(sc); 2223 ath5k_txq_cleanup(sc);
@@ -2396,7 +2323,7 @@ ath5k_intr(int irq, void *dev_id)
2396 * transmission time) in order to detect wether 2323 * transmission time) in order to detect wether
2397 * automatic TSF updates happened. 2324 * automatic TSF updates happened.
2398 */ 2325 */
2399 if (sc->opmode == IEEE80211_IF_TYPE_IBSS) { 2326 if (sc->opmode == NL80211_IFTYPE_ADHOC) {
2400 /* XXX: only if VEOL suppported */ 2327 /* XXX: only if VEOL suppported */
2401 u64 tsf = ath5k_hw_get_tsf64(ah); 2328 u64 tsf = ath5k_hw_get_tsf64(ah);
2402 sc->nexttbtt += sc->bintval; 2329 sc->nexttbtt += sc->bintval;
@@ -2451,7 +2378,7 @@ ath5k_tasklet_reset(unsigned long data)
2451{ 2378{
2452 struct ath5k_softc *sc = (void *)data; 2379 struct ath5k_softc *sc = (void *)data;
2453 2380
2454 ath5k_reset(sc->hw); 2381 ath5k_reset_wake(sc);
2455} 2382}
2456 2383
2457/* 2384/*
@@ -2474,7 +2401,7 @@ ath5k_calibrate(unsigned long data)
2474 * to load new gain values. 2401 * to load new gain values.
2475 */ 2402 */
2476 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); 2403 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
2477 ath5k_reset(sc->hw); 2404 ath5k_reset_wake(sc);
2478 } 2405 }
2479 if (ath5k_hw_phy_calibrate(ah, sc->curchan)) 2406 if (ath5k_hw_phy_calibrate(ah, sc->curchan))
2480 ATH5K_ERR(sc, "calibration of channel %u failed\n", 2407 ATH5K_ERR(sc, "calibration of channel %u failed\n",
@@ -2626,7 +2553,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2626 2553
2627 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 2554 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
2628 2555
2629 if (sc->opmode == IEEE80211_IF_TYPE_MNTR) 2556 if (sc->opmode == NL80211_IFTYPE_MONITOR)
2630 ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n"); 2557 ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n");
2631 2558
2632 /* 2559 /*
@@ -2675,48 +2602,67 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2675} 2602}
2676 2603
2677static int 2604static int
2678ath5k_reset(struct ieee80211_hw *hw) 2605ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel)
2679{ 2606{
2680 struct ath5k_softc *sc = hw->priv;
2681 struct ath5k_hw *ah = sc->ah; 2607 struct ath5k_hw *ah = sc->ah;
2682 int ret; 2608 int ret;
2683 2609
2684 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); 2610 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
2685 2611
2686 ath5k_hw_set_intr(ah, 0); 2612 if (stop) {
2687 ath5k_txq_cleanup(sc); 2613 ath5k_hw_set_imr(ah, 0);
2688 ath5k_rx_stop(sc); 2614 ath5k_txq_cleanup(sc);
2689 2615 ath5k_rx_stop(sc);
2616 }
2690 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true); 2617 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true);
2691 if (unlikely(ret)) { 2618 if (ret) {
2692 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret); 2619 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
2693 goto err; 2620 goto err;
2694 } 2621 }
2622
2623 /*
2624 * This is needed only to setup initial state
2625 * but it's best done after a reset.
2626 */
2695 ath5k_hw_set_txpower_limit(sc->ah, 0); 2627 ath5k_hw_set_txpower_limit(sc->ah, 0);
2696 2628
2697 ret = ath5k_rx_start(sc); 2629 ret = ath5k_rx_start(sc);
2698 if (unlikely(ret)) { 2630 if (ret) {
2699 ATH5K_ERR(sc, "can't start recv logic\n"); 2631 ATH5K_ERR(sc, "can't start recv logic\n");
2700 goto err; 2632 goto err;
2701 } 2633 }
2634
2702 /* 2635 /*
2703 * We may be doing a reset in response to an ioctl 2636 * Change channels and update the h/w rate map if we're switching;
2704 * that changes the channel so update any state that 2637 * e.g. 11a to 11b/g.
2705 * might change as a result. 2638 *
2639 * We may be doing a reset in response to an ioctl that changes the
2640 * channel so update any state that might change as a result.
2706 * 2641 *
2707 * XXX needed? 2642 * XXX needed?
2708 */ 2643 */
2709/* ath5k_chan_change(sc, c); */ 2644/* ath5k_chan_change(sc, c); */
2710 ath5k_beacon_config(sc);
2711 /* intrs are started by ath5k_beacon_config */
2712 2645
2713 ieee80211_wake_queues(hw); 2646 ath5k_beacon_config(sc);
2647 /* intrs are enabled by ath5k_beacon_config */
2714 2648
2715 return 0; 2649 return 0;
2716err: 2650err:
2717 return ret; 2651 return ret;
2718} 2652}
2719 2653
2654static int
2655ath5k_reset_wake(struct ath5k_softc *sc)
2656{
2657 int ret;
2658
2659 ret = ath5k_reset(sc, true, true);
2660 if (!ret)
2661 ieee80211_wake_queues(sc->hw);
2662
2663 return ret;
2664}
2665
2720static int ath5k_start(struct ieee80211_hw *hw) 2666static int ath5k_start(struct ieee80211_hw *hw)
2721{ 2667{
2722 return ath5k_init(hw->priv); 2668 return ath5k_init(hw->priv);
@@ -2742,9 +2688,9 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2742 sc->vif = conf->vif; 2688 sc->vif = conf->vif;
2743 2689
2744 switch (conf->type) { 2690 switch (conf->type) {
2745 case IEEE80211_IF_TYPE_STA: 2691 case NL80211_IFTYPE_STATION:
2746 case IEEE80211_IF_TYPE_IBSS: 2692 case NL80211_IFTYPE_ADHOC:
2747 case IEEE80211_IF_TYPE_MNTR: 2693 case NL80211_IFTYPE_MONITOR:
2748 sc->opmode = conf->type; 2694 sc->opmode = conf->type;
2749 break; 2695 break;
2750 default: 2696 default:
@@ -2815,7 +2761,7 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2815 } 2761 }
2816 2762
2817 if (conf->changed & IEEE80211_IFCC_BEACON && 2763 if (conf->changed & IEEE80211_IFCC_BEACON &&
2818 vif->type == IEEE80211_IF_TYPE_IBSS) { 2764 vif->type == NL80211_IFTYPE_ADHOC) {
2819 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 2765 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2820 if (!beacon) { 2766 if (!beacon) {
2821 ret = -ENOMEM; 2767 ret = -ENOMEM;
@@ -2827,7 +2773,7 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2827 2773
2828 mutex_unlock(&sc->lock); 2774 mutex_unlock(&sc->lock);
2829 2775
2830 return ath5k_reset(hw); 2776 return ath5k_reset_wake(sc);
2831unlock: 2777unlock:
2832 mutex_unlock(&sc->lock); 2778 mutex_unlock(&sc->lock);
2833 return ret; 2779 return ret;
@@ -2934,16 +2880,17 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2934 2880
2935 /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */ 2881 /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
2936 2882
2937 if (sc->opmode == IEEE80211_IF_TYPE_MNTR) 2883 if (sc->opmode == NL80211_IFTYPE_MONITOR)
2938 rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON | 2884 rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON |
2939 AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM; 2885 AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM;
2940 if (sc->opmode != IEEE80211_IF_TYPE_STA) 2886 if (sc->opmode != NL80211_IFTYPE_STATION)
2941 rfilt |= AR5K_RX_FILTER_PROBEREQ; 2887 rfilt |= AR5K_RX_FILTER_PROBEREQ;
2942 if (sc->opmode != IEEE80211_IF_TYPE_AP && 2888 if (sc->opmode != NL80211_IFTYPE_AP &&
2889 sc->opmode != NL80211_IFTYPE_MESH_POINT &&
2943 test_bit(ATH_STAT_PROMISC, sc->status)) 2890 test_bit(ATH_STAT_PROMISC, sc->status))
2944 rfilt |= AR5K_RX_FILTER_PROM; 2891 rfilt |= AR5K_RX_FILTER_PROM;
2945 if (sc->opmode == IEEE80211_IF_TYPE_STA || 2892 if (sc->opmode == NL80211_IFTYPE_STATION ||
2946 sc->opmode == IEEE80211_IF_TYPE_IBSS) { 2893 sc->opmode == NL80211_IFTYPE_ADHOC) {
2947 rfilt |= AR5K_RX_FILTER_BEACON; 2894 rfilt |= AR5K_RX_FILTER_BEACON;
2948 } 2895 }
2949 2896
@@ -3048,7 +2995,7 @@ ath5k_reset_tsf(struct ieee80211_hw *hw)
3048 * in IBSS mode we need to update the beacon timers too. 2995 * in IBSS mode we need to update the beacon timers too.
3049 * this will also reset the TSF if we call it with 0 2996 * this will also reset the TSF if we call it with 0
3050 */ 2997 */
3051 if (sc->opmode == IEEE80211_IF_TYPE_IBSS) 2998 if (sc->opmode == NL80211_IFTYPE_ADHOC)
3052 ath5k_beacon_update_timers(sc, 0); 2999 ath5k_beacon_update_timers(sc, 0);
3053 else 3000 else
3054 ath5k_hw_reset_tsf(sc->ah); 3001 ath5k_hw_reset_tsf(sc->ah);
@@ -3063,7 +3010,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3063 3010
3064 ath5k_debug_dump_skb(sc, skb, "BC ", 1); 3011 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
3065 3012
3066 if (sc->opmode != IEEE80211_IF_TYPE_IBSS) { 3013 if (sc->opmode != NL80211_IFTYPE_ADHOC) {
3067 ret = -EIO; 3014 ret = -EIO;
3068 goto end; 3015 goto end;
3069 } 3016 }
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 7ec2f377d5c7..9d0b728928e3 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -111,17 +111,13 @@ struct ath5k_softc {
111 struct ieee80211_hw *hw; /* IEEE 802.11 common */ 111 struct ieee80211_hw *hw; /* IEEE 802.11 common */
112 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 112 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
113 struct ieee80211_channel channels[ATH_CHAN_MAX]; 113 struct ieee80211_channel channels[ATH_CHAN_MAX];
114 struct ieee80211_rate rates[AR5K_MAX_RATES * IEEE80211_NUM_BANDS]; 114 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
115 enum ieee80211_if_types opmode; 115 u8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
116 enum nl80211_iftype opmode;
116 struct ath5k_hw *ah; /* Atheros HW */ 117 struct ath5k_hw *ah; /* Atheros HW */
117 118
118 struct ieee80211_supported_band *curband; 119 struct ieee80211_supported_band *curband;
119 120
120 u8 a_rates;
121 u8 b_rates;
122 u8 g_rates;
123 u8 xr_rates;
124
125#ifdef CONFIG_ATH5K_DEBUG 121#ifdef CONFIG_ATH5K_DEBUG
126 struct ath5k_dbg_info debug; /* debug info */ 122 struct ath5k_dbg_info debug; /* debug info */
127#endif /* CONFIG_ATH5K_DEBUG */ 123#endif /* CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath5k/caps.c b/drivers/net/wireless/ath5k/caps.c
new file mode 100644
index 000000000000..150f5ed204a0
--- /dev/null
+++ b/drivers/net/wireless/ath5k/caps.c
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 */
19
20/**************\
21* Capabilities *
22\**************/
23
24#include "ath5k.h"
25#include "reg.h"
26#include "debug.h"
27#include "base.h"
28
29/*
30 * Fill the capabilities struct
31 * TODO: Merge this with EEPROM code when we are done with it
32 */
33int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
34{
35 u16 ee_header;
36
37 ATH5K_TRACE(ah->ah_sc);
38 /* Capabilities stored in the EEPROM */
39 ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
40
41 if (ah->ah_version == AR5K_AR5210) {
42 /*
43 * Set radio capabilities
44 * (The AR5110 only supports the middle 5GHz band)
45 */
46 ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
47 ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
48 ah->ah_capabilities.cap_range.range_2ghz_min = 0;
49 ah->ah_capabilities.cap_range.range_2ghz_max = 0;
50
51 /* Set supported modes */
52 __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
53 __set_bit(AR5K_MODE_11A_TURBO, ah->ah_capabilities.cap_mode);
54 } else {
55 /*
56 * XXX The tranceiver supports frequencies from 4920 to 6100GHz
57 * XXX and from 2312 to 2732GHz. There are problems with the
58 * XXX current ieee80211 implementation because the IEEE
59 * XXX channel mapping does not support negative channel
60 * XXX numbers (2312MHz is channel -19). Of course, this
61 * XXX doesn't matter because these channels are out of range
62 * XXX but some regulation domains like MKK (Japan) will
63 * XXX support frequencies somewhere around 4.8GHz.
64 */
65
66 /*
67 * Set radio capabilities
68 */
69
70 if (AR5K_EEPROM_HDR_11A(ee_header)) {
71 /* 4920 */
72 ah->ah_capabilities.cap_range.range_5ghz_min = 5005;
73 ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
74
75 /* Set supported modes */
76 __set_bit(AR5K_MODE_11A,
77 ah->ah_capabilities.cap_mode);
78 __set_bit(AR5K_MODE_11A_TURBO,
79 ah->ah_capabilities.cap_mode);
80 if (ah->ah_version == AR5K_AR5212)
81 __set_bit(AR5K_MODE_11G_TURBO,
82 ah->ah_capabilities.cap_mode);
83 }
84
85 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is
86 * connected */
87 if (AR5K_EEPROM_HDR_11B(ee_header) ||
88 AR5K_EEPROM_HDR_11G(ee_header)) {
89 /* 2312 */
90 ah->ah_capabilities.cap_range.range_2ghz_min = 2412;
91 ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
92
93 if (AR5K_EEPROM_HDR_11B(ee_header))
94 __set_bit(AR5K_MODE_11B,
95 ah->ah_capabilities.cap_mode);
96
97 if (AR5K_EEPROM_HDR_11G(ee_header))
98 __set_bit(AR5K_MODE_11G,
99 ah->ah_capabilities.cap_mode);
100 }
101 }
102
103 /* GPIO */
104 ah->ah_gpio_npins = AR5K_NUM_GPIO;
105
106 /* Set number of supported TX queues */
107 if (ah->ah_version == AR5K_AR5210)
108 ah->ah_capabilities.cap_queues.q_tx_num =
109 AR5K_NUM_TX_QUEUES_NOQCU;
110 else
111 ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
112
113 return 0;
114}
115
116/* Main function used by the driver part to check caps */
117int ath5k_hw_get_capability(struct ath5k_hw *ah,
118 enum ath5k_capability_type cap_type,
119 u32 capability, u32 *result)
120{
121 ATH5K_TRACE(ah->ah_sc);
122
123 switch (cap_type) {
124 case AR5K_CAP_NUM_TXQUEUES:
125 if (result) {
126 if (ah->ah_version == AR5K_AR5210)
127 *result = AR5K_NUM_TX_QUEUES_NOQCU;
128 else
129 *result = AR5K_NUM_TX_QUEUES;
130 goto yes;
131 }
132 case AR5K_CAP_VEOL:
133 goto yes;
134 case AR5K_CAP_COMPRESSION:
135 if (ah->ah_version == AR5K_AR5212)
136 goto yes;
137 else
138 goto no;
139 case AR5K_CAP_BURST:
140 goto yes;
141 case AR5K_CAP_TPC:
142 goto yes;
143 case AR5K_CAP_BSSIDMASK:
144 if (ah->ah_version == AR5K_AR5212)
145 goto yes;
146 else
147 goto no;
148 case AR5K_CAP_XR:
149 if (ah->ah_version == AR5K_AR5212)
150 goto yes;
151 else
152 goto no;
153 default:
154 goto no;
155 }
156
157no:
158 return -EINVAL;
159yes:
160 return 0;
161}
162
163/*
164 * TODO: Following functions should be part of a new function
165 * set_capability
166 */
167
168int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
169 u16 assoc_id)
170{
171 ATH5K_TRACE(ah->ah_sc);
172
173 if (ah->ah_version == AR5K_AR5210) {
174 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
175 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
176 return 0;
177 }
178
179 return -EIO;
180}
181
182int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
183{
184 ATH5K_TRACE(ah->ah_sc);
185
186 if (ah->ah_version == AR5K_AR5210) {
187 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
188 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
189 return 0;
190 }
191
192 return -EIO;
193}
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index 6fa6c8e04ff0..8f92d670f614 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -58,8 +58,8 @@
58 * THE POSSIBILITY OF SUCH DAMAGES. 58 * THE POSSIBILITY OF SUCH DAMAGES.
59 */ 59 */
60 60
61#include "debug.h"
62#include "base.h" 61#include "base.h"
62#include "debug.h"
63 63
64static unsigned int ath5k_debug; 64static unsigned int ath5k_debug;
65module_param_named(debug, ath5k_debug, uint, 0); 65module_param_named(debug, ath5k_debug, uint, 0);
@@ -525,7 +525,7 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
525 return; 525 return;
526 526
527 printk(KERN_DEBUG "rx queue %x, link %p\n", 527 printk(KERN_DEBUG "rx queue %x, link %p\n",
528 ath5k_hw_get_rx_buf(ah), sc->rxlink); 528 ath5k_hw_get_rxdp(ah), sc->rxlink);
529 529
530 spin_lock_bh(&sc->rxbuflock); 530 spin_lock_bh(&sc->rxbuflock);
531 list_for_each_entry(bf, &sc->rxbuf, list) { 531 list_for_each_entry(bf, &sc->rxbuf, list) {
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
new file mode 100644
index 000000000000..d45b90a6e06c
--- /dev/null
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -0,0 +1,667 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 */
19
20/******************************\
21 Hardware Descriptor Functions
22\******************************/
23
24#include "ath5k.h"
25#include "reg.h"
26#include "debug.h"
27#include "base.h"
28
29/*
30 * TX Descriptors
31 */
32
33/*
34 * Initialize the 2-word tx control descriptor on 5210/5211
35 */
36static int
37ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
38 unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type,
39 unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
40 unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
41 unsigned int rtscts_rate, unsigned int rtscts_duration)
42{
43 u32 frame_type;
44 struct ath5k_hw_2w_tx_ctl *tx_ctl;
45 unsigned int frame_len;
46
47 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
48
49 /*
50 * Validate input
51 * - Zero retries don't make sense.
52 * - A zero rate will put the HW into a mode where it continously sends
53 * noise on the channel, so it is important to avoid this.
54 */
55 if (unlikely(tx_tries0 == 0)) {
56 ATH5K_ERR(ah->ah_sc, "zero retries\n");
57 WARN_ON(1);
58 return -EINVAL;
59 }
60 if (unlikely(tx_rate0 == 0)) {
61 ATH5K_ERR(ah->ah_sc, "zero rate\n");
62 WARN_ON(1);
63 return -EINVAL;
64 }
65
66 /* Clear descriptor */
67 memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
68
69 /* Setup control descriptor */
70
71 /* Verify and set frame length */
72
73 /* remove padding we might have added before */
74 frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
75
76 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
77 return -EINVAL;
78
79 tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
80
81 /* Verify and set buffer length */
82
83 /* NB: beacon's BufLen must be a multiple of 4 bytes */
84 if (type == AR5K_PKT_TYPE_BEACON)
85 pkt_len = roundup(pkt_len, 4);
86
87 if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
88 return -EINVAL;
89
90 tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
91
92 /*
93 * Verify and set header length
94 * XXX: I only found that on 5210 code, does it work on 5211 ?
95 */
96 if (ah->ah_version == AR5K_AR5210) {
97 if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN)
98 return -EINVAL;
99 tx_ctl->tx_control_0 |=
100 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
101 }
102
103 /*Diferences between 5210-5211*/
104 if (ah->ah_version == AR5K_AR5210) {
105 switch (type) {
106 case AR5K_PKT_TYPE_BEACON:
107 case AR5K_PKT_TYPE_PROBE_RESP:
108 frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
109 case AR5K_PKT_TYPE_PIFS:
110 frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
111 default:
112 frame_type = type /*<< 2 ?*/;
113 }
114
115 tx_ctl->tx_control_0 |=
116 AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) |
117 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
118
119 } else {
120 tx_ctl->tx_control_0 |=
121 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
122 AR5K_REG_SM(antenna_mode,
123 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
124 tx_ctl->tx_control_1 |=
125 AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE);
126 }
127#define _TX_FLAGS(_c, _flag) \
128 if (flags & AR5K_TXDESC_##_flag) { \
129 tx_ctl->tx_control_##_c |= \
130 AR5K_2W_TX_DESC_CTL##_c##_##_flag; \
131 }
132
133 _TX_FLAGS(0, CLRDMASK);
134 _TX_FLAGS(0, VEOL);
135 _TX_FLAGS(0, INTREQ);
136 _TX_FLAGS(0, RTSENA);
137 _TX_FLAGS(1, NOACK);
138
139#undef _TX_FLAGS
140
141 /*
142 * WEP crap
143 */
144 if (key_index != AR5K_TXKEYIX_INVALID) {
145 tx_ctl->tx_control_0 |=
146 AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
147 tx_ctl->tx_control_1 |=
148 AR5K_REG_SM(key_index,
149 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
150 }
151
152 /*
153 * RTS/CTS Duration [5210 ?]
154 */
155 if ((ah->ah_version == AR5K_AR5210) &&
156 (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
157 tx_ctl->tx_control_1 |= rtscts_duration &
158 AR5K_2W_TX_DESC_CTL1_RTS_DURATION;
159
160 return 0;
161}
162
163/*
164 * Initialize the 4-word tx control descriptor on 5212
165 */
166static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
167 struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
168 enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
169 unsigned int tx_tries0, unsigned int key_index,
170 unsigned int antenna_mode, unsigned int flags,
171 unsigned int rtscts_rate,
172 unsigned int rtscts_duration)
173{
174 struct ath5k_hw_4w_tx_ctl *tx_ctl;
175 unsigned int frame_len;
176
177 ATH5K_TRACE(ah->ah_sc);
178 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
179
180 /*
181 * Validate input
182 * - Zero retries don't make sense.
183 * - A zero rate will put the HW into a mode where it continously sends
184 * noise on the channel, so it is important to avoid this.
185 */
186 if (unlikely(tx_tries0 == 0)) {
187 ATH5K_ERR(ah->ah_sc, "zero retries\n");
188 WARN_ON(1);
189 return -EINVAL;
190 }
191 if (unlikely(tx_rate0 == 0)) {
192 ATH5K_ERR(ah->ah_sc, "zero rate\n");
193 WARN_ON(1);
194 return -EINVAL;
195 }
196
197 /* Clear descriptor */
198 memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));
199
200 /* Setup control descriptor */
201
202 /* Verify and set frame length */
203
204 /* remove padding we might have added before */
205 frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
206
207 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
208 return -EINVAL;
209
210 tx_ctl->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
211
212 /* Verify and set buffer length */
213
214 /* NB: beacon's BufLen must be a multiple of 4 bytes */
215 if (type == AR5K_PKT_TYPE_BEACON)
216 pkt_len = roundup(pkt_len, 4);
217
218 if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
219 return -EINVAL;
220
221 tx_ctl->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
222
223 tx_ctl->tx_control_0 |=
224 AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
225 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
226 tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
227 AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
228 tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
229 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
230 tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
231
232#define _TX_FLAGS(_c, _flag) \
233 if (flags & AR5K_TXDESC_##_flag) { \
234 tx_ctl->tx_control_##_c |= \
235 AR5K_4W_TX_DESC_CTL##_c##_##_flag; \
236 }
237
238 _TX_FLAGS(0, CLRDMASK);
239 _TX_FLAGS(0, VEOL);
240 _TX_FLAGS(0, INTREQ);
241 _TX_FLAGS(0, RTSENA);
242 _TX_FLAGS(0, CTSENA);
243 _TX_FLAGS(1, NOACK);
244
245#undef _TX_FLAGS
246
247 /*
248 * WEP crap
249 */
250 if (key_index != AR5K_TXKEYIX_INVALID) {
251 tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
252 tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
253 AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
254 }
255
256 /*
257 * RTS/CTS
258 */
259 if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
260 if ((flags & AR5K_TXDESC_RTSENA) &&
261 (flags & AR5K_TXDESC_CTSENA))
262 return -EINVAL;
263 tx_ctl->tx_control_2 |= rtscts_duration &
264 AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
265 tx_ctl->tx_control_3 |= AR5K_REG_SM(rtscts_rate,
266 AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
267 }
268
269 return 0;
270}
271
272/*
273 * Initialize a 4-word multi rate retry tx control descriptor on 5212
274 */
275static int
276ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
277 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
278 u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
279{
280 struct ath5k_hw_4w_tx_ctl *tx_ctl;
281
282 /*
283 * Rates can be 0 as long as the retry count is 0 too.
284 * A zero rate and nonzero retry count will put the HW into a mode where
285 * it continously sends noise on the channel, so it is important to
286 * avoid this.
287 */
288 if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
289 (tx_rate2 == 0 && tx_tries2 != 0) ||
290 (tx_rate3 == 0 && tx_tries3 != 0))) {
291 ATH5K_ERR(ah->ah_sc, "zero rate\n");
292 WARN_ON(1);
293 return -EINVAL;
294 }
295
296 if (ah->ah_version == AR5K_AR5212) {
297 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
298
299#define _XTX_TRIES(_n) \
300 if (tx_tries##_n) { \
301 tx_ctl->tx_control_2 |= \
302 AR5K_REG_SM(tx_tries##_n, \
303 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \
304 tx_ctl->tx_control_3 |= \
305 AR5K_REG_SM(tx_rate##_n, \
306 AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \
307 }
308
309 _XTX_TRIES(1);
310 _XTX_TRIES(2);
311 _XTX_TRIES(3);
312
313#undef _XTX_TRIES
314
315 return 1;
316 }
317
318 return 0;
319}
320
321/*
322 * Proccess the tx status descriptor on 5210/5211
323 */
324static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
325 struct ath5k_desc *desc, struct ath5k_tx_status *ts)
326{
327 struct ath5k_hw_2w_tx_ctl *tx_ctl;
328 struct ath5k_hw_tx_status *tx_status;
329
330 ATH5K_TRACE(ah->ah_sc);
331
332 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
333 tx_status = &desc->ud.ds_tx5210.tx_stat;
334
335 /* No frame has been send or error */
336 if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
337 return -EINPROGRESS;
338
339 /*
340 * Get descriptor status
341 */
342 ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
343 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
344 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
345 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
346 ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
347 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
348 /*TODO: ts->ts_virtcol + test*/
349 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
350 AR5K_DESC_TX_STATUS1_SEQ_NUM);
351 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
352 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
353 ts->ts_antenna = 1;
354 ts->ts_status = 0;
355 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_0,
356 AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
357
358 if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
359 if (tx_status->tx_status_0 &
360 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
361 ts->ts_status |= AR5K_TXERR_XRETRY;
362
363 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
364 ts->ts_status |= AR5K_TXERR_FIFO;
365
366 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
367 ts->ts_status |= AR5K_TXERR_FILT;
368 }
369
370 return 0;
371}
372
373/*
374 * Proccess a tx status descriptor on 5212
375 */
376static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
377 struct ath5k_desc *desc, struct ath5k_tx_status *ts)
378{
379 struct ath5k_hw_4w_tx_ctl *tx_ctl;
380 struct ath5k_hw_tx_status *tx_status;
381
382 ATH5K_TRACE(ah->ah_sc);
383
384 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
385 tx_status = &desc->ud.ds_tx5212.tx_stat;
386
387 /* No frame has been send or error */
388 if (unlikely(!(tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE)))
389 return -EINPROGRESS;
390
391 /*
392 * Get descriptor status
393 */
394 ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
395 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
396 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
397 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
398 ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
399 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
400 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
401 AR5K_DESC_TX_STATUS1_SEQ_NUM);
402 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
403 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
404 ts->ts_antenna = (tx_status->tx_status_1 &
405 AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1;
406 ts->ts_status = 0;
407
408 switch (AR5K_REG_MS(tx_status->tx_status_1,
409 AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX)) {
410 case 0:
411 ts->ts_rate = tx_ctl->tx_control_3 &
412 AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
413 break;
414 case 1:
415 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
416 AR5K_4W_TX_DESC_CTL3_XMIT_RATE1);
417 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
418 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
419 break;
420 case 2:
421 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
422 AR5K_4W_TX_DESC_CTL3_XMIT_RATE2);
423 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
424 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2);
425 break;
426 case 3:
427 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
428 AR5K_4W_TX_DESC_CTL3_XMIT_RATE3);
429 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
430 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3);
431 break;
432 }
433
434 /* TX error */
435 if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
436 if (tx_status->tx_status_0 &
437 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
438 ts->ts_status |= AR5K_TXERR_XRETRY;
439
440 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
441 ts->ts_status |= AR5K_TXERR_FIFO;
442
443 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
444 ts->ts_status |= AR5K_TXERR_FILT;
445 }
446
447 return 0;
448}
449
450/*
451 * RX Descriptors
452 */
453
454/*
455 * Initialize an rx control descriptor
456 */
457static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
458 u32 size, unsigned int flags)
459{
460 struct ath5k_hw_rx_ctl *rx_ctl;
461
462 ATH5K_TRACE(ah->ah_sc);
463 rx_ctl = &desc->ud.ds_rx.rx_ctl;
464
465 /*
466 * Clear the descriptor
467 * If we don't clean the status descriptor,
468 * while scanning we get too many results,
469 * most of them virtual, after some secs
470 * of scanning system hangs. M.F.
471 */
472 memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
473
474 /* Setup descriptor */
475 rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
476 if (unlikely(rx_ctl->rx_control_1 != size))
477 return -EINVAL;
478
479 if (flags & AR5K_RXDESC_INTREQ)
480 rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
481
482 return 0;
483}
484
485/*
486 * Process the rx status descriptor on 5210/5211
487 */
488static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
489 struct ath5k_desc *desc, struct ath5k_rx_status *rs)
490{
491 struct ath5k_hw_rx_status *rx_status;
492
493 rx_status = &desc->ud.ds_rx.u.rx_stat;
494
495 /* No frame received / not ready */
496 if (unlikely(!(rx_status->rx_status_1 &
497 AR5K_5210_RX_DESC_STATUS1_DONE)))
498 return -EINPROGRESS;
499
500 /*
501 * Frame receive status
502 */
503 rs->rs_datalen = rx_status->rx_status_0 &
504 AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
505 rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
506 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
507 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
508 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
509 rs->rs_antenna = rx_status->rx_status_0 &
510 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA;
511 rs->rs_more = rx_status->rx_status_0 &
512 AR5K_5210_RX_DESC_STATUS0_MORE;
513 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */
514 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
515 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
516 rs->rs_status = 0;
517 rs->rs_phyerr = 0;
518
519 /*
520 * Key table status
521 */
522 if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
523 rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
524 AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
525 else
526 rs->rs_keyix = AR5K_RXKEYIX_INVALID;
527
528 /*
529 * Receive/descriptor errors
530 */
531 if (!(rx_status->rx_status_1 &
532 AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
533 if (rx_status->rx_status_1 &
534 AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
535 rs->rs_status |= AR5K_RXERR_CRC;
536
537 if (rx_status->rx_status_1 &
538 AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN)
539 rs->rs_status |= AR5K_RXERR_FIFO;
540
541 if (rx_status->rx_status_1 &
542 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
543 rs->rs_status |= AR5K_RXERR_PHY;
544 rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1,
545 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
546 }
547
548 if (rx_status->rx_status_1 &
549 AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
550 rs->rs_status |= AR5K_RXERR_DECRYPT;
551 }
552
553 return 0;
554}
555
556/*
557 * Process the rx status descriptor on 5212
558 */
static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
		struct ath5k_desc *desc, struct ath5k_rx_status *rs)
{
	struct ath5k_hw_rx_status *rx_status;
	struct ath5k_hw_rx_error *rx_err;

	ATH5K_TRACE(ah->ah_sc);
	rx_status = &desc->ud.ds_rx.u.rx_stat;

	/* Overlay on error: rx_err shares the same union storage as
	 * rx_status, so its fields are only meaningful on error frames */
	rx_err = &desc->ud.ds_rx.u.rx_err;

	/* No frame received / not ready: hardware has not set DONE yet */
	if (unlikely(!(rx_status->rx_status_1 &
	    AR5K_5212_RX_DESC_STATUS1_DONE)))
		return -EINPROGRESS;

	/*
	 * Frame receive status
	 * (length, RSSI, rate, antenna and the "more fragments" flag
	 * are all packed into status word 0; timestamp into word 1)
	 */
	rs->rs_datalen = rx_status->rx_status_0 &
		AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
	rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
		AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
	rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
		AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
	rs->rs_antenna = rx_status->rx_status_0 &
		AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA;
	rs->rs_more = rx_status->rx_status_0 &
		AR5K_5212_RX_DESC_STATUS0_MORE;
	rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
		AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
	rs->rs_status = 0;
	rs->rs_phyerr = 0;

	/*
	 * Key table status: report the decryption key index used by the
	 * hardware, or mark it invalid when no key matched
	 */
	if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
		rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
				AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
	else
		rs->rs_keyix = AR5K_RXKEYIX_INVALID;

	/*
	 * Receive/descriptor errors: only examined when the hardware did
	 * not flag the frame as received OK
	 */
	if (!(rx_status->rx_status_1 &
	    AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
		if (rx_status->rx_status_1 &
				AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
			rs->rs_status |= AR5K_RXERR_CRC;

		if (rx_status->rx_status_1 &
				AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
			rs->rs_status |= AR5K_RXERR_PHY;
			/* PHY error code comes from the error overlay,
			 * unlike on 5210/5211 where it is in status word 1 */
			rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
					   AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
		}

		if (rx_status->rx_status_1 &
				AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
			rs->rs_status |= AR5K_RXERR_DECRYPT;

		if (rx_status->rx_status_1 &
				AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
			rs->rs_status |= AR5K_RXERR_MIC;
	}

	return 0;
}
630
631/*
632 * Init function pointers inside ath5k_hw struct
633 */
634int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
635{
636
637 if (ah->ah_version != AR5K_AR5210 &&
638 ah->ah_version != AR5K_AR5211 &&
639 ah->ah_version != AR5K_AR5212)
640 return -ENOTSUPP;
641
642 /* XXX: What is this magic value and where is it used ? */
643 if (ah->ah_version == AR5K_AR5212)
644 ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
645 else if (ah->ah_version == AR5K_AR5211)
646 ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
647
648 if (ah->ah_version == AR5K_AR5212) {
649 ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
650 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
651 ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc;
652 ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
653 } else {
654 ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
655 ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
656 ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc;
657 ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
658 }
659
660 if (ah->ah_version == AR5K_AR5212)
661 ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
662 else if (ah->ah_version <= AR5K_AR5211)
663 ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
664
665 return 0;
666}
667
diff --git a/drivers/net/wireless/ath5k/hw.h b/drivers/net/wireless/ath5k/desc.h
index 64fca8dcb386..56158c804e3e 100644
--- a/drivers/net/wireless/ath5k/hw.h
+++ b/drivers/net/wireless/ath5k/desc.h
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org> 2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com> 3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007 Matthew W. S. Bell <mentor@madwifi.org>
5 * Copyright (c) 2007 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
6 * 4 *
7 * Permission to use, copy, modify, and distribute this software for any 5 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -15,159 +13,9 @@
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
18 */ 17 */
19 18
20#include <linux/delay.h>
21
22/*
23 * Gain settings
24 */
25
26enum ath5k_rfgain {
27 AR5K_RFGAIN_INACTIVE = 0,
28 AR5K_RFGAIN_READ_REQUESTED,
29 AR5K_RFGAIN_NEED_CHANGE,
30};
31
32#define AR5K_GAIN_CRN_FIX_BITS_5111 4
33#define AR5K_GAIN_CRN_FIX_BITS_5112 7
34#define AR5K_GAIN_CRN_MAX_FIX_BITS AR5K_GAIN_CRN_FIX_BITS_5112
35#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN 15
36#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN 20
37#define AR5K_GAIN_CCK_PROBE_CORR 5
38#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA 15
39#define AR5K_GAIN_STEP_COUNT 10
40#define AR5K_GAIN_PARAM_TX_CLIP 0
41#define AR5K_GAIN_PARAM_PD_90 1
42#define AR5K_GAIN_PARAM_PD_84 2
43#define AR5K_GAIN_PARAM_GAIN_SEL 3
44#define AR5K_GAIN_PARAM_MIX_ORN 0
45#define AR5K_GAIN_PARAM_PD_138 1
46#define AR5K_GAIN_PARAM_PD_137 2
47#define AR5K_GAIN_PARAM_PD_136 3
48#define AR5K_GAIN_PARAM_PD_132 4
49#define AR5K_GAIN_PARAM_PD_131 5
50#define AR5K_GAIN_PARAM_PD_130 6
51#define AR5K_GAIN_CHECK_ADJUST(_g) \
52 ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
53
54struct ath5k_gain_opt_step {
55 s16 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
56 s32 gos_gain;
57};
58
59struct ath5k_gain {
60 u32 g_step_idx;
61 u32 g_current;
62 u32 g_target;
63 u32 g_low;
64 u32 g_high;
65 u32 g_f_corr;
66 u32 g_active;
67 const struct ath5k_gain_opt_step *g_step;
68};
69
70
71/*
72 * HW SPECIFIC STRUCTS
73 */
74
75/* Some EEPROM defines */
76#define AR5K_EEPROM_EEP_SCALE 100
77#define AR5K_EEPROM_EEP_DELTA 10
78#define AR5K_EEPROM_N_MODES 3
79#define AR5K_EEPROM_N_5GHZ_CHAN 10
80#define AR5K_EEPROM_N_2GHZ_CHAN 3
81#define AR5K_EEPROM_MAX_CHAN 10
82#define AR5K_EEPROM_N_PCDAC 11
83#define AR5K_EEPROM_N_TEST_FREQ 8
84#define AR5K_EEPROM_N_EDGES 8
85#define AR5K_EEPROM_N_INTERCEPTS 11
86#define AR5K_EEPROM_FREQ_M(_v) AR5K_EEPROM_OFF(_v, 0x7f, 0xff)
87#define AR5K_EEPROM_PCDAC_M 0x3f
88#define AR5K_EEPROM_PCDAC_START 1
89#define AR5K_EEPROM_PCDAC_STOP 63
90#define AR5K_EEPROM_PCDAC_STEP 1
91#define AR5K_EEPROM_NON_EDGE_M 0x40
92#define AR5K_EEPROM_CHANNEL_POWER 8
93#define AR5K_EEPROM_N_OBDB 4
94#define AR5K_EEPROM_OBDB_DIS 0xffff
95#define AR5K_EEPROM_CHANNEL_DIS 0xff
96#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10)
97#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32)
98#define AR5K_EEPROM_MAX_CTLS 32
99#define AR5K_EEPROM_N_XPD_PER_CHANNEL 4
100#define AR5K_EEPROM_N_XPD0_POINTS 4
101#define AR5K_EEPROM_N_XPD3_POINTS 3
102#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ 35
103#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ 55
104#define AR5K_EEPROM_POWER_M 0x3f
105#define AR5K_EEPROM_POWER_MIN 0
106#define AR5K_EEPROM_POWER_MAX 3150
107#define AR5K_EEPROM_POWER_STEP 50
108#define AR5K_EEPROM_POWER_TABLE_SIZE 64
109#define AR5K_EEPROM_N_POWER_LOC_11B 4
110#define AR5K_EEPROM_N_POWER_LOC_11G 6
111#define AR5K_EEPROM_I_GAIN 10
112#define AR5K_EEPROM_CCK_OFDM_DELTA 15
113#define AR5K_EEPROM_N_IQ_CAL 2
114
115/* Struct to hold EEPROM calibration data */
116struct ath5k_eeprom_info {
117 u16 ee_magic;
118 u16 ee_protect;
119 u16 ee_regdomain;
120 u16 ee_version;
121 u16 ee_header;
122 u16 ee_ant_gain;
123 u16 ee_misc0;
124 u16 ee_misc1;
125 u16 ee_cck_ofdm_gain_delta;
126 u16 ee_cck_ofdm_power_delta;
127 u16 ee_scaled_cck_delta;
128
129 /* Used for tx thermal adjustment (eeprom_init, rfregs) */
130 u16 ee_tx_clip;
131 u16 ee_pwd_84;
132 u16 ee_pwd_90;
133 u16 ee_gain_select;
134
135 /* RF Calibration settings (reset, rfregs) */
136 u16 ee_i_cal[AR5K_EEPROM_N_MODES];
137 u16 ee_q_cal[AR5K_EEPROM_N_MODES];
138 u16 ee_fixed_bias[AR5K_EEPROM_N_MODES];
139 u16 ee_turbo_max_power[AR5K_EEPROM_N_MODES];
140 u16 ee_xr_power[AR5K_EEPROM_N_MODES];
141 u16 ee_switch_settling[AR5K_EEPROM_N_MODES];
142 u16 ee_ant_tx_rx[AR5K_EEPROM_N_MODES];
143 u16 ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC];
144 u16 ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
145 u16 ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
146 u16 ee_tx_end2xlna_enable[AR5K_EEPROM_N_MODES];
147 u16 ee_tx_end2xpa_disable[AR5K_EEPROM_N_MODES];
148 u16 ee_tx_frm2xpa_enable[AR5K_EEPROM_N_MODES];
149 u16 ee_thr_62[AR5K_EEPROM_N_MODES];
150 u16 ee_xlna_gain[AR5K_EEPROM_N_MODES];
151 u16 ee_xpd[AR5K_EEPROM_N_MODES];
152 u16 ee_x_gain[AR5K_EEPROM_N_MODES];
153 u16 ee_i_gain[AR5K_EEPROM_N_MODES];
154 u16 ee_margin_tx_rx[AR5K_EEPROM_N_MODES];
155
156 /* Unused */
157 u16 ee_false_detect[AR5K_EEPROM_N_MODES];
158 u16 ee_cal_pier[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_2GHZ_CHAN];
159 u16 ee_channel[AR5K_EEPROM_N_MODES][AR5K_EEPROM_MAX_CHAN]; /*empty*/
160
161 /* Conformance test limits (Unused) */
162 u16 ee_ctls;
163 u16 ee_ctl[AR5K_EEPROM_MAX_CTLS];
164
165 /* Noise Floor Calibration settings */
166 s16 ee_noise_floor_thr[AR5K_EEPROM_N_MODES];
167 s8 ee_adc_desired_size[AR5K_EEPROM_N_MODES];
168 s8 ee_pga_desired_size[AR5K_EEPROM_N_MODES];
169};
170
171/* 19/*
172 * Internal RX/TX descriptor structures 20 * Internal RX/TX descriptor structures
173 * (rX: reserved fields possibily used by future versions of the ar5k chipset) 21 * (rX: reserved fields possibily used by future versions of the ar5k chipset)
@@ -178,14 +26,15 @@ struct ath5k_eeprom_info {
178 */ 26 */
179struct ath5k_hw_rx_ctl { 27struct ath5k_hw_rx_ctl {
180 u32 rx_control_0; /* RX control word 0 */ 28 u32 rx_control_0; /* RX control word 0 */
29 u32 rx_control_1; /* RX control word 1 */
30} __packed;
181 31
32/* RX control word 0 field/sflags */
182#define AR5K_DESC_RX_CTL0 0x00000000 33#define AR5K_DESC_RX_CTL0 0x00000000
183 34
184 u32 rx_control_1; /* RX control word 1 */ 35/* RX control word 1 fields/flags */
185
186#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff 36#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff
187#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 37#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000
188} __packed;
189 38
190/* 39/*
191 * common hardware RX status descriptor 40 * common hardware RX status descriptor
@@ -197,6 +46,7 @@ struct ath5k_hw_rx_status {
197} __packed; 46} __packed;
198 47
199/* 5210/5211 */ 48/* 5210/5211 */
49/* RX status word 0 fields/flags */
200#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff 50#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff
201#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 51#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000
202#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 52#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000
@@ -205,6 +55,8 @@ struct ath5k_hw_rx_status {
205#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19 55#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19
206#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000 56#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000
207#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27 57#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27
58
59/* RX status word 1 fields/flags */
208#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 60#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001
209#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 61#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
210#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 62#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004
@@ -220,6 +72,7 @@ struct ath5k_hw_rx_status {
220#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 72#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000
221 73
222/* 5212 */ 74/* 5212 */
75/* RX status word 0 fields/flags */
223#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff 76#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff
224#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 77#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000
225#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 78#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000
@@ -229,6 +82,8 @@ struct ath5k_hw_rx_status {
229#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20 82#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20
230#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 83#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000
231#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28 84#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28
85
86/* RX status word 1 fields/flags */
232#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 87#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001
233#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 88#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
234#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 89#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004
@@ -246,16 +101,18 @@ struct ath5k_hw_rx_status {
246 * common hardware RX error descriptor 101 * common hardware RX error descriptor
247 */ 102 */
248struct ath5k_hw_rx_error { 103struct ath5k_hw_rx_error {
249 u32 rx_error_0; /* RX error word 0 */ 104 u32 rx_error_0; /* RX status word 0 */
105 u32 rx_error_1; /* RX status word 1 */
106} __packed;
250 107
108/* RX error word 0 fields/flags */
251#define AR5K_RX_DESC_ERROR0 0x00000000 109#define AR5K_RX_DESC_ERROR0 0x00000000
252 110
253 u32 rx_error_1; /* RX error word 1 */ 111/* RX error word 1 fields/flags */
254
255#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00 112#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
256#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8 113#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
257} __packed;
258 114
115/* PHY Error codes */
259#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00 116#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00
260#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20 117#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20
261#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40 118#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40
@@ -270,7 +127,10 @@ struct ath5k_hw_rx_error {
270 */ 127 */
271struct ath5k_hw_2w_tx_ctl { 128struct ath5k_hw_2w_tx_ctl {
272 u32 tx_control_0; /* TX control word 0 */ 129 u32 tx_control_0; /* TX control word 0 */
130 u32 tx_control_1; /* TX control word 1 */
131} __packed;
273 132
133/* TX control word 0 fields/flags */
274#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff 134#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff
275#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN 0x0003f000 /*[5210 ?]*/ 135#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN 0x0003f000 /*[5210 ?]*/
276#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S 12 136#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S 12
@@ -284,29 +144,34 @@ struct ath5k_hw_2w_tx_ctl {
284#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S 26 144#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S 26
285#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000 145#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000
286#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000 146#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000
287#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT (ah->ah_version == AR5K_AR5210 ? \ 147
288 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \ 148#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT \
289 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211) 149 (ah->ah_version == AR5K_AR5210 ? \
150 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \
151 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211)
152
290#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25 153#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
291#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 154#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000
292#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 155#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000
293 156
294 u32 tx_control_1; /* TX control word 1 */ 157/* TX control word 1 fields/flags */
295
296#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff 158#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff
297#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 159#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000
298#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 0x0007e000 160#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 0x0007e000
299#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211 0x000fe000 161#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211 0x000fe000
300#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX (ah->ah_version == AR5K_AR5210 ? \ 162
301 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 : \ 163#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX \
302 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211) 164 (ah->ah_version == AR5K_AR5210 ? \
165 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 : \
166 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211)
167
303#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13 168#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13
304#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE 0x00700000 /*[5211]*/ 169#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE 0x00700000 /*[5211]*/
305#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S 20 170#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S 20
306#define AR5K_2W_TX_DESC_CTL1_NOACK 0x00800000 /*[5211]*/ 171#define AR5K_2W_TX_DESC_CTL1_NOACK 0x00800000 /*[5211]*/
307#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION 0xfff80000 /*[5210 ?]*/ 172#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION 0xfff80000 /*[5210 ?]*/
308} __packed;
309 173
174/* Frame types */
310#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0x00 175#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0x00
311#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 0x04 176#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 0x04
312#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 0x08 177#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 0x08
@@ -378,7 +243,10 @@ struct ath5k_hw_4w_tx_ctl {
378 */ 243 */
379struct ath5k_hw_tx_status { 244struct ath5k_hw_tx_status {
380 u32 tx_status_0; /* TX status word 0 */ 245 u32 tx_status_0; /* TX status word 0 */
246 u32 tx_status_1; /* TX status word 1 */
247} __packed;
381 248
249/* TX status word 0 fields/flags */
382#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 250#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001
383#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 251#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002
384#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 252#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004
@@ -400,8 +268,7 @@ struct ath5k_hw_tx_status {
400#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 268#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000
401#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16 269#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16
402 270
403 u32 tx_status_1; /* TX status word 1 */ 271/* TX status word 1 fields/flags */
404
405#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 272#define AR5K_DESC_TX_STATUS1_DONE 0x00000001
406#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe 273#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe
407#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1 274#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1
@@ -411,8 +278,6 @@ struct ath5k_hw_tx_status {
411#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S 21 278#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S 21
412#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS 0x00800000 279#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS 0x00800000
413#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA 0x01000000 280#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA 0x01000000
414} __packed;
415
416 281
417/* 282/*
418 * 5210/5211 hardware TX descriptor 283 * 5210/5211 hardware TX descriptor
@@ -441,176 +306,27 @@ struct ath5k_hw_all_rx_desc {
441 } u; 306 } u;
442} __packed; 307} __packed;
443 308
444
445/* 309/*
446 * AR5K REGISTER ACCESS 310 * Atheros hardware descriptor
311 * This is read and written to by the hardware
447 */ 312 */
313struct ath5k_desc {
314 u32 ds_link; /* physical address of the next descriptor */
315 u32 ds_data; /* physical address of data buffer (skb) */
448 316
449/*Swap RX/TX Descriptor for big endian archs*/ 317 union {
450#if defined(__BIG_ENDIAN) 318 struct ath5k_hw_5210_tx_desc ds_tx5210;
451#define AR5K_INIT_CFG ( \ 319 struct ath5k_hw_5212_tx_desc ds_tx5212;
452 AR5K_CFG_SWTD | AR5K_CFG_SWRD \ 320 struct ath5k_hw_all_rx_desc ds_rx;
453) 321 } ud;
454#else 322} __packed;
455#define AR5K_INIT_CFG 0x00000000
456#endif
457
458/*#define AR5K_REG_READ(_reg) ath5k_hw_reg_read(ah, _reg)
459
460#define AR5K_REG_WRITE(_reg, _val) ath5k_hw_reg_write(ah, _val, _reg)*/
461
462#define AR5K_REG_SM(_val, _flags) \
463 (((_val) << _flags##_S) & (_flags))
464
465#define AR5K_REG_MS(_val, _flags) \
466 (((_val) & (_flags)) >> _flags##_S)
467
468/* Some registers can hold multiple values of interest. For this
469 * reason when we want to write to these registers we must first
470 * retrieve the values which we do not want to clear (lets call this
471 * old_data) and then set the register with this and our new_value:
472 * ( old_data | new_value) */
473#define AR5K_REG_WRITE_BITS(ah, _reg, _flags, _val) \
474 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & ~(_flags)) | \
475 (((_val) << _flags##_S) & (_flags)), _reg)
476
477#define AR5K_REG_MASKED_BITS(ah, _reg, _flags, _mask) \
478 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & \
479 (_mask)) | (_flags), _reg)
480
481#define AR5K_REG_ENABLE_BITS(ah, _reg, _flags) \
482 ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) | (_flags), _reg)
483
484#define AR5K_REG_DISABLE_BITS(ah, _reg, _flags) \
485 ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) & ~(_flags), _reg)
486
487#define AR5K_PHY_WRITE(ah, _reg, _val) \
488 ath5k_hw_reg_write(ah, _val, (ah)->ah_phy + ((_reg) << 2))
489
490#define AR5K_PHY_READ(ah, _reg) \
491 ath5k_hw_reg_read(ah, (ah)->ah_phy + ((_reg) << 2))
492
493#define AR5K_REG_WAIT(_i) do { \
494 if (_i % 64) \
495 udelay(1); \
496} while (0)
497
498#define AR5K_EEPROM_READ(_o, _v) do { \
499 if ((ret = ath5k_hw_eeprom_read(ah, (_o), &(_v))) != 0) \
500 return (ret); \
501} while (0)
502
503#define AR5K_EEPROM_READ_HDR(_o, _v) \
504 AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v); \
505
506/* Read status of selected queue */
507#define AR5K_REG_READ_Q(ah, _reg, _queue) \
508 (ath5k_hw_reg_read(ah, _reg) & (1 << _queue)) \
509
510#define AR5K_REG_WRITE_Q(ah, _reg, _queue) \
511 ath5k_hw_reg_write(ah, (1 << _queue), _reg)
512
513#define AR5K_Q_ENABLE_BITS(_reg, _queue) do { \
514 _reg |= 1 << _queue; \
515} while (0)
516
517#define AR5K_Q_DISABLE_BITS(_reg, _queue) do { \
518 _reg &= ~(1 << _queue); \
519} while (0)
520
521#define AR5K_LOW_ID(_a)( \
522(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24 \
523)
524
525#define AR5K_HIGH_ID(_a) ((_a)[4] | (_a)[5] << 8)
526
527/*
528 * Initial register values
529 */
530
531/*
532 * Common initial register values
533 */
534#define AR5K_INIT_MODE CHANNEL_B
535
536#define AR5K_INIT_TX_LATENCY 502
537#define AR5K_INIT_USEC 39
538#define AR5K_INIT_USEC_TURBO 79
539#define AR5K_INIT_USEC_32 31
540#define AR5K_INIT_CARR_SENSE_EN 1
541#define AR5K_INIT_PROG_IFS 920
542#define AR5K_INIT_PROG_IFS_TURBO 960
543#define AR5K_INIT_EIFS 3440
544#define AR5K_INIT_EIFS_TURBO 6880
545#define AR5K_INIT_SLOT_TIME 396
546#define AR5K_INIT_SLOT_TIME_TURBO 480
547#define AR5K_INIT_ACK_CTS_TIMEOUT 1024
548#define AR5K_INIT_ACK_CTS_TIMEOUT_TURBO 0x08000800
549#define AR5K_INIT_SIFS 560
550#define AR5K_INIT_SIFS_TURBO 480
551#define AR5K_INIT_SH_RETRY 10
552#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY
553#define AR5K_INIT_SSH_RETRY 32
554#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY
555#define AR5K_INIT_TX_RETRY 10
556#define AR5K_INIT_TOPS 8
557#define AR5K_INIT_RXNOFRM 8
558#define AR5K_INIT_RPGTO 0
559#define AR5K_INIT_TXNOFRM 0
560#define AR5K_INIT_BEACON_PERIOD 65535
561#define AR5K_INIT_TIM_OFFSET 0
562#define AR5K_INIT_BEACON_EN 0
563#define AR5K_INIT_RESET_TSF 0
564
565#define AR5K_INIT_TRANSMIT_LATENCY ( \
566 (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \
567 (AR5K_INIT_USEC) \
568)
569#define AR5K_INIT_TRANSMIT_LATENCY_TURBO ( \
570 (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \
571 (AR5K_INIT_USEC_TURBO) \
572)
573#define AR5K_INIT_PROTO_TIME_CNTRL ( \
574 (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS << 12) | \
575 (AR5K_INIT_PROG_IFS) \
576)
577#define AR5K_INIT_PROTO_TIME_CNTRL_TURBO ( \
578 (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS_TURBO << 12) | \
579 (AR5K_INIT_PROG_IFS_TURBO) \
580)
581#define AR5K_INIT_BEACON_CONTROL ( \
582 (AR5K_INIT_RESET_TSF << 24) | (AR5K_INIT_BEACON_EN << 23) | \
583 (AR5K_INIT_TIM_OFFSET << 16) | (AR5K_INIT_BEACON_PERIOD) \
584)
585
586/*
587 * Non-common initial register values which have to be loaded into the
588 * card at boot time and after each reset.
589 */
590
591/* Register dumps are done per operation mode */
592#define AR5K_INI_RFGAIN_5GHZ 0
593#define AR5K_INI_RFGAIN_2GHZ 1
594
595#define AR5K_INI_VAL_11A 0
596#define AR5K_INI_VAL_11A_TURBO 1
597#define AR5K_INI_VAL_11B 2
598#define AR5K_INI_VAL_11G 3
599#define AR5K_INI_VAL_11G_TURBO 4
600#define AR5K_INI_VAL_XR 0
601#define AR5K_INI_VAL_MAX 5
602
603#define AR5K_RF5111_INI_RF_MAX_BANKS AR5K_MAX_RF_BANKS
604#define AR5K_RF5112_INI_RF_MAX_BANKS AR5K_MAX_RF_BANKS
605 323
606static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits) 324#define AR5K_RXDESC_INTREQ 0x0020
607{
608 u32 retval = 0, bit, i;
609 325
610 for (i = 0; i < bits; i++) { 326#define AR5K_TXDESC_CLRDMASK 0x0001
611 bit = (val >> i) & 1; 327#define AR5K_TXDESC_NOACK 0x0002 /*[5211+]*/
612 retval = (retval << 1) | bit; 328#define AR5K_TXDESC_RTSENA 0x0004
613 } 329#define AR5K_TXDESC_CTSENA 0x0008
330#define AR5K_TXDESC_INTREQ 0x0010
331#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/
614 332
615 return retval;
616}
diff --git a/drivers/net/wireless/ath5k/dma.c b/drivers/net/wireless/ath5k/dma.c
new file mode 100644
index 000000000000..a28090be9603
--- /dev/null
+++ b/drivers/net/wireless/ath5k/dma.c
@@ -0,0 +1,566 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/*************************************\
20* DMA and interrupt masking functions *
21\*************************************/
22
23/*
24 * dma.c - DMA and interrupt masking functions
25 *
26 * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
27 * handle queue setup for 5210 chipset (rest are handled on qcu.c).
28 * Also we setup interrupt mask register (IMR) and read the various iterrupt
29 * status registers (ISR).
30 *
31 * TODO: Handle SISR on 5211+ and introduce a function to return the queue
32 * number that resulted the interrupt.
33 */
34
35#include "ath5k.h"
36#include "reg.h"
37#include "debug.h"
38#include "base.h"
39
40/*********\
41* Receive *
42\*********/
43
44/**
45 * ath5k_hw_start_rx_dma - Start DMA receive
46 *
47 * @ah: The &struct ath5k_hw
 *
 * Sets the RX enable bit in the control register. Always succeeds.
48 */
49void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
50{
51	ATH5K_TRACE(ah->ah_sc);
52	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
	/* Read back — presumably flushes the posted write to the chip;
	 * the same write-then-read pattern is used throughout this file. */
53	ath5k_hw_reg_read(ah, AR5K_CR);
54}
55
56/**
57 * ath5k_hw_stop_rx_dma - Stop DMA receive
58 *
59 * @ah: The &struct ath5k_hw
 *
 * Writes the RX disable bit and polls until the RX enable bit clears.
 * Returns 0 on success, -EBUSY if the RX unit is still active after
 * 2000 polls of 10us each (~20ms).
60 */
61int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
62{
63	unsigned int i;
64
65	ATH5K_TRACE(ah->ah_sc);
66	ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
67
68	/*
69	 * It may take some time to disable the DMA receive unit
70	 */
71	for (i = 2000; i > 0 &&
72			(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
73			i--)
74		udelay(10);
75
	/* i == 0 means the poll loop exhausted without RXE clearing */
76	return i ? 0 : -EBUSY;
77}
78
79/**
80 * ath5k_hw_get_rxdp - Get RX Descriptor's address
81 *
82 * @ah: The &struct ath5k_hw
83 *
 * Returns the current value of the RX descriptor pointer register.
 *
84 * XXX: Is RXDP read and clear ?
85 */
86u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
87{
88	return ath5k_hw_reg_read(ah, AR5K_RXDP);
89}
90
91/**
92 * ath5k_hw_set_rxdp - Set RX Descriptor's address
93 *
94 * @ah: The &struct ath5k_hw
95 * @phys_addr: RX descriptor address
96 *
 * Writes @phys_addr to the RX descriptor pointer register unconditionally.
 *
97 * XXX: Should we check if rx is enabled before setting rxdp ?
98 */
99void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
100{
101	ATH5K_TRACE(ah->ah_sc);
102
103	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
104}
105
106
107/**********\
108* Transmit *
109\**********/
110
111/**
112 * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
113 *
114 * @ah: The &struct ath5k_hw
115 * @queue: The hw queue number
116 *
117 * Start DMA transmit for a specific queue and since 5210 doesn't have
118 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
119 * queue for normal data and one queue for beacons). For queue setup
 * on newer chips check out qcu.c. Returns -EIO if the queue is inactive
 * (or, on 5211+, already scheduled for disable), and -EINVAL if the 5210
 * queue type is not data/beacon/CAB.
121 *
123 * NOTE: Must be called after setting up tx control descriptor for that
124 * queue (see below).
125 */
126int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
127{
128	u32 tx_queue;
129
130	ATH5K_TRACE(ah->ah_sc);
131	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
132
133	/* Return if queue is declared inactive */
134	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
135		return -EIO;
136
137	if (ah->ah_version == AR5K_AR5210) {
		/* Read-modify-write the control register */
138		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
139
140		/*
141		 * Set the queue by type on 5210
		 * (TXE* enables a queue, the ~TXD* mask presumably keeps the
		 * corresponding disable bit from being set at the same time)
142		 */
143		switch (ah->ah_txq[queue].tqi_type) {
144		case AR5K_TX_QUEUE_DATA:
145			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
146			break;
147		case AR5K_TX_QUEUE_BEACON:
148			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
149			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
150					AR5K_BSR);
151			break;
152		case AR5K_TX_QUEUE_CAB:
153			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
154			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
155				AR5K_BCR_BDMAE, AR5K_BSR);
156			break;
157		default:
158			return -EINVAL;
159		}
160		/* Start queue */
161		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
162		ath5k_hw_reg_read(ah, AR5K_CR);
163	} else {
164		/* Return if queue is disabled */
165		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
166			return -EIO;
167
168		/* Start queue */
169		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
170	}
171
172	return 0;
173}
174
175/**
176 * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
177 *
178 * @ah: The &struct ath5k_hw
179 * @queue: The hw queue number
180 *
181 * Stop DMA transmit on a specific hw queue and drain queue so we don't
182 * have any pending frames. Returns -EBUSY if we still have pending frames,
183 * -EINVAL if queue number is out of range.
184 *
185 * TODO: Test queue drain code
186 */
187int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
188{
189 unsigned int i = 100;
190 u32 tx_queue, pending;
191
192 ATH5K_TRACE(ah->ah_sc);
193 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
194
195 /* Return if queue is declared inactive */
196 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
197 return -EIO;
198
199 if (ah->ah_version == AR5K_AR5210) {
200 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
201
202 /*
203 * Set by queue type
204 */
205 switch (ah->ah_txq[queue].tqi_type) {
206 case AR5K_TX_QUEUE_DATA:
207 tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
208 break;
209 case AR5K_TX_QUEUE_BEACON:
210 case AR5K_TX_QUEUE_CAB:
211 /* XXX Fix me... */
212 tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
213 ath5k_hw_reg_write(ah, 0, AR5K_BSR);
214 break;
215 default:
216 return -EINVAL;
217 }
218
219 /* Stop queue */
220 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
221 ath5k_hw_reg_read(ah, AR5K_CR);
222 } else {
223 /*
224 * Schedule TX disable and wait until queue is empty
225 */
226 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
227
228 /*Check for pending frames*/
229 do {
230 pending = ath5k_hw_reg_read(ah,
231 AR5K_QUEUE_STATUS(queue)) &
232 AR5K_QCU_STS_FRMPENDCNT;
233 udelay(100);
234 } while (--i && pending);
235
236 /* Clear register */
237 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
238 if (pending)
239 return -EBUSY;
240 }
241
242 /* TODO: Check for success else return error */
243 return 0;
244}
245
246/**
247 * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
248 *
249 * @ah: The &struct ath5k_hw
250 * @queue: The hw queue number
251 *
252 * Get TX descriptor's address for a specific queue. For 5210 we ignore
253 * the queue number and use tx queue type since we only have 2 queues.
254 * We use TXDP0 for normal data queue and TXDP1 for beacon queue.
255 * For newer chips with QCU/DCU we just read the corresponding TXDP register.
 * Returns 0xffffffff for an unknown 5210 queue type.
256 *
257 * XXX: Is TXDP read and clear ?
258 */
259u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
260{
261	u16 tx_reg;
262
263	ATH5K_TRACE(ah->ah_sc);
264	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
265
266	/*
267	 * Get the transmit queue descriptor pointer from the selected queue
268	 */
269	/*5210 doesn't have QCU*/
270	if (ah->ah_version == AR5K_AR5210) {
271		switch (ah->ah_txq[queue].tqi_type) {
272		case AR5K_TX_QUEUE_DATA:
273			tx_reg = AR5K_NOQCU_TXDP0;
274			break;
275		case AR5K_TX_QUEUE_BEACON:
276		case AR5K_TX_QUEUE_CAB:
277			tx_reg = AR5K_NOQCU_TXDP1;
278			break;
279		default:
			/* No valid register to read — signal with all-ones */
280			return 0xffffffff;
281		}
282	} else {
283		tx_reg = AR5K_QUEUE_TXDP(queue);
284	}
285
286	return ath5k_hw_reg_read(ah, tx_reg);
287}
288
289/**
290 * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
291 *
292 * @ah: The &struct ath5k_hw
293 * @queue: The hw queue number
 * @phys_addr: TX descriptor address to program
294 *
295 * Set TX descriptor's address for a specific queue. For 5210 we ignore
296 * the queue number and we use tx queue type since we only have 2 queues
297 * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue.
298 * For newer chips with QCU/DCU we just set the corresponding TXDP register.
299 * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
300 * active.
301 */
302int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
303{
304	u16 tx_reg;
305
306	ATH5K_TRACE(ah->ah_sc);
307	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
308
309	/*
310	 * Set the transmit queue descriptor pointer register by type
311	 * on 5210
312	 */
313	if (ah->ah_version == AR5K_AR5210) {
314		switch (ah->ah_txq[queue].tqi_type) {
315		case AR5K_TX_QUEUE_DATA:
316			tx_reg = AR5K_NOQCU_TXDP0;
317			break;
318		case AR5K_TX_QUEUE_BEACON:
319		case AR5K_TX_QUEUE_CAB:
320			tx_reg = AR5K_NOQCU_TXDP1;
321			break;
322		default:
323			return -EINVAL;
324		}
325	} else {
326		/*
327		 * Set the transmit queue descriptor pointer for
328		 * the selected queue on QCU for 5211+
329		 * (this won't work if the queue is still active)
330		 */
331		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
332			return -EIO;
333
334		tx_reg = AR5K_QUEUE_TXDP(queue);
335	}
336
337	/* Set descriptor pointer */
338	ath5k_hw_reg_write(ah, phys_addr, tx_reg);
339
340	return 0;
341}
342
343/**
344 * ath5k_hw_update_tx_triglevel - Update tx trigger level
345 *
346 * @ah: The &struct ath5k_hw
347 * @increase: Flag to force increase of trigger level
348 *
349 * This function increases/decreases the tx trigger level for the tx fifo
350 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
351 * the buffer and transmits it's data. Lowering this results sending small
352 * frames more quickly but can lead to tx underruns, raising it a lot can
353 * result other problems (i think bmiss is related). Right now we start with
354 * the lowest possible (64Bytes) and if we get tx underrun we increase it using
 * the increase flag. Returns -EIO if decreasing would go below the minimum
 * threshold, 0 otherwise.
356 *
357 * XXX: Link this with tx DMA size ?
358 * XXX: Use it to save interrupts ?
359 * TODO: Needs testing, i think it's related to bmiss...
360 */
361int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
362{
363	u32 trigger_level, imr;
364	int ret = -EIO;
365
366	ATH5K_TRACE(ah->ah_sc);
367
368	/*
369	 * Disable interrupts by setting the mask
	 * (the old mask is saved so it can be restored below)
370	 */
371	imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
372
	/* Current trigger level, extracted from the TXCFG register */
373	trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
374			AR5K_TXCFG_TXFULL);
375
376	if (!increase) {
		/* Decrease by one step; bail out (ret stays -EIO) if that
		 * would drop below the tunable minimum */
377		if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
378			goto done;
379	} else
		/* Move halfway from the current level toward the maximum */
380		trigger_level +=
381			((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
382
383	/*
384	 * Update trigger level on success
385	 */
386	if (ah->ah_version == AR5K_AR5210)
387		ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
388	else
389		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
390				AR5K_TXCFG_TXFULL, trigger_level);
391
392	ret = 0;
393
394done:
395	/*
396	 * Restore interrupt mask
397	 */
398	ath5k_hw_set_imr(ah, imr);
399
400	return ret;
401}
402
403/*******************\
404* Interrupt masking *
405\*******************/
406
407/**
408 * ath5k_hw_is_intr_pending - Check if we have pending interrupts
409 *
410 * @ah: The &struct ath5k_hw
411 *
412 * Check if we have pending interrupts to process. Returns 1 if we
413 * have pending interrupts and 0 if we haven't.
414 */
415bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
416{
417	ATH5K_TRACE(ah->ah_sc);
	/* The u32 register value is implicitly collapsed to bool:
	 * any non-zero INTPEND value reads as "pending". */
418	return ath5k_hw_reg_read(ah, AR5K_INTPEND);
419}
420
421/**
422 * ath5k_hw_get_isr - Get interrupt status
423 *
424 * @ah: The @struct ath5k_hw
425 * @interrupt_mask: Driver's interrupt mask used to filter out
426 * interrupts in sw.
427 *
428 * This function is used inside our interrupt handler to determine the reason
429 * for the interrupt by reading Primary Interrupt Status Register. Returns an
430 * abstract interrupt status mask which is mostly ISR with some uncommon bits
431 * being mapped on some standard non hw-specific positions
432 * (check out &ath5k_int).
 * Returns 0 on success, -ENODEV if the register reads back all-ones
 * (card gone).
433 *
434 * NOTE: We use read-and-clear register, so after this function is called ISR
435 * is zeroed.
436 *
437 * XXX: Why filter interrupts in sw with interrupt_mask ? No benefit at all
438 * plus it can be misleading (one might thing that we save interrupts this way)
439 */
440int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
441{
442	u32 data;
443
444	ATH5K_TRACE(ah->ah_sc);
445
446	/*
447	 * Read interrupt status from the Interrupt Status register
448	 * on 5210
449	 */
450	if (ah->ah_version == AR5K_AR5210) {
451		data = ath5k_hw_reg_read(ah, AR5K_ISR);
		/* All-ones read means the card is no longer responding */
452		if (unlikely(data == AR5K_INT_NOCARD)) {
453			*interrupt_mask = data;
454			return -ENODEV;
455		}
456	} else {
457		/*
458		 * Read interrupt status from the Read-And-Clear
459		 * shadow register.
460		 * Note: PISR/SISR Not available on 5210
461		 */
462		data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
463	}
464
465	/*
466	 * Get abstract interrupt mask (driver-compatible)
467	 */
468	*interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
469
	/* NOCARD is re-checked here to also cover the 5211+ path above */
470	if (unlikely(data == AR5K_INT_NOCARD))
471		return -ENODEV;
472
	/* Collapse the various RX status bits into the abstract RX bit */
473	if (data & (AR5K_ISR_RXOK | AR5K_ISR_RXERR))
474		*interrupt_mask |= AR5K_INT_RX;
475
	/* Likewise for TX status bits */
476	if (data & (AR5K_ISR_TXOK | AR5K_ISR_TXERR
477			| AR5K_ISR_TXDESC | AR5K_ISR_TXEOL))
478		*interrupt_mask |= AR5K_INT_TX;
479
480	if (ah->ah_version != AR5K_AR5210) {
481		/*HIU = Host Interface Unit (PCI etc)*/
482		if (unlikely(data & (AR5K_ISR_HIUERR)))
483			*interrupt_mask |= AR5K_INT_FATAL;
484
485		/*Beacon Not Ready*/
486		if (unlikely(data & (AR5K_ISR_BNR)))
487			*interrupt_mask |= AR5K_INT_BNR;
488	}
489
490	/*
491	 * XXX: BMISS interrupts may occur after association.
492	 * I found this on 5210 code but it needs testing. If this is
493	 * true we should disable them before assoc and re-enable them
494	 * after a successfull assoc + some jiffies.
495	 */
496#if 0
497	interrupt_mask &= ~AR5K_INT_BMISS;
498#endif
499
500	/*
501	 * In case we didn't handle anything,
502	 * print the register value.
503	 */
504	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
505		ATH5K_PRINTF("0x%08x\n", data);
506
507	return 0;
508}
509
510/**
511 * ath5k_hw_set_imr - Set interrupt mask
512 *
513 * @ah: The &struct ath5k_hw
514 * @new_mask: The new interrupt mask to be set
515 *
516 * Set the interrupt mask in hw to save interrupts. We do that by mapping
517 * ath5k_int bits to hw-specific bits to remove abstraction and writing
518 * Interrupt Mask Register.
 * Returns the previously stored (abstract) interrupt mask.
519 */
520enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
521{
522	enum ath5k_int old_mask, int_mask;
523
524	/*
525	 * Disable card interrupts to prevent any race conditions
526	 * (they will be re-enabled afterwards).
527	 */
528	ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
	/* Read back — presumably flushes the posted write */
529	ath5k_hw_reg_read(ah, AR5K_IER);
530
531	old_mask = ah->ah_imr;
532
533	/*
534	 * Add additional, chipset-dependent interrupt mask flags
535	 * and write them to the IMR (interrupt mask register).
536	 */
537	int_mask = new_mask & AR5K_INT_COMMON;
538
	/* Expand the abstract RX bit into the individual hw RX bits */
539	if (new_mask & AR5K_INT_RX)
540		int_mask |= AR5K_IMR_RXOK | AR5K_IMR_RXERR | AR5K_IMR_RXORN |
541			AR5K_IMR_RXDESC;
542
	/* Likewise for TX */
543	if (new_mask & AR5K_INT_TX)
544		int_mask |= AR5K_IMR_TXOK | AR5K_IMR_TXERR | AR5K_IMR_TXDESC |
545			AR5K_IMR_TXURN;
546
547	if (ah->ah_version != AR5K_AR5210) {
		/* FATAL maps to the Host Interface Unit error bits on 5211+ */
548		if (new_mask & AR5K_INT_FATAL) {
549			int_mask |= AR5K_IMR_HIUERR;
550			AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_MCABT |
551				AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR);
552		}
553	}
554
555	ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
556
557	/* Store new interrupt mask */
558	ah->ah_imr = new_mask;
559
560	/* ..re-enable interrupts */
561	ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
562	ath5k_hw_reg_read(ah, AR5K_IER);
563
564	return old_mask;
565}
566
diff --git a/drivers/net/wireless/ath5k/eeprom.c b/drivers/net/wireless/ath5k/eeprom.c
new file mode 100644
index 000000000000..a883839b6a9f
--- /dev/null
+++ b/drivers/net/wireless/ath5k/eeprom.c
@@ -0,0 +1,466 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/*************************************\
20* EEPROM access functions and helpers *
21\*************************************/
22
23#include "ath5k.h"
24#include "reg.h"
25#include "debug.h"
26#include "base.h"
27
28/*
 * Read a 16-bit word at @offset from the EEPROM into @data.
 * 5210 maps the EEPROM directly into register space; 5211+ use an
 * offset/command interface. Returns 0 on success, -EIO on a read
 * error reported by the chip, -ETIMEDOUT if the read never completes
 * (AR5K_TUNE_REGISTER_TIMEOUT polls of 15us each).
30 */
31static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
32{
33	u32 status, timeout;
34
35	ATH5K_TRACE(ah->ah_sc);
36	/*
37	 * Initialize EEPROM access
38	 */
39	if (ah->ah_version == AR5K_AR5210) {
40		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
		/* 5210: word-addressed (4 bytes per offset); read triggers
		 * the access, the value is fetched below via EEPROM_DATA */
41		(void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
42	} else {
43		ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
44		AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
45				AR5K_EEPROM_CMD_READ);
46	}
47
48	for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
49		status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
50		if (status & AR5K_EEPROM_STAT_RDDONE) {
51			if (status & AR5K_EEPROM_STAT_RDERR)
52				return -EIO;
53			*data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
54					0xffff);
55			return 0;
56		}
57		udelay(15);
58	}
59
60	return -ETIMEDOUT;
61}
62
63/*
64 * Translate binary channel representation in EEPROM to frequency
65 */
66static u16 ath5k_eeprom_bin2freq(struct ath5k_hw *ah, u16 bin,
67 unsigned int mode)
68{
69 u16 val;
70
71 if (bin == AR5K_EEPROM_CHANNEL_DIS)
72 return bin;
73
74 if (mode == AR5K_EEPROM_MODE_11A) {
75 if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
76 val = (5 * bin) + 4800;
77 else
78 val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 :
79 (bin * 10) + 5100;
80 } else {
81 if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
82 val = bin + 2300;
83 else
84 val = bin + 2400;
85 }
86
87 return val;
88}
89
90/*
 * Read antenna infos from eeprom.
 *
 * Unpacks the antenna control table for @mode starting at *offset.
 * The 6-bit antenna control fields are packed across consecutive 16-bit
 * EEPROM words, which is why several entries are assembled from the tail
 * of one word (assigned with `=`) and the head of the next (merged with
 * `|=`). On success *offset is advanced past the consumed words.
 * Returns 0, or the AR5K_EEPROM_READ error code on a failed read.
92 */
93static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
94		unsigned int mode)
95{
96	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
97	u32 o = *offset;
98	u16 val;
99	int ret, i = 0;
100
101	AR5K_EEPROM_READ(o++, val);
102	ee->ee_switch_settling[mode]	= (val >> 8) & 0x7f;
103	ee->ee_ant_tx_rx[mode]		= (val >> 2) & 0x3f;
	/* Low bits of this word hold the top of ant_control[0];
	 * the rest arrives from the next word below */
104	ee->ee_ant_control[mode][i]	= (val << 4) & 0x3f;
105
106	AR5K_EEPROM_READ(o++, val);
107	ee->ee_ant_control[mode][i++]	|= (val >> 12) & 0xf;
108	ee->ee_ant_control[mode][i++]	= (val >> 6) & 0x3f;
109	ee->ee_ant_control[mode][i++]	= val & 0x3f;
110
111	AR5K_EEPROM_READ(o++, val);
112	ee->ee_ant_control[mode][i++]	= (val >> 10) & 0x3f;
113	ee->ee_ant_control[mode][i++]	= (val >> 4) & 0x3f;
114	ee->ee_ant_control[mode][i]	= (val << 2) & 0x3f;
115
116	AR5K_EEPROM_READ(o++, val);
117	ee->ee_ant_control[mode][i++]	|= (val >> 14) & 0x3;
118	ee->ee_ant_control[mode][i++]	= (val >> 8) & 0x3f;
119	ee->ee_ant_control[mode][i++]	= (val >> 2) & 0x3f;
120	ee->ee_ant_control[mode][i]	= (val << 4) & 0x3f;
121
122	AR5K_EEPROM_READ(o++, val);
123	ee->ee_ant_control[mode][i++]	|= (val >> 12) & 0xf;
124	ee->ee_ant_control[mode][i++]	= (val >> 6) & 0x3f;
125	ee->ee_ant_control[mode][i++]	= val & 0x3f;
126
127	/* Get antenna modes */
128	ah->ah_antenna[mode][0] =
129		(ee->ee_ant_control[mode][0] << 4) | 0x1;
130	ah->ah_antenna[mode][AR5K_ANT_FIXED_A] =
131		ee->ee_ant_control[mode][1]	|
132		(ee->ee_ant_control[mode][2] << 6)	|
133		(ee->ee_ant_control[mode][3] << 12)	|
134		(ee->ee_ant_control[mode][4] << 18)	|
135		(ee->ee_ant_control[mode][5] << 24);
136	ah->ah_antenna[mode][AR5K_ANT_FIXED_B] =
137		ee->ee_ant_control[mode][6]	|
138		(ee->ee_ant_control[mode][7] << 6)	|
139		(ee->ee_ant_control[mode][8] << 12)	|
140		(ee->ee_ant_control[mode][9] << 18)	|
141		(ee->ee_ant_control[mode][10] << 24);
142
143	/* return new offset */
144	*offset = o;
145
146	return 0;
147}
148
149/*
 * Read supported modes from eeprom.
 *
 * Unpacks per-mode RF/baseband parameters starting at *offset. Several
 * fields only exist on newer EEPROM format versions, hence the version
 * gates; older versions get fixed fallback values instead. On success
 * *offset is advanced past the consumed words. Returns 0, or the
 * AR5K_EEPROM_READ error code on a failed read.
151 */
152static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
153		unsigned int mode)
154{
155	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
156	u32 o = *offset;
157	u16 val;
158	int ret;
159
160	AR5K_EEPROM_READ(o++, val);
161	ee->ee_tx_end2xlna_enable[mode]	= (val >> 8) & 0xff;
162	ee->ee_thr_62[mode]		= val & 0xff;
163
	/* Pre-3.3 EEPROMs don't carry thr_62 — use fixed defaults */
164	if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
165		ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 15 : 28;
166
167	AR5K_EEPROM_READ(o++, val);
168	ee->ee_tx_end2xpa_disable[mode]	= (val >> 8) & 0xff;
169	ee->ee_tx_frm2xpa_enable[mode]	= val & 0xff;
170
171	AR5K_EEPROM_READ(o++, val);
172	ee->ee_pga_desired_size[mode]	= (val >> 8) & 0xff;
173
	/* Noise floor threshold is a signed 8-bit value: manual
	 * two's-complement conversion of the low byte */
174	if ((val & 0xff) & 0x80)
175		ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1);
176	else
177		ee->ee_noise_floor_thr[mode] = val & 0xff;
178
179	if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
180		ee->ee_noise_floor_thr[mode] =
181		    mode == AR5K_EEPROM_MODE_11A ? -54 : -1;
182
183	AR5K_EEPROM_READ(o++, val);
184	ee->ee_xlna_gain[mode]	= (val >> 5) & 0xff;
185	ee->ee_x_gain[mode]	= (val >> 1) & 0xf;
186	ee->ee_xpd[mode]	= val & 0x1;
187
188	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0)
189		ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
190
191	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
192		AR5K_EEPROM_READ(o++, val);
193		ee->ee_false_detect[mode] = (val >> 6) & 0x7f;
194
195		if (mode == AR5K_EEPROM_MODE_11A)
196			ee->ee_xr_power[mode] = val & 0x3f;
197		else {
198			ee->ee_ob[mode][0] = val & 0x7;
199			ee->ee_db[mode][0] = (val >> 3) & 0x7;
200		}
201	}
202
203	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) {
204		ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN;
205		ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA;
206	} else {
207		ee->ee_i_gain[mode] = (val >> 13) & 0x7;
208
		/* i_gain spans a word boundary: low 3 bits above,
		 * upper 3 bits from this next word */
209		AR5K_EEPROM_READ(o++, val);
210		ee->ee_i_gain[mode] |= (val << 3) & 0x38;
211
212		if (mode == AR5K_EEPROM_MODE_11G)
213			ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff;
214	}
215
216	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
217			mode == AR5K_EEPROM_MODE_11A) {
218		ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
219		ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
220	}
221
222	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6 &&
223			mode == AR5K_EEPROM_MODE_11G)
224		ee->ee_scaled_cck_delta = (val >> 11) & 0x1f;
225
226	/* return new offset */
227	*offset = o;
228
229	return 0;
230}
231
/*
 * Initialize eeprom & capabilities structs.
 *
 * Walks the version-dependent EEPROM layout and fills
 * ah->ah_capabilities.cap_eeprom: header words first, then conformance
 * test limits, then the per-mode (11a/11b/11g) parameter blocks via
 * ath5k_eeprom_read_ants()/ath5k_eeprom_read_modes(). Returns 0 on
 * success (including the early-out for pre-3.0 EEPROMs), or the
 * error code propagated from a failed EEPROM read.
 */
235int ath5k_eeprom_init(struct ath5k_hw *ah)
236{
237	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
238	unsigned int mode, i;
239	int ret;
240	u32 offset;
241	u16 val;
242
243	/* Initial TX thermal adjustment values */
244	ee->ee_tx_clip = 4;
245	ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
246	ee->ee_gain_select = 1;
247
248	/*
249	 * Read values from EEPROM and store them in the capability structure
250	 */
251	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
252	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
253	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
254	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
255	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);
256
257	/* Return if we have an old EEPROM */
258	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
259		return 0;
260
261#ifdef notyet
262	/*
263	 * Validate the checksum of the EEPROM date. There are some
264	 * devices with invalid EEPROMs.
265	 */
266	for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
267		AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
268		cksum ^= val;
269	}
270	if (cksum != AR5K_EEPROM_INFO_CKSUM) {
271		ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
272		return -EIO;
273	}
274#endif
275
276	AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
277	    ee_ant_gain);
278
279	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
280		AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
281		AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);
282	}
283
	/* Pre-3.3: 2.4GHz ob/db values live at fixed offsets */
284	if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
285		AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val);
286		ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7;
287		ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;
288
289		AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val);
290		ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7;
291		ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
292	}
293
294	/*
295	 * Get conformance test limit values
296	 */
297	offset = AR5K_EEPROM_CTL(ah->ah_ee_version);
298	ee->ee_ctls = AR5K_EEPROM_N_CTLS(ah->ah_ee_version);
299
	/* NOTE(review): each word holds two CTL bytes but i only advances
	 * by 1 per word read, so ee_ctl[i + 1] is overwritten by the next
	 * iteration's ee_ctl[i] — looks like the loop should step i by 2
	 * (or read ee_ctls/2 words). Verify against the EEPROM map before
	 * relying on ee_ctl contents. */
300	for (i = 0; i < ee->ee_ctls; i++) {
301		AR5K_EEPROM_READ(offset++, val);
302		ee->ee_ctl[i] = (val >> 8) & 0xff;
303		ee->ee_ctl[i + 1] = val & 0xff;
304	}
305
306	/*
307	 * Get values for 802.11a (5GHz)
308	 */
309	mode = AR5K_EEPROM_MODE_11A;
310
311	ee->ee_turbo_max_power[mode] =
312	    AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header);
313
314	offset = AR5K_EEPROM_MODES_11A(ah->ah_ee_version);
315
316	ret = ath5k_eeprom_read_ants(ah, &offset, mode);
317	if (ret)
318		return ret;
319
320	AR5K_EEPROM_READ(offset++, val);
321	ee->ee_adc_desired_size[mode]	= (s8)((val >> 8) & 0xff);
322	ee->ee_ob[mode][3]		= (val >> 5) & 0x7;
323	ee->ee_db[mode][3]		= (val >> 2) & 0x7;
	/* ob[2] spans the word boundary; completed from the next word */
324	ee->ee_ob[mode][2]		= (val << 1) & 0x7;
325
326	AR5K_EEPROM_READ(offset++, val);
327	ee->ee_ob[mode][2]	|= (val >> 15) & 0x1;
328	ee->ee_db[mode][2]	= (val >> 12) & 0x7;
329	ee->ee_ob[mode][1]	= (val >> 9) & 0x7;
330	ee->ee_db[mode][1]	= (val >> 6) & 0x7;
331	ee->ee_ob[mode][0]	= (val >> 3) & 0x7;
332	ee->ee_db[mode][0]	= val & 0x7;
333
334	ret = ath5k_eeprom_read_modes(ah, &offset, mode);
335	if (ret)
336		return ret;
337
338	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) {
339		AR5K_EEPROM_READ(offset++, val);
340		ee->ee_margin_tx_rx[mode] = val & 0x3f;
341	}
342
343	/*
344	 * Get values for 802.11b (2.4GHz)
345	 */
346	mode = AR5K_EEPROM_MODE_11B;
347	offset = AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
348
349	ret = ath5k_eeprom_read_ants(ah, &offset, mode);
350	if (ret)
351		return ret;
352
353	AR5K_EEPROM_READ(offset++, val);
354	ee->ee_adc_desired_size[mode]	= (s8)((val >> 8) & 0xff);
355	ee->ee_ob[mode][1]		= (val >> 4) & 0x7;
356	ee->ee_db[mode][1]		= val & 0x7;
357
358	ret = ath5k_eeprom_read_modes(ah, &offset, mode);
359	if (ret)
360		return ret;
361
362	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
363		AR5K_EEPROM_READ(offset++, val);
364		ee->ee_cal_pier[mode][0] =
365			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
366		ee->ee_cal_pier[mode][1] =
367			ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
368
369		AR5K_EEPROM_READ(offset++, val);
370		ee->ee_cal_pier[mode][2] =
371			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
372	}
373
	/* Uses the last `val` read above (the 4.0+ block, when present) */
374	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
375		ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
376
377	/*
378	 * Get values for 802.11g (2.4GHz)
379	 */
380	mode = AR5K_EEPROM_MODE_11G;
381	offset = AR5K_EEPROM_MODES_11G(ah->ah_ee_version);
382
383	ret = ath5k_eeprom_read_ants(ah, &offset, mode);
384	if (ret)
385		return ret;
386
387	AR5K_EEPROM_READ(offset++, val);
388	ee->ee_adc_desired_size[mode]	= (s8)((val >> 8) & 0xff);
389	ee->ee_ob[mode][1]		= (val >> 4) & 0x7;
390	ee->ee_db[mode][1]		= val & 0x7;
391
392	ret = ath5k_eeprom_read_modes(ah, &offset, mode);
393	if (ret)
394		return ret;
395
396	if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
397		AR5K_EEPROM_READ(offset++, val);
398		ee->ee_cal_pier[mode][0] =
399			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
400		ee->ee_cal_pier[mode][1] =
401			ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
402
403		AR5K_EEPROM_READ(offset++, val);
404		ee->ee_turbo_max_power[mode] = val & 0x7f;
405		ee->ee_xr_power[mode] = (val >> 7) & 0x3f;
406
407		AR5K_EEPROM_READ(offset++, val);
408		ee->ee_cal_pier[mode][2] =
409			ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
410
411		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
412			ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
413
414		AR5K_EEPROM_READ(offset++, val);
415		ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
416		ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
417
418		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
419			AR5K_EEPROM_READ(offset++, val);
420			ee->ee_cck_ofdm_gain_delta = val & 0xff;
421		}
422	}
423
424	/*
425	 * Read 5GHz EEPROM channels
426	 */
427
428	return 0;
429}
430
431/*
 * Read the MAC address from eeprom.
 *
 * The address is stored big-endian, two octets per 16-bit word, in EEPROM
 * words 0x1d-0x1f (read highest-offset first). Fills @mac (ETH_ALEN bytes).
 * Returns 0 on success, an EEPROM read error code on I/O failure, or
 * -EINVAL if the stored address is all-zeros or all-ones (blank EEPROM).
433 */
434int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
435{
436	u8 mac_d[ETH_ALEN];
437	u32 total, offset;
438	u16 data;
439	int octet, ret;
440
441	memset(mac, 0, ETH_ALEN);
442	memset(mac_d, 0, ETH_ALEN);
443
	/* Initial read at 0x20: result is discarded — presumably just
	 * verifies EEPROM access works before reading the address words;
	 * confirm against the EEPROM map. */
444	ret = ath5k_hw_eeprom_read(ah, 0x20, &data);
445	if (ret)
446		return ret;
447
448	for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
449		ret = ath5k_hw_eeprom_read(ah, offset, &data);
450		if (ret)
451			return ret;
452
		/* Accumulate word sum to detect blank (0x0000/0xffff) words */
453		total += data;
454		mac_d[octet + 1] = data & 0xff;
455		mac_d[octet] = data >> 8;
456		octet += 2;
457	}
458
459	memcpy(mac, mac_d, ETH_ALEN);
460
	/* 3 words all-zero or all-0xffff means no address programmed */
461	if (!total || total == 3 * 0xffff)
462		return -EINVAL;
463
464	return 0;
465}
466
diff --git a/drivers/net/wireless/ath5k/eeprom.h b/drivers/net/wireless/ath5k/eeprom.h
new file mode 100644
index 000000000000..a468ecfbb18a
--- /dev/null
+++ b/drivers/net/wireless/ath5k/eeprom.h
@@ -0,0 +1,215 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
/*
 * Common ar5xxx EEPROM data offsets (set these on AR5K_EEPROM_BASE)
 *
 * NOTE(review): offsets appear to be in 16-bit EEPROM words —
 * ath5k_hw_eeprom_read() yields one u16 per offset; confirm against the
 * EEPROM access code.
 */
#define AR5K_EEPROM_MAGIC		0x003d	/* EEPROM Magic number */
#define AR5K_EEPROM_MAGIC_VALUE		0x5aa5	/* Default - found on EEPROM */
#define AR5K_EEPROM_MAGIC_5212		0x0000145c /* 5212 */
#define AR5K_EEPROM_MAGIC_5211		0x0000145b /* 5211 */
#define AR5K_EEPROM_MAGIC_5210		0x0000145a /* 5210 */

#define AR5K_EEPROM_PROTECT		0x003f	/* EEPROM protect status */
#define AR5K_EEPROM_PROTECT_RD_0_31	0x0001	/* Read protection bit for offsets 0x0 - 0x1f */
#define AR5K_EEPROM_PROTECT_WR_0_31	0x0002	/* Write protection bit for offsets 0x0 - 0x1f */
#define AR5K_EEPROM_PROTECT_RD_32_63	0x0004	/* 0x20 - 0x3f */
#define AR5K_EEPROM_PROTECT_WR_32_63	0x0008
#define AR5K_EEPROM_PROTECT_RD_64_127	0x0010	/* 0x40 - 0x7f */
#define AR5K_EEPROM_PROTECT_WR_64_127	0x0020
#define AR5K_EEPROM_PROTECT_RD_128_191	0x0040	/* 0x80 - 0xbf (regdom) */
#define AR5K_EEPROM_PROTECT_WR_128_191	0x0080
#define AR5K_EEPROM_PROTECT_RD_192_207	0x0100	/* 0xc0 - 0xcf */
#define AR5K_EEPROM_PROTECT_WR_192_207	0x0200
#define AR5K_EEPROM_PROTECT_RD_208_223	0x0400	/* 0xd0 - 0xdf */
#define AR5K_EEPROM_PROTECT_WR_208_223	0x0800
#define AR5K_EEPROM_PROTECT_RD_224_239	0x1000	/* 0xe0 - 0xef */
#define AR5K_EEPROM_PROTECT_WR_224_239	0x2000
#define AR5K_EEPROM_PROTECT_RD_240_255	0x4000	/* 0xf0 - 0xff */
#define AR5K_EEPROM_PROTECT_WR_240_255	0x8000
#define AR5K_EEPROM_REG_DOMAIN		0x00bf	/* EEPROM regdom */
#define AR5K_EEPROM_INFO_BASE		0x00c0	/* EEPROM header */
#define AR5K_EEPROM_INFO_MAX		(0x400 - AR5K_EEPROM_INFO_BASE)
#define AR5K_EEPROM_INFO_CKSUM		0xffff
#define AR5K_EEPROM_INFO(_n)		(AR5K_EEPROM_INFO_BASE + (_n))

#define AR5K_EEPROM_VERSION		AR5K_EEPROM_INFO(1)	/* EEPROM Version */
#define AR5K_EEPROM_VERSION_3_0		0x3000	/* No idea what's going on before this version */
#define AR5K_EEPROM_VERSION_3_1		0x3001	/* ob/db values for 2Ghz (ar5211_rfregs) */
#define AR5K_EEPROM_VERSION_3_2		0x3002	/* different frequency representation (eeprom_bin2freq) */
#define AR5K_EEPROM_VERSION_3_3		0x3003	/* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */
#define AR5K_EEPROM_VERSION_3_4		0x3004	/* has ee_i_gain ee_cck_ofdm_power_delta (eeprom_read_modes) */
#define AR5K_EEPROM_VERSION_4_0		0x4000	/* has ee_misc*, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */
#define AR5K_EEPROM_VERSION_4_1		0x4001	/* has ee_margin_tx_rx (eeprom_init) */
#define AR5K_EEPROM_VERSION_4_2		0x4002	/* has ee_cck_ofdm_gain_delta (eeprom_init) */
#define AR5K_EEPROM_VERSION_4_3		0x4003
#define AR5K_EEPROM_VERSION_4_4		0x4004
#define AR5K_EEPROM_VERSION_4_5		0x4005
#define AR5K_EEPROM_VERSION_4_6		0x4006	/* has ee_scaled_cck_delta */
#define AR5K_EEPROM_VERSION_4_7		0x4007

/* Per-mode indices used by the ee_* arrays in struct ath5k_eeprom_info */
#define AR5K_EEPROM_MODE_11A		0
#define AR5K_EEPROM_MODE_11B		1
#define AR5K_EEPROM_MODE_11G		2

#define AR5K_EEPROM_HDR			AR5K_EEPROM_INFO(2)	/* Header that contains the device caps */
#define AR5K_EEPROM_HDR_11A(_v)		(((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
#define AR5K_EEPROM_HDR_11B(_v)		(((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
#define AR5K_EEPROM_HDR_11G(_v)		(((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v)	(((_v) >> 3) & 0x1)	/* Disable turbo for 2Ghz (?) */
#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v)	(((_v) >> 4) & 0x7f)	/* Max turbo power for a/XR mode (eeprom_init) */
#define AR5K_EEPROM_HDR_DEVICE(_v)	(((_v) >> 11) & 0x7)
#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v)	(((_v) >> 15) & 0x1)	/* Disable turbo for 5Ghz (?) */
#define AR5K_EEPROM_HDR_RFKILL(_v)	(((_v) >> 14) & 0x1)	/* Device has RFKill support */

#define AR5K_EEPROM_RFKILL_GPIO_SEL	0x0000001c
#define AR5K_EEPROM_RFKILL_GPIO_SEL_S	2
#define AR5K_EEPROM_RFKILL_POLARITY	0x00000002
#define AR5K_EEPROM_RFKILL_POLARITY_S	1

/* Newer EEPROMs are using a different offset */
#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \
	(((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0)

#define AR5K_EEPROM_ANT_GAIN(_v)	AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3)
#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v)	((int8_t)(((_v) >> 8) & 0xff))
#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v)	((int8_t)((_v) & 0xff))

/* calibration settings */
#define AR5K_EEPROM_MODES_11A(_v)	AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
#define AR5K_EEPROM_MODES_11B(_v)	AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2)
#define AR5K_EEPROM_MODES_11G(_v)	AR5K_EEPROM_OFF(_v, 0x00da, 0x010d)
#define AR5K_EEPROM_CTL(_v)		AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128)	/* Conformance test limits */

/* [3.1 - 3.3] */
#define AR5K_EEPROM_OBDB0_2GHZ	0x00ec
#define AR5K_EEPROM_OBDB1_2GHZ	0x00ed

/* Misc values available since EEPROM 4.0 */
#define AR5K_EEPROM_MISC0		0x00c4
#define AR5K_EEPROM_EARSTART(_v)	((_v) & 0xfff)
#define AR5K_EEPROM_EEMAP(_v)		(((_v) >> 14) & 0x3)
#define AR5K_EEPROM_MISC1		0x00c5
#define AR5K_EEPROM_TARGET_PWRSTART(_v)	((_v) & 0xfff)
#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v)	(((_v) >> 14) & 0x1)


/* Some EEPROM defines */
#define AR5K_EEPROM_EEP_SCALE		100
#define AR5K_EEPROM_EEP_DELTA		10
#define AR5K_EEPROM_N_MODES		3
#define AR5K_EEPROM_N_5GHZ_CHAN		10
#define AR5K_EEPROM_N_2GHZ_CHAN		3
#define AR5K_EEPROM_MAX_CHAN		10
#define AR5K_EEPROM_N_PCDAC		11
#define AR5K_EEPROM_N_TEST_FREQ		8
#define AR5K_EEPROM_N_EDGES		8
#define AR5K_EEPROM_N_INTERCEPTS	11
#define AR5K_EEPROM_FREQ_M(_v)		AR5K_EEPROM_OFF(_v, 0x7f, 0xff)
#define AR5K_EEPROM_PCDAC_M		0x3f
#define AR5K_EEPROM_PCDAC_START		1
#define AR5K_EEPROM_PCDAC_STOP		63
#define AR5K_EEPROM_PCDAC_STEP		1
#define AR5K_EEPROM_NON_EDGE_M		0x40
#define AR5K_EEPROM_CHANNEL_POWER	8
#define AR5K_EEPROM_N_OBDB		4
#define AR5K_EEPROM_OBDB_DIS		0xffff
#define AR5K_EEPROM_CHANNEL_DIS		0xff
#define AR5K_EEPROM_SCALE_OC_DELTA(_x)	(((_x) * 2) / 10)
#define AR5K_EEPROM_N_CTLS(_v)		AR5K_EEPROM_OFF(_v, 16, 32)
#define AR5K_EEPROM_MAX_CTLS		32
#define AR5K_EEPROM_N_XPD_PER_CHANNEL	4
#define AR5K_EEPROM_N_XPD0_POINTS	4
#define AR5K_EEPROM_N_XPD3_POINTS	3
#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ	35
#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ	55
#define AR5K_EEPROM_POWER_M		0x3f
#define AR5K_EEPROM_POWER_MIN		0
#define AR5K_EEPROM_POWER_MAX		3150
#define AR5K_EEPROM_POWER_STEP		50
#define AR5K_EEPROM_POWER_TABLE_SIZE	64
#define AR5K_EEPROM_N_POWER_LOC_11B	4
#define AR5K_EEPROM_N_POWER_LOC_11G	6
#define AR5K_EEPROM_I_GAIN		10
#define AR5K_EEPROM_CCK_OFDM_DELTA	15
#define AR5K_EEPROM_N_IQ_CAL		2
151
/*
 * Read one EEPROM word at offset _o into _v.
 * NOTE: relies on `ah` and an `int ret` being in scope at the call site,
 * and on failure it returns from the *enclosing* function — callers must
 * be functions that return an int error code.
 */
#define AR5K_EEPROM_READ(_o, _v) do {			\
	ret = ath5k_hw_eeprom_read(ah, (_o), &(_v));	\
	if (ret)					\
		return ret;				\
} while (0)
157
/*
 * Read an EEPROM header word straight into the named member of
 * ah->ah_capabilities.cap_eeprom. Inherits AR5K_EEPROM_READ's behavior
 * of returning from the enclosing function on error.
 *
 * Fix: dropped the stray trailing backslash after the semicolon — it
 * silently spliced whatever source line followed the macro definition
 * into the macro itself.
 */
#define AR5K_EEPROM_READ_HDR(_o, _v)					\
	AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v);
/*
 * Struct to hold EEPROM calibration data.
 * Per-mode arrays are indexed by AR5K_EEPROM_MODE_11A/11B/11G; the
 * minimum EEPROM version that provides a field is noted where the read
 * path (eeprom.c) makes it explicit.
 */
struct ath5k_eeprom_info {
	u16	ee_magic;
	u16	ee_protect;
	u16	ee_regdomain;
	u16	ee_version;
	u16	ee_header;
	u16	ee_ant_gain;
	u16	ee_misc0;
	u16	ee_misc1;
	u16	ee_cck_ofdm_gain_delta;		/* >= v4.2 only */
	u16	ee_cck_ofdm_power_delta;
	u16	ee_scaled_cck_delta;

	/* Used for tx thermal adjustment (eeprom_init, rfregs) */
	u16	ee_tx_clip;
	u16	ee_pwd_84;
	u16	ee_pwd_90;
	u16	ee_gain_select;

	/* RF Calibration settings (reset, rfregs) */
	u16	ee_i_cal[AR5K_EEPROM_N_MODES];
	u16	ee_q_cal[AR5K_EEPROM_N_MODES];
	u16	ee_fixed_bias[AR5K_EEPROM_N_MODES];
	u16	ee_turbo_max_power[AR5K_EEPROM_N_MODES];	/* >= v4.0 */
	u16	ee_xr_power[AR5K_EEPROM_N_MODES];		/* >= v4.0 */
	u16	ee_switch_settling[AR5K_EEPROM_N_MODES];
	u16	ee_ant_tx_rx[AR5K_EEPROM_N_MODES];
	u16	ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC];
	u16	ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
	u16	ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
	u16	ee_tx_end2xlna_enable[AR5K_EEPROM_N_MODES];
	u16	ee_tx_end2xpa_disable[AR5K_EEPROM_N_MODES];
	u16	ee_tx_frm2xpa_enable[AR5K_EEPROM_N_MODES];
	u16	ee_thr_62[AR5K_EEPROM_N_MODES];
	u16	ee_xlna_gain[AR5K_EEPROM_N_MODES];
	u16	ee_xpd[AR5K_EEPROM_N_MODES];
	u16	ee_x_gain[AR5K_EEPROM_N_MODES];
	u16	ee_i_gain[AR5K_EEPROM_N_MODES];
	u16	ee_margin_tx_rx[AR5K_EEPROM_N_MODES];		/* >= v4.1 */

	/* Unused */
	u16	ee_false_detect[AR5K_EEPROM_N_MODES];
	u16	ee_cal_pier[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_2GHZ_CHAN];
	u16	ee_channel[AR5K_EEPROM_N_MODES][AR5K_EEPROM_MAX_CHAN];	/*empty*/

	/* Conformance test limits (Unused) */
	u16	ee_ctls;
	u16	ee_ctl[AR5K_EEPROM_MAX_CTLS];

	/* Noise Floor Calibration settings */
	s16	ee_noise_floor_thr[AR5K_EEPROM_N_MODES];
	s8	ee_adc_desired_size[AR5K_EEPROM_N_MODES];
	s8	ee_pga_desired_size[AR5K_EEPROM_N_MODES];
};
diff --git a/drivers/net/wireless/ath5k/gpio.c b/drivers/net/wireless/ath5k/gpio.c
new file mode 100644
index 000000000000..b77205adc180
--- /dev/null
+++ b/drivers/net/wireless/ath5k/gpio.c
@@ -0,0 +1,176 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/****************\
20 GPIO Functions
21\****************/
22
23#include "ath5k.h"
24#include "reg.h"
25#include "debug.h"
26#include "base.h"
27
28/*
29 * Set led state
30 */
31void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
32{
33 u32 led;
34 /*5210 has different led mode handling*/
35 u32 led_5210;
36
37 ATH5K_TRACE(ah->ah_sc);
38
39 /*Reset led status*/
40 if (ah->ah_version != AR5K_AR5210)
41 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
42 AR5K_PCICFG_LEDMODE | AR5K_PCICFG_LED);
43 else
44 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED);
45
46 /*
47 * Some blinking values, define at your wish
48 */
49 switch (state) {
50 case AR5K_LED_SCAN:
51 case AR5K_LED_AUTH:
52 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND;
53 led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL;
54 break;
55
56 case AR5K_LED_INIT:
57 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE;
58 led_5210 = AR5K_PCICFG_LED_PEND;
59 break;
60
61 case AR5K_LED_ASSOC:
62 case AR5K_LED_RUN:
63 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC;
64 led_5210 = AR5K_PCICFG_LED_ASSOC;
65 break;
66
67 default:
68 led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE;
69 led_5210 = AR5K_PCICFG_LED_PEND;
70 break;
71 }
72
73 /*Write new status to the register*/
74 if (ah->ah_version != AR5K_AR5210)
75 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led);
76 else
77 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
78}
79
80/*
81 * Set GPIO inputs
82 */
83int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
84{
85 ATH5K_TRACE(ah->ah_sc);
86 if (gpio > AR5K_NUM_GPIO)
87 return -EINVAL;
88
89 ath5k_hw_reg_write(ah,
90 (ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
91 | AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR);
92
93 return 0;
94}
95
96/*
97 * Set GPIO outputs
98 */
99int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
100{
101 ATH5K_TRACE(ah->ah_sc);
102 if (gpio > AR5K_NUM_GPIO)
103 return -EINVAL;
104
105 ath5k_hw_reg_write(ah,
106 (ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
107 | AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR);
108
109 return 0;
110}
111
112/*
113 * Get GPIO state
114 */
115u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
116{
117 ATH5K_TRACE(ah->ah_sc);
118 if (gpio > AR5K_NUM_GPIO)
119 return 0xffffffff;
120
121 /* GPIO input magic */
122 return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) &
123 0x1;
124}
125
126/*
127 * Set GPIO state
128 */
129int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
130{
131 u32 data;
132 ATH5K_TRACE(ah->ah_sc);
133
134 if (gpio > AR5K_NUM_GPIO)
135 return -EINVAL;
136
137 /* GPIO output magic */
138 data = ath5k_hw_reg_read(ah, AR5K_GPIODO);
139
140 data &= ~(1 << gpio);
141 data |= (val & 1) << gpio;
142
143 ath5k_hw_reg_write(ah, data, AR5K_GPIODO);
144
145 return 0;
146}
147
/*
 * Initialize the GPIO interrupt (RFKill switch)
 *
 * Routes the GPIO interrupt to @gpio and enables it, then unmasks the
 * GPIO interrupt in the primary interrupt mask register. @interrupt_level
 * selects the trigger level: when it is zero the INT_SELH bit is also set
 * (presumably "trigger on high" — TODO confirm against register docs).
 * Silently does nothing for an out-of-range GPIO number.
 */
void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
		u32 interrupt_level)
{
	u32 data;

	ATH5K_TRACE(ah->ah_sc);
	if (gpio > AR5K_NUM_GPIO)
		return;

	/*
	 * Set the GPIO interrupt: clear any previous interrupt selection
	 * (and this pin's output config), then select this pin and enable.
	 */
	data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &
		~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH |
		AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) |
		(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA);

	ath5k_hw_reg_write(ah, interrupt_level ? data :
		(data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR);

	/* Track the new mask in software as well */
	ah->ah_imr |= AR5K_IMR_GPIO;

	/* Enable GPIO interrupts */
	AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO);
}
176
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c
deleted file mode 100644
index ad1a5b422c8c..000000000000
--- a/drivers/net/wireless/ath5k/hw.c
+++ /dev/null
@@ -1,4529 +0,0 @@
1/*
2 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007 Matthew W. S. Bell <mentor@madwifi.org>
5 * Copyright (c) 2007 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
6 * Copyright (c) 2007 Pavel Roskin <proski@gnu.org>
7 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 *
21 */
22
23/*
24 * HW related functions for Atheros Wireless LAN devices.
25 */
26
27#include <linux/pci.h>
28#include <linux/delay.h>
29
30#include "reg.h"
31#include "base.h"
32#include "debug.h"
33
34/* Rate tables */
35static const struct ath5k_rate_table ath5k_rt_11a = AR5K_RATES_11A;
36static const struct ath5k_rate_table ath5k_rt_11b = AR5K_RATES_11B;
37static const struct ath5k_rate_table ath5k_rt_11g = AR5K_RATES_11G;
38static const struct ath5k_rate_table ath5k_rt_turbo = AR5K_RATES_TURBO;
39static const struct ath5k_rate_table ath5k_rt_xr = AR5K_RATES_XR;
40
41/* Prototypes */
42static int ath5k_hw_nic_reset(struct ath5k_hw *, u32);
43static int ath5k_hw_nic_wakeup(struct ath5k_hw *, int, bool);
44static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
45 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
46 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
47 unsigned int, unsigned int);
48static int ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
49 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
50 unsigned int);
51static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *, struct ath5k_desc *,
52 struct ath5k_tx_status *);
53static int ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
54 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
55 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
56 unsigned int, unsigned int);
57static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *, struct ath5k_desc *,
58 struct ath5k_tx_status *);
59static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *, struct ath5k_desc *,
60 struct ath5k_rx_status *);
61static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *, struct ath5k_desc *,
62 struct ath5k_rx_status *);
63static int ath5k_hw_get_capabilities(struct ath5k_hw *);
64
65static int ath5k_eeprom_init(struct ath5k_hw *);
66static int ath5k_eeprom_read_mac(struct ath5k_hw *, u8 *);
67
68static int ath5k_hw_enable_pspoll(struct ath5k_hw *, u8 *, u16);
69static int ath5k_hw_disable_pspoll(struct ath5k_hw *);
70
71/*
72 * Enable to overwrite the country code (use "00" for debug)
73 */
74#if 0
75#define COUNTRYCODE "00"
76#endif
77
78/*******************\
79 General Functions
80\*******************/
81
/*
 * Functions used internally
 */
85
/* Convert microseconds to hardware clock units (40 MHz, 80 MHz in turbo). */
static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
{
	unsigned int clock_rate = turbo ? 80 : 40;

	return usec * clock_rate;
}
90
/* Convert hardware clock units back to microseconds (inverse of htoclock). */
static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
{
	unsigned int clock_rate = turbo ? 80 : 40;

	return clock / clock_rate;
}
95
/*
 * Check if a register write has been completed
 *
 * Polls @reg up to AR5K_TUNE_REGISTER_TIMEOUT times, 15us apart, until
 * either @flag is set (when @is_set) or the value masked by @flag equals
 * @val. Returns 0 when the condition was met, -EAGAIN on timeout.
 */
int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
		bool is_set)
{
	int i;
	u32 data;

	for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
		data = ath5k_hw_reg_read(ah, reg);
		if (is_set && (data & flag))
			break;
		/* NOTE(review): this comparison is also evaluated when
		 * is_set is true and can end the loop early (e.g. when
		 * val == 0 and the flag is clear) — confirm intentional. */
		else if ((data & flag) == val)
			break;
		udelay(15);
	}

	/* i > 0 means we broke out of the loop before the deadline */
	return (i <= 0) ? -EAGAIN : 0;
}
116
117
118/***************************************\
119 Attach/Detach Functions
120\***************************************/
121
/*
 * Power On Self Test helper function
 *
 * Verifies basic register read/write integrity on two scratch-capable
 * registers (AR5K_STA_ID0 and AR5K_PHY(8)): writes a walking 16-bit
 * pattern replicated in both halves, then four fixed patterns, reading
 * each back and comparing. The original register values are restored.
 * Returns 0 on success, -EAGAIN on the first mismatch.
 */
static int ath5k_hw_post(struct ath5k_hw *ah)
{

	int i, c;
	u16 cur_reg;
	u16 regs[2] = {AR5K_STA_ID0, AR5K_PHY(8)};
	u32 var_pattern;
	u32 static_pattern[4] = {
		0x55555555,	0xaaaaaaaa,
		0x66666666,	0x99999999
	};
	u32 init_val;
	u32 cur_val;

	for (c = 0; c < 2; c++) {

		cur_reg = regs[c];

		/* Save previous value */
		init_val = ath5k_hw_reg_read(ah, cur_reg);

		/* Walking pattern: i duplicated in high and low 16 bits */
		for (i = 0; i < 256; i++) {
			var_pattern = i << 16 | i;
			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
			cur_val = ath5k_hw_reg_read(ah, cur_reg);

			if (cur_val != var_pattern) {
				ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
				return -EAGAIN;
			}

			/* Found on ndiswrapper dumps */
			var_pattern = 0x0039080f;
			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
		}

		/* Fixed alternating-bit patterns */
		for (i = 0; i < 4; i++) {
			var_pattern = static_pattern[i];
			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
			cur_val = ath5k_hw_reg_read(ah, cur_reg);

			if (cur_val != var_pattern) {
				ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
				return -EAGAIN;
			}

			/* Found on ndiswrapper dumps */
			var_pattern = 0x003b080f;
			ath5k_hw_reg_write(ah, var_pattern, cur_reg);
		}

		/* Restore previous value */
		ath5k_hw_reg_write(ah, init_val, cur_reg);

	}

	return 0;

}
184
/*
 * Check if the device is supported and initialize the needed structs
 *
 * Allocates and fills an ath5k_hw for @sc: sets driver defaults, hooks
 * up the version-specific descriptor callbacks, wakes the chip, reads
 * MAC/PHY/radio revisions, identifies the radio chip, runs POST,
 * initializes EEPROM-derived capabilities and programs the MAC address.
 * Returns the new ath5k_hw (owned by the caller, freed in
 * ath5k_hw_detach()) or an ERR_PTR() on failure.
 */
struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
{
	struct ath5k_hw *ah;
	struct pci_dev *pdev = sc->pdev;
	u8 mac[ETH_ALEN];
	int ret;
	u32 srev;

	/* If we passed the test, allocate an ath5k_hw struct */
	ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
	if (ah == NULL) {
		ret = -ENOMEM;
		ATH5K_ERR(sc, "out of memory\n");
		goto err;
	}

	ah->ah_sc = sc;
	ah->ah_iobase = sc->iobase;

	/*
	 * HW information — driver defaults
	 */

	ah->ah_op_mode = IEEE80211_IF_TYPE_STA;
	ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
	ah->ah_turbo = false;
	ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
	ah->ah_imr = 0;
	ah->ah_atim_window = 0;
	ah->ah_aifs = AR5K_TUNE_AIFS;
	ah->ah_cw_min = AR5K_TUNE_CWMIN;
	ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
	ah->ah_software_retry = false;
	ah->ah_ant_diversity = AR5K_TUNE_ANT_DIVERSITY;

	/*
	 * Set the mac revision based on the pci id
	 */
	ah->ah_version = mac_version;

	/* Fill the ath5k_hw struct with the needed functions */
	if (ah->ah_version == AR5K_AR5212)
		ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
	else if (ah->ah_version == AR5K_AR5211)
		ah->ah_magic = AR5K_EEPROM_MAGIC_5211;

	/* 5212 uses 4-word tx descriptors, older MACs use 2-word ones */
	if (ah->ah_version == AR5K_AR5212) {
		ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
		ah->ah_setup_xtx_desc = ath5k_hw_setup_xr_tx_desc;
		ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
	} else {
		ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
		ah->ah_setup_xtx_desc = ath5k_hw_setup_xr_tx_desc;
		ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
	}

	if (ah->ah_version == AR5K_AR5212)
		ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
	else if (ah->ah_version <= AR5K_AR5211)
		ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;

	/* Bring device out of sleep and reset its units */
	ret = ath5k_hw_nic_wakeup(ah, AR5K_INIT_MODE, true);
	if (ret)
		goto err_free;

	/* Get MAC, PHY and RADIO revisions */
	srev = ath5k_hw_reg_read(ah, AR5K_SREV);
	ah->ah_mac_srev = srev;
	ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
	ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
	ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
			0xffffffff;
	ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
			CHANNEL_5GHZ);

	if (ah->ah_version == AR5K_AR5210)
		ah->ah_radio_2ghz_revision = 0;
	else
		ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
				CHANNEL_2GHZ);

	/* Return on unsupported chips (unsupported eeprom etc) */
	if ((srev >= AR5K_SREV_VER_AR5416) &&
	(srev < AR5K_SREV_VER_AR2425)) {
		ATH5K_ERR(sc, "Device not yet supported.\n");
		ret = -ENODEV;
		goto err_free;
	} else if (srev == AR5K_SREV_VER_AR2425) {
		ATH5K_WARN(sc, "Support for RF2425 is under development.\n");
	}

	/* Identify single chip solutions */
	if (((srev <= AR5K_SREV_VER_AR5414) &&
	(srev >= AR5K_SREV_VER_AR2413)) ||
	(srev == AR5K_SREV_VER_AR2425)) {
		ah->ah_single_chip = true;
	} else {
		ah->ah_single_chip = false;
	}

	/* Single chip radio */
	if (ah->ah_radio_2ghz_revision == ah->ah_radio_5ghz_revision)
		ah->ah_radio_2ghz_revision = 0;

	/* Identify the radio chip*/
	if (ah->ah_version == AR5K_AR5210) {
		ah->ah_radio = AR5K_RF5110;
	/*
	 * Register returns 0x0/0x04 for radio revision
	 * so ath5k_hw_radio_revision doesn't parse the value
	 * correctly. For now we are based on mac's srev to
	 * identify RF2425 radio.
	 */
	} else if (srev == AR5K_SREV_VER_AR2425) {
		ah->ah_radio = AR5K_RF2425;
		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
	} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) {
		ah->ah_radio = AR5K_RF5111;
		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
	} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC0) {
		ah->ah_radio = AR5K_RF5112;
		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
	} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) {
		ah->ah_radio = AR5K_RF2413;
		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
	} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC2) {
		ah->ah_radio = AR5K_RF5413;
		ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
	} else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5133) {
		/* AR5424 */
		if (srev >= AR5K_SREV_VER_AR5424) {
			ah->ah_radio = AR5K_RF5413;
			ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
		/* AR2424 */
		} else {
			ah->ah_radio = AR5K_RF2413; /* For testing */
			ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
		}
	}
	ah->ah_phy = AR5K_PHY(0);

	/*
	 * Write PCI-E power save settings
	 * (magic values found on ndiswrapper-style dumps; presumably
	 * a serdes configuration sequence — TODO confirm)
	 */
	if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
		ath5k_hw_reg_write(ah, 0x9248fc00, 0x4080);
		ath5k_hw_reg_write(ah, 0x24924924, 0x4080);
		ath5k_hw_reg_write(ah, 0x28000039, 0x4080);
		ath5k_hw_reg_write(ah, 0x53160824, 0x4080);
		ath5k_hw_reg_write(ah, 0xe5980579, 0x4080);
		ath5k_hw_reg_write(ah, 0x001defff, 0x4080);
		ath5k_hw_reg_write(ah, 0x1aaabe40, 0x4080);
		ath5k_hw_reg_write(ah, 0xbe105554, 0x4080);
		ath5k_hw_reg_write(ah, 0x000e3007, 0x4080);
		ath5k_hw_reg_write(ah, 0x00000000, 0x4084);
	}

	/*
	 * POST
	 */
	ret = ath5k_hw_post(ah);
	if (ret)
		goto err_free;

	/* Write AR5K_PCICFG_UNK on 2112B and later chips */
	if (ah->ah_radio_5ghz_revision > AR5K_SREV_RAD_2112B ||
	srev > AR5K_SREV_VER_AR2413) {
		ath5k_hw_reg_write(ah, AR5K_PCICFG_UNK, AR5K_PCICFG);
	}

	/*
	 * Get card capabilities, values, ...
	 */
	ret = ath5k_eeprom_init(ah);
	if (ret) {
		ATH5K_ERR(sc, "unable to init EEPROM\n");
		goto err_free;
	}

	/* Get misc capabilities */
	ret = ath5k_hw_get_capabilities(ah);
	if (ret) {
		ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n",
			sc->pdev->device);
		goto err_free;
	}

	/* Get MAC address */
	ret = ath5k_eeprom_read_mac(ah, mac);
	if (ret) {
		ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
			sc->pdev->device);
		goto err_free;
	}

	ath5k_hw_set_lladdr(ah, mac);
	/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
	memset(ah->ah_bssid, 0xff, ETH_ALEN);
	ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
	ath5k_hw_set_opmode(ah);

	ath5k_hw_set_rfgain_opt(ah);

	return ah;
err_free:
	kfree(ah);
err:
	return ERR_PTR(ret);
}
398
/*
 * Bring up MAC + PHY Chips
 *
 * Wakes the chip, derives the PHY mode/PLL/turbo settings from the
 * channel @flags (on AR5211+), performs a full chip reset followed by a
 * second wakeup and a warm reset, then programs the PHY PLL and mode.
 * Returns 0 on success or a negative errno.
 */
static int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
{
	struct pci_dev *pdev = ah->ah_sc->pdev;
	u32 turbo, mode, clock, bus_flags;
	int ret;

	turbo = 0;
	mode = 0;
	clock = 0;

	ATH5K_TRACE(ah->ah_sc);

	/* Wakeup the device */
	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
	if (ret) {
		ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
		return ret;
	}

	if (ah->ah_version != AR5K_AR5210) {
		/*
		 * Get channel mode flags
		 */

		if (ah->ah_radio >= AR5K_RF5112) {
			mode = AR5K_PHY_MODE_RAD_RF5112;
			clock = AR5K_PHY_PLL_RF5112;
		} else {
			mode = AR5K_PHY_MODE_RAD_RF5111;	/*Zero*/
			clock = AR5K_PHY_PLL_RF5111;		/*Zero*/
		}

		if (flags & CHANNEL_2GHZ) {
			mode |= AR5K_PHY_MODE_FREQ_2GHZ;
			clock |= AR5K_PHY_PLL_44MHZ;

			if (flags & CHANNEL_CCK) {
				mode |= AR5K_PHY_MODE_MOD_CCK;
			} else if (flags & CHANNEL_OFDM) {
				/* XXX Dynamic OFDM/CCK is not supported by the
				 * AR5211 so we set MOD_OFDM for plain g (no
				 * CCK headers) operation. We need to test
				 * this, 5211 might support ofdm-only g after
				 * all, there are also initial register values
				 * in the code for g mode (see initvals.c). */
				if (ah->ah_version == AR5K_AR5211)
					mode |= AR5K_PHY_MODE_MOD_OFDM;
				else
					mode |= AR5K_PHY_MODE_MOD_DYN;
			} else {
				ATH5K_ERR(ah->ah_sc,
					"invalid radio modulation mode\n");
				return -EINVAL;
			}
		} else if (flags & CHANNEL_5GHZ) {
			mode |= AR5K_PHY_MODE_FREQ_5GHZ;
			clock |= AR5K_PHY_PLL_40MHZ;

			if (flags & CHANNEL_OFDM)
				mode |= AR5K_PHY_MODE_MOD_OFDM;
			else {
				ATH5K_ERR(ah->ah_sc,
					"invalid radio modulation mode\n");
				return -EINVAL;
			}
		} else {
			ATH5K_ERR(ah->ah_sc, "invalid radio frequency mode\n");
			return -EINVAL;
		}

		if (flags & CHANNEL_TURBO)
			turbo = AR5K_PHY_TURBO_MODE | AR5K_PHY_TURBO_SHORT;
	} else { /* Reset the device */

		/* ...enable Atheros turbo mode if requested */
		if (flags & CHANNEL_TURBO)
			ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE,
					AR5K_PHY_TURBO);
	}

	/* resetting PCI on PCI-E cards results card to hang
	 * and always return 0xffff... so we ignore that flag
	 * for PCI-E cards */
	bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI;

	/* Reset chipset */
	ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
		AR5K_RESET_CTL_BASEBAND | bus_flags);
	if (ret) {
		ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n");
		return -EIO;
	}

	/* AR5210 needs extra settle time after reset */
	if (ah->ah_version == AR5K_AR5210)
		udelay(2300);

	/* ...wakeup again!*/
	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
	if (ret) {
		ATH5K_ERR(ah->ah_sc, "failed to resume the MAC Chip\n");
		return ret;
	}

	/* ...final warm reset */
	if (ath5k_hw_nic_reset(ah, 0)) {
		ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
		return -EIO;
	}

	if (ah->ah_version != AR5K_AR5210) {
		/* ...set the PHY operating mode */
		ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
		udelay(300);

		ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE);
		ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO);
	}

	return 0;
}
522
523/*
524 * Get the rate table for a specific operation mode
525 */
526const struct ath5k_rate_table *ath5k_hw_get_rate_table(struct ath5k_hw *ah,
527 unsigned int mode)
528{
529 ATH5K_TRACE(ah->ah_sc);
530
531 if (!test_bit(mode, ah->ah_capabilities.cap_mode))
532 return NULL;
533
534 /* Get rate tables */
535 switch (mode) {
536 case AR5K_MODE_11A:
537 return &ath5k_rt_11a;
538 case AR5K_MODE_11A_TURBO:
539 return &ath5k_rt_turbo;
540 case AR5K_MODE_11B:
541 return &ath5k_rt_11b;
542 case AR5K_MODE_11G:
543 return &ath5k_rt_11g;
544 case AR5K_MODE_11G_TURBO:
545 return &ath5k_rt_xr;
546 }
547
548 return NULL;
549}
550
551/*
552 * Free the ath5k_hw struct
553 */
554void ath5k_hw_detach(struct ath5k_hw *ah)
555{
556 ATH5K_TRACE(ah->ah_sc);
557
558 __set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
559
560 if (ah->ah_rf_banks != NULL)
561 kfree(ah->ah_rf_banks);
562
563 /* assume interrupts are down */
564 kfree(ah);
565}
566
567/****************************\
568 Reset function and helpers
569\****************************/
570
/**
 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
 *
 * @ah: the &struct ath5k_hw
 * @channel: the currently set channel upon reset
 *
 * Write the OFDM timings for the AR5212 upon reset. This is a helper for
 * ath5k_hw_reset(). This seems to tune the PLL a specified frequency
 * depending on the bandwidth of the channel.
 *
 * Returns 0 on success, -EINVAL if the scaled coefficient has no set
 * bits (coef_exp would be 0). BUG()s when called for a non-AR5212 MAC
 * or a non-OFDM channel.
 */
static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
	struct ieee80211_channel *channel)
{
	/* Get exponent and mantissa and set it */
	u32 coef_scaled, coef_exp, coef_man,
		ds_coef_exp, ds_coef_man, clock;

	/* Only valid on AR5212 with an OFDM channel */
	if (!(ah->ah_version == AR5K_AR5212) ||
		!(channel->hw_value & CHANNEL_OFDM))
		BUG();

	/* Seems there are two PLLs, one for baseband sampling and one
	 * for tuning. Tuning basebands are 40 MHz or 80MHz when in
	 * turbo. */
	clock = channel->hw_value & CHANNEL_TURBO ? 80 : 40;
	coef_scaled = ((5 * (clock << 24)) / 2) /
	channel->center_freq;

	/* Find the position of the highest set bit (integer log2) */
	for (coef_exp = 31; coef_exp > 0; coef_exp--)
		if ((coef_scaled >> coef_exp) & 0x1)
			break;

	if (!coef_exp)
		return -EINVAL;

	/* Split the scaled coefficient into mantissa/exponent fields,
	 * rounding the mantissa to nearest */
	coef_exp = 14 - (coef_exp - 24);
	coef_man = coef_scaled +
		(1 << (24 - coef_exp - 1));
	ds_coef_man = coef_man >> (24 - coef_exp);
	ds_coef_exp = coef_exp - 16;

	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
		AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
		AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);

	return 0;
}
620
621/**
622 * ath5k_hw_write_rate_duration - set rate duration during hw resets
623 *
624 * @ah: the &struct ath5k_hw
625 * @mode: one of enum ath5k_driver_mode
626 *
627 * Write the rate duration table for the current mode upon hw reset. This
628 * is a helper for ath5k_hw_reset(). It seems all this is doing is setting
629 * an ACK timeout for the hardware for the current mode for each rate. The
630 * rates which are capable of short preamble (802.11b rates 2Mbps, 5.5Mbps,
631 * and 11Mbps) have another register for the short preamble ACK timeout
632 * calculation.
633 *
634 */
635static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
636 unsigned int mode)
637{
638 struct ath5k_softc *sc = ah->ah_sc;
639 const struct ath5k_rate_table *rt;
640 struct ieee80211_rate srate = {};
641 unsigned int i;
642
643 /* Get rate table for the current operating mode */
644 rt = ath5k_hw_get_rate_table(ah, mode);
645
646 /* Write rate duration table */
647 for (i = 0; i < rt->rate_count; i++) {
648 const struct ath5k_rate *rate, *control_rate;
649
650 u32 reg;
651 u16 tx_time;
652
653 rate = &rt->rates[i];
654 control_rate = &rt->rates[rate->control_rate];
655
656 /* Set ACK timeout */
657 reg = AR5K_RATE_DUR(rate->rate_code);
658
659 srate.bitrate = control_rate->rate_kbps/100;
660
661 /* An ACK frame consists of 10 bytes. If you add the FCS,
662 * which ieee80211_generic_frame_duration() adds,
663 * its 14 bytes. Note we use the control rate and not the
664 * actual rate for this rate. See mac80211 tx.c
665 * ieee80211_duration() for a brief description of
666 * what rate we should choose to TX ACKs. */
667 tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw,
668 sc->vif, 10, &srate));
669
670 ath5k_hw_reg_write(ah, tx_time, reg);
671
672 if (!HAS_SHPREAMBLE(i))
673 continue;
674
675 /*
676 * We're not distinguishing short preamble here,
677 * This is true, all we'll get is a longer value here
678 * which is not necessarilly bad. We could use
679 * export ieee80211_frame_duration() but that needs to be
680 * fixed first to be properly used by mac802111 drivers:
681 *
682 * - remove erp stuff and let the routine figure ofdm
683 * erp rates
684 * - remove passing argument ieee80211_local as
685 * drivers don't have access to it
686 * - move drivers using ieee80211_generic_frame_duration()
687 * to this
688 */
689 ath5k_hw_reg_write(ah, tx_time,
690 reg + (AR5K_SET_SHORT_PREAMBLE << 2));
691 }
692}
693
694/*
695 * Main reset function
696 */
/*
 * Main reset function
 *
 * @ah: the &struct ath5k_hw
 * @op_mode: interface type, stored as the new operating mode
 * @channel: the channel to tune to after reset
 * @change_channel: true when only the channel changes; some state (TX
 *	sequence number of queue 0, default antenna, RF gain) is then
 *	saved before the reset and restored afterwards
 *
 * Performs the full chip reset sequence: save state, wake the chip,
 * write initvals and RF settings for the new mode, restore state,
 * configure DMA, enable the PHY and run AGC/NF calibration, then reset
 * the TX queues. The register write order below is significant; do not
 * reorder. Returns 0 on success or a negative errno.
 */
int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
	struct ieee80211_channel *channel, bool change_channel)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	struct pci_dev *pdev = ah->ah_sc->pdev;
	u32 data, s_seq, s_ant, s_led[3], dma_size;
	unsigned int i, mode, freq, ee_mode, ant[2];
	int ret;

	ATH5K_TRACE(ah->ah_sc);

	s_seq = 0;
	s_ant = 0;
	ee_mode = 0;
	freq = 0;
	mode = 0;

	/*
	 * Save some registers before a reset
	 */
	/*DCU/Antenna selection not available on 5210*/
	if (ah->ah_version != AR5K_AR5210) {
		if (change_channel) {
			/* Seq number for queue 0 -do this for all queues ? */
			s_seq = ath5k_hw_reg_read(ah,
				AR5K_QUEUE_DFS_SEQNUM(0));
			/*Default antenna*/
			s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
		}
	}

	/*GPIOs*/
	s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) & AR5K_PCICFG_LEDSTATE;
	s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
	s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);

	/* Refresh RF gain info before the banks are lost to the reset */
	if (change_channel && ah->ah_rf_banks != NULL)
		ath5k_hw_get_rf_gain(ah);


	/*Wakeup the device*/
	ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
	if (ret)
		return ret;

	/*
	 * Initialize operating mode
	 */
	ah->ah_op_mode = op_mode;

	/*
	 * 5111/5112 Settings
	 * 5210 only comes with RF5110
	 */
	if (ah->ah_version != AR5K_AR5210) {
		if (ah->ah_radio != AR5K_RF5111 &&
			ah->ah_radio != AR5K_RF5112 &&
			ah->ah_radio != AR5K_RF5413 &&
			ah->ah_radio != AR5K_RF2413 &&
			ah->ah_radio != AR5K_RF2425) {
			ATH5K_ERR(ah->ah_sc,
				"invalid phy radio: %u\n", ah->ah_radio);
			return -EINVAL;
		}

		/* Pick driver mode, RF gain band and EEPROM mode from the
		 * channel flags */
		switch (channel->hw_value & CHANNEL_MODES) {
		case CHANNEL_A:
			mode = AR5K_MODE_11A;
			freq = AR5K_INI_RFGAIN_5GHZ;
			ee_mode = AR5K_EEPROM_MODE_11A;
			break;
		case CHANNEL_G:
			mode = AR5K_MODE_11G;
			freq = AR5K_INI_RFGAIN_2GHZ;
			ee_mode = AR5K_EEPROM_MODE_11G;
			break;
		case CHANNEL_B:
			mode = AR5K_MODE_11B;
			freq = AR5K_INI_RFGAIN_2GHZ;
			ee_mode = AR5K_EEPROM_MODE_11B;
			break;
		case CHANNEL_T:
			mode = AR5K_MODE_11A_TURBO;
			freq = AR5K_INI_RFGAIN_5GHZ;
			ee_mode = AR5K_EEPROM_MODE_11A;
			break;
		/*Is this ok on 5211 too ?*/
		case CHANNEL_TG:
			mode = AR5K_MODE_11G_TURBO;
			freq = AR5K_INI_RFGAIN_2GHZ;
			ee_mode = AR5K_EEPROM_MODE_11G;
			break;
		case CHANNEL_XR:
			if (ah->ah_version == AR5K_AR5211) {
				ATH5K_ERR(ah->ah_sc,
					"XR mode not available on 5211");
				return -EINVAL;
			}
			mode = AR5K_MODE_XR;
			freq = AR5K_INI_RFGAIN_5GHZ;
			ee_mode = AR5K_EEPROM_MODE_11A;
			break;
		default:
			ATH5K_ERR(ah->ah_sc,
				"invalid channel: %d\n", channel->center_freq);
			return -EINVAL;
		}

		/* PHY access enable */
		ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));

	}

	ret = ath5k_hw_write_initvals(ah, mode, change_channel);
	if (ret)
		return ret;

	/*
	 * 5211/5212 Specific
	 */
	if (ah->ah_version != AR5K_AR5210) {
		/*
		 * Write initial RF gain settings
		 * This should work for both 5111/5112
		 */
		ret = ath5k_hw_rfgain(ah, freq);
		if (ret)
			return ret;

		mdelay(1);

		/*
		 * Write some more initial register settings
		 */
		if (ah->ah_version == AR5K_AR5212) {
			ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);

			/* NOTE(review): brace-less nested if/else below --
			 * the final else pairs with the outer if (valid C,
			 * but fragile); value depends on MAC silicon rev */
			if (channel->hw_value == CHANNEL_G)
				if (ah->ah_mac_srev < AR5K_SREV_VER_AR2413)
					ath5k_hw_reg_write(ah, 0x00f80d80,
								0x994c);
				else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2424)
					ath5k_hw_reg_write(ah, 0x00380140,
								0x994c);
				else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2425)
					ath5k_hw_reg_write(ah, 0x00fc0ec0,
								0x994c);
				else /* 2425 */
					ath5k_hw_reg_write(ah, 0x00fc0fc0,
								0x994c);
			else
				ath5k_hw_reg_write(ah, 0x00000000, 0x994c);

			/* Some bits are disabled here, we know nothing about
			 * register 0xa228 yet, most of the times this ends up
			 * with a value 0x9b5 -haven't seen any dump with
			 * a different value- */
			/* Got this from decompiling binary HAL */
			data = ath5k_hw_reg_read(ah, 0xa228);
			data &= 0xfffffdff;
			ath5k_hw_reg_write(ah, data, 0xa228);

			data = ath5k_hw_reg_read(ah, 0xa228);
			data &= 0xfffe03ff;
			ath5k_hw_reg_write(ah, data, 0xa228);
			data = 0;

			/* Just write 0x9b5 ? */
			/* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
			ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
			ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
			ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL);
		}

		/* Fix for first revision of the RF5112 RF chipset */
		if (ah->ah_radio >= AR5K_RF5112 &&
				ah->ah_radio_5ghz_revision <
				AR5K_SREV_RAD_5112A) {
			ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
					AR5K_PHY_CCKTXCTL);
			if (channel->hw_value & CHANNEL_5GHZ)
				data = 0xffb81020;
			else
				data = 0xffb80d20;
			ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
			data = 0;
		}

		/*
		 * Set TX power (FIXME)
		 */
		ret = ath5k_hw_txpower(ah, channel, AR5K_TUNE_DEFAULT_TXPOWER);
		if (ret)
			return ret;

		/* Write rate duration table only on AR5212 and if
		 * virtual interface has already been brought up
		 * XXX: rethink this after new mode changes to
		 * mac80211 are integrated */
		if (ah->ah_version == AR5K_AR5212 &&
			ah->ah_sc->vif != NULL)
			ath5k_hw_write_rate_duration(ah, mode);

		/*
		 * Write RF registers
		 */
		ret = ath5k_hw_rfregs(ah, channel, mode);
		if (ret)
			return ret;

		/*
		 * Configure additional registers
		 */

		/* Write OFDM timings on 5212*/
		if (ah->ah_version == AR5K_AR5212 &&
			channel->hw_value & CHANNEL_OFDM) {
			ret = ath5k_hw_write_ofdm_timings(ah, channel);
			if (ret)
				return ret;
		}

		/*Enable/disable 802.11b mode on 5111
		(enable 2111 frequency converter + CCK)*/
		if (ah->ah_radio == AR5K_RF5111) {
			if (mode == AR5K_MODE_11B)
				AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
				    AR5K_TXCFG_B_MODE);
			else
				AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
				    AR5K_TXCFG_B_MODE);
		}

		/*
		 * Set channel and calibrate the PHY
		 */
		ret = ath5k_hw_channel(ah, channel);
		if (ret)
			return ret;

		/* Set antenna mode */
		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_ANT_CTL,
			ah->ah_antenna[ee_mode][0], 0xfffffc06);

		/*
		 * In case a fixed antenna was set as default
		 * write the same settings on both AR5K_PHY_ANT_SWITCH_TABLE
		 * registers.
		 */
		if (s_ant != 0){
			if (s_ant == AR5K_ANT_FIXED_A) /* 1 - Main */
				ant[0] = ant[1] = AR5K_ANT_FIXED_A;
			else	/* 2 - Aux */
				ant[0] = ant[1] = AR5K_ANT_FIXED_B;
		} else {
			ant[0] = AR5K_ANT_FIXED_A;
			ant[1] = AR5K_ANT_FIXED_B;
		}

		ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[0]],
			AR5K_PHY_ANT_SWITCH_TABLE_0);
		ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[1]],
			AR5K_PHY_ANT_SWITCH_TABLE_1);

		/* Commit values from EEPROM */
		if (ah->ah_radio == AR5K_RF5111)
			AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
			    AR5K_PHY_FRAME_CTL_TX_CLIP, ee->ee_tx_clip);

		ath5k_hw_reg_write(ah,
			AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
			AR5K_PHY_NFTHRES);

		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_SETTLING,
			(ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
			0xffffc07f);
		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
			(ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000,
			0xfffc0fff);
		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
			(ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
			((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00),
			0xffff0000);

		ath5k_hw_reg_write(ah,
			(ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
			(ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
			(ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
			(ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);

		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_RF_CTL3,
			ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff);
		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_NF,
			(ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff);
		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_OFDM_SELFCORR, 4, 0xffffff01);

		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
		    AR5K_PHY_IQ_CORR_ENABLE |
		    (ee->ee_i_cal[ee_mode] << AR5K_PHY_IQ_CORR_Q_I_COFF_S) |
		    ee->ee_q_cal[ee_mode]);

		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
			AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
				AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
				ee->ee_margin_tx_rx[ee_mode]);

	} else {
		/* 5210: just pulse the PHY off and back on */
		mdelay(1);
		/* Disable phy and wait */
		ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
		mdelay(1);
	}

	/*
	 * Restore saved values
	 */
	/*DCU/Antenna selection not available on 5210*/
	if (ah->ah_version != AR5K_AR5210) {
		ath5k_hw_reg_write(ah, s_seq, AR5K_QUEUE_DFS_SEQNUM(0));
		ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA);
	}
	AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]);
	ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
	ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);

	/*
	 * Misc
	 */
	/* XXX: add ah->aid once mac80211 gives this to us */
	ath5k_hw_set_associd(ah, ah->ah_bssid, 0);

	ath5k_hw_set_opmode(ah);
	/*PISR/SISR Not available on 5210*/
	if (ah->ah_version != AR5K_AR5210) {
		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
		/* If we later allow tuning for this, store into sc structure */
		data = AR5K_TUNE_RSSI_THRES |
			AR5K_TUNE_BMISS_THRES << AR5K_RSSI_THR_BMISS_S;
		ath5k_hw_reg_write(ah, data, AR5K_RSSI_THR);
	}

	/*
	 * Set Rx/Tx DMA Configuration
	 *
	 * Set maximum DMA size (512) except for PCI-E cards since
	 * it causes rx overruns and tx errors (tested on 5424 but since
	 * rx overruns also occur on 5416/5418 with madwifi we set 128
	 * for all PCI-E cards to be safe).
	 *
	 * In dumps this is 128 for allchips.
	 *
	 * XXX: need to check 5210 for this
	 * TODO: Check out tx triger level, it's always 64 on dumps but I
	 * guess we can tweak it and see how it goes ;-)
	 */
	dma_size = (pdev->is_pcie) ? AR5K_DMASIZE_128B : AR5K_DMASIZE_512B;
	if (ah->ah_version != AR5K_AR5210) {
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
			AR5K_TXCFG_SDMAMR, dma_size);
		AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
			AR5K_RXCFG_SDMAMW, dma_size);
	}

	/*
	 * Enable the PHY and wait until completion
	 */
	ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);

	/*
	 * On 5211+ read activation -> rx delay
	 * and use it.
	 */
	if (ah->ah_version != AR5K_AR5210) {
		data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
			AR5K_PHY_RX_DELAY_M;
		data = (channel->hw_value & CHANNEL_CCK) ?
			((data << 2) / 22) : (data / 10);

		udelay(100 + (2 * data));
		data = 0;
	} else {
		mdelay(1);
	}

	/*
	 * Perform ADC test (?)
	 */
	data = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
	ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
	for (i = 0; i <= 20; i++) {
		if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
			break;
		udelay(200);
	}
	ath5k_hw_reg_write(ah, data, AR5K_PHY_TST1);
	data = 0;

	/*
	 * Start automatic gain calibration
	 *
	 * During AGC calibration RX path is re-routed to
	 * a signal detector so we don't receive anything.
	 *
	 * This method is used to calibrate some static offsets
	 * used together with on-the fly I/Q calibration (the
	 * one performed via ath5k_hw_phy_calibrate), that doesn't
	 * interrupt rx path.
	 *
	 * If we are in a noisy environment AGC calibration may time
	 * out.
	 */
	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
		AR5K_PHY_AGCCTL_CAL);

	/* At the same time start I/Q calibration for QAM constellation
	 * -no need for CCK- */
	ah->ah_calibration = false;
	if (!(mode == AR5K_MODE_11B)) {
		ah->ah_calibration = true;
		AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
				AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
				AR5K_PHY_IQ_RUN);
	}

	/* Wait for gain calibration to finish (we check for I/Q calibration
	 * during ath5k_phy_calibrate) */
	if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
			AR5K_PHY_AGCCTL_CAL, 0, false)) {
		ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
			channel->center_freq);
		return -EAGAIN;
	}

	/*
	 * Start noise floor calibration
	 *
	 * If we run NF calibration before AGC, it always times out.
	 * Binary HAL starts NF and AGC calibration at the same time
	 * and only waits for AGC to finish. I believe that's wrong because
	 * during NF calibration, rx path is also routed to a detector, so if
	 * it doesn't finish we won't have RX.
	 *
	 * XXX: Find an interval that's OK for all cards...
	 */
	ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
	if (ret)
		return ret;

	/*
	 * Reset queues and start beacon timers at the end of the reset routine
	 */
	for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
		/*No QCU on 5210*/
		if (ah->ah_version != AR5K_AR5210)
			AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(i), i);

		ret = ath5k_hw_reset_tx_queue(ah, i);
		if (ret) {
			ATH5K_ERR(ah->ah_sc,
				"failed to reset TX queue #%d\n", i);
			return ret;
		}
	}

	/* Pre-enable interrupts on 5211/5212*/
	if (ah->ah_version != AR5K_AR5210)
		ath5k_hw_set_intr(ah, AR5K_INT_RX | AR5K_INT_TX |
				AR5K_INT_FATAL);

	/*
	 * Set RF kill flags if supported by the device (read from the EEPROM)
	 * Disable gpio_intr for now since it results system hang.
	 * TODO: Handle this in ath5k_intr
	 */
#if 0
	if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) {
		ath5k_hw_set_gpio_input(ah, 0);
		ah->ah_gpio[0] = ath5k_hw_get_gpio(ah, 0);
		if (ah->ah_gpio[0] == 0)
			ath5k_hw_set_gpio_intr(ah, 0, 1);
		else
			ath5k_hw_set_gpio_intr(ah, 0, 0);
	}
#endif

	/*
	 * Set the 32MHz reference clock on 5212 phy clock sleep register
	 *
	 * TODO: Find out how to switch to external 32Khz clock to save power
	 */
	if (ah->ah_version == AR5K_AR5212) {
		ath5k_hw_reg_write(ah, AR5K_PHY_SCR_32MHZ, AR5K_PHY_SCR);
		ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
		ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ, AR5K_PHY_SCAL);
		ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
		ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
		ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);

		data = ath5k_hw_reg_read(ah, AR5K_USEC_5211) & 0xffffc07f ;
		data |= (ah->ah_phy_spending == AR5K_PHY_SPENDING_18) ?
						0x00000f80 : 0x00001380 ;
		ath5k_hw_reg_write(ah, data, AR5K_USEC_5211);
		data = 0;
	}

	if (ah->ah_version == AR5K_AR5212) {
		ath5k_hw_reg_write(ah, 0x000100aa, 0x8118);
		ath5k_hw_reg_write(ah, 0x00003210, 0x811c);
		ath5k_hw_reg_write(ah, 0x00000052, 0x8108);
		if (ah->ah_mac_srev >= AR5K_SREV_VER_AR2413)
			ath5k_hw_reg_write(ah, 0x00000004, 0x8120);
	}

	/*
	 * Disable beacons and reset the register
	 */
	AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE |
			AR5K_BEACON_RESET_TSF);

	return 0;
}
1219
1220/*
1221 * Reset chipset
1222 */
1223static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
1224{
1225 int ret;
1226 u32 mask = val ? val : ~0U;
1227
1228 ATH5K_TRACE(ah->ah_sc);
1229
1230 /* Read-and-clear RX Descriptor Pointer*/
1231 ath5k_hw_reg_read(ah, AR5K_RXDP);
1232
1233 /*
1234 * Reset the device and wait until success
1235 */
1236 ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
1237
1238 /* Wait at least 128 PCI clocks */
1239 udelay(15);
1240
1241 if (ah->ah_version == AR5K_AR5210) {
1242 val &= AR5K_RESET_CTL_CHIP;
1243 mask &= AR5K_RESET_CTL_CHIP;
1244 } else {
1245 val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
1246 mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
1247 }
1248
1249 ret = ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, val, false);
1250
1251 /*
1252 * Reset configuration register (for hw byte-swap). Note that this
1253 * is only set for big endian. We do the necessary magic in
1254 * AR5K_INIT_CFG.
1255 */
1256 if ((val & AR5K_RESET_CTL_PCU) == 0)
1257 ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
1258
1259 return ret;
1260}
1261
1262/*
1263 * Power management functions
1264 */
1265
1266/*
1267 * Sleep control
1268 */
1269int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1270 bool set_chip, u16 sleep_duration)
1271{
1272 unsigned int i;
1273 u32 staid, data;
1274
1275 ATH5K_TRACE(ah->ah_sc);
1276 staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
1277
1278 switch (mode) {
1279 case AR5K_PM_AUTO:
1280 staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
1281 /* fallthrough */
1282 case AR5K_PM_NETWORK_SLEEP:
1283 if (set_chip)
1284 ath5k_hw_reg_write(ah,
1285 AR5K_SLEEP_CTL_SLE_ALLOW |
1286 sleep_duration,
1287 AR5K_SLEEP_CTL);
1288
1289 staid |= AR5K_STA_ID1_PWR_SV;
1290 break;
1291
1292 case AR5K_PM_FULL_SLEEP:
1293 if (set_chip)
1294 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP,
1295 AR5K_SLEEP_CTL);
1296
1297 staid |= AR5K_STA_ID1_PWR_SV;
1298 break;
1299
1300 case AR5K_PM_AWAKE:
1301
1302 staid &= ~AR5K_STA_ID1_PWR_SV;
1303
1304 if (!set_chip)
1305 goto commit;
1306
1307 /* Preserve sleep duration */
1308 data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
1309 if( data & 0xffc00000 ){
1310 data = 0;
1311 } else {
1312 data = data & 0xfffcffff;
1313 }
1314
1315 ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
1316 udelay(15);
1317
1318 for (i = 50; i > 0; i--) {
1319 /* Check if the chip did wake up */
1320 if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
1321 AR5K_PCICFG_SPWR_DN) == 0)
1322 break;
1323
1324 /* Wait a bit and retry */
1325 udelay(200);
1326 ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
1327 }
1328
1329 /* Fail if the chip didn't wake up */
1330 if (i <= 0)
1331 return -EIO;
1332
1333 break;
1334
1335 default:
1336 return -EINVAL;
1337 }
1338
1339commit:
1340 ah->ah_power_mode = mode;
1341 ath5k_hw_reg_write(ah, staid, AR5K_STA_ID1);
1342
1343 return 0;
1344}
1345
1346/***********************\
1347 DMA Related Functions
1348\***********************/
1349
1350/*
1351 * Receive functions
1352 */
1353
1354/*
1355 * Start DMA receive
1356 */
/*
 * Start DMA receive
 *
 * Sets the RX-enable bit in the control register; the trailing read
 * flushes the write out to the hardware.
 */
void ath5k_hw_start_rx(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
	ath5k_hw_reg_read(ah, AR5K_CR);
}
1363
1364/*
1365 * Stop DMA receive
1366 */
1367int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
1368{
1369 unsigned int i;
1370
1371 ATH5K_TRACE(ah->ah_sc);
1372 ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
1373
1374 /*
1375 * It may take some time to disable the DMA receive unit
1376 */
1377 for (i = 2000; i > 0 &&
1378 (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
1379 i--)
1380 udelay(10);
1381
1382 return i ? 0 : -EBUSY;
1383}
1384
1385/*
1386 * Get the address of the RX Descriptor
1387 */
/*
 * Get the address of the RX Descriptor
 *
 * Returns the physical address currently held in the RX descriptor
 * pointer register.
 */
u32 ath5k_hw_get_rx_buf(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
1392
1393/*
1394 * Set the address of the RX Descriptor
1395 */
/*
 * Set the address of the RX Descriptor
 *
 * @phys_addr: physical (DMA) address of the first RX descriptor
 */
void ath5k_hw_put_rx_buf(struct ath5k_hw *ah, u32 phys_addr)
{
	ATH5K_TRACE(ah->ah_sc);

	/*TODO:Shouldn't we check if RX is enabled first ?*/
	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
}
1403
1404/*
1405 * Transmit functions
1406 */
1407
1408/*
1409 * Start DMA transmit for a specific queue
1410 * (see also QCU/DCU functions)
1411 */
1412int ath5k_hw_tx_start(struct ath5k_hw *ah, unsigned int queue)
1413{
1414 u32 tx_queue;
1415
1416 ATH5K_TRACE(ah->ah_sc);
1417 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
1418
1419 /* Return if queue is declared inactive */
1420 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
1421 return -EIO;
1422
1423 if (ah->ah_version == AR5K_AR5210) {
1424 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
1425
1426 /*
1427 * Set the queue by type on 5210
1428 */
1429 switch (ah->ah_txq[queue].tqi_type) {
1430 case AR5K_TX_QUEUE_DATA:
1431 tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
1432 break;
1433 case AR5K_TX_QUEUE_BEACON:
1434 tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
1435 ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
1436 AR5K_BSR);
1437 break;
1438 case AR5K_TX_QUEUE_CAB:
1439 tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
1440 ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
1441 AR5K_BCR_BDMAE, AR5K_BSR);
1442 break;
1443 default:
1444 return -EINVAL;
1445 }
1446 /* Start queue */
1447 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
1448 ath5k_hw_reg_read(ah, AR5K_CR);
1449 } else {
1450 /* Return if queue is disabled */
1451 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
1452 return -EIO;
1453
1454 /* Start queue */
1455 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
1456 }
1457
1458 return 0;
1459}
1460
1461/*
1462 * Stop DMA transmit for a specific queue
1463 * (see also QCU/DCU functions)
1464 */
/*
 * Stop DMA transmit for a specific queue
 * (see also QCU/DCU functions)
 *
 * Returns 0 on success, -EIO for an inactive queue, -EINVAL for an
 * unknown 5210 queue type, -EBUSY when frames were still pending
 * after the poll loop on 5211+.
 */
int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	unsigned int i = 100;
	u32 tx_queue, pending;

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set by queue type
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			/* XXX Fix me... */
			/* NOTE(review): TXD1 & ~TXD1 is always 0, so this
			 * line is a no-op -- presumably ~AR5K_CR_TXE1 was
			 * intended; confirm before changing */
			tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, 0, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}

		/* Stop queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/*
		 * Schedule TX disable and wait until queue is empty
		 */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

		/*Check for pending frames*/
		/* Poll the per-queue pending-frame counter, up to
		 * 100 iterations x 100us */
		do {
			pending = ath5k_hw_reg_read(ah,
				AR5K_QUEUE_STATUS(queue)) &
				AR5K_QCU_STS_FRMPENDCNT;
			udelay(100);
		} while (--i && pending);

		/* Clear register */
		ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
		if (pending)
			return -EBUSY;
	}

	/* TODO: Check for success else return error */
	return 0;
}
1523
1524/*
1525 * Get the address of the TX Descriptor for a specific queue
1526 * (see also QCU/DCU functions)
1527 */
1528u32 ath5k_hw_get_tx_buf(struct ath5k_hw *ah, unsigned int queue)
1529{
1530 u16 tx_reg;
1531
1532 ATH5K_TRACE(ah->ah_sc);
1533 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
1534
1535 /*
1536 * Get the transmit queue descriptor pointer from the selected queue
1537 */
1538 /*5210 doesn't have QCU*/
1539 if (ah->ah_version == AR5K_AR5210) {
1540 switch (ah->ah_txq[queue].tqi_type) {
1541 case AR5K_TX_QUEUE_DATA:
1542 tx_reg = AR5K_NOQCU_TXDP0;
1543 break;
1544 case AR5K_TX_QUEUE_BEACON:
1545 case AR5K_TX_QUEUE_CAB:
1546 tx_reg = AR5K_NOQCU_TXDP1;
1547 break;
1548 default:
1549 return 0xffffffff;
1550 }
1551 } else {
1552 tx_reg = AR5K_QUEUE_TXDP(queue);
1553 }
1554
1555 return ath5k_hw_reg_read(ah, tx_reg);
1556}
1557
1558/*
1559 * Set the address of the TX Descriptor for a specific queue
1560 * (see also QCU/DCU functions)
1561 */
1562int ath5k_hw_put_tx_buf(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
1563{
1564 u16 tx_reg;
1565
1566 ATH5K_TRACE(ah->ah_sc);
1567 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
1568
1569 /*
1570 * Set the transmit queue descriptor pointer register by type
1571 * on 5210
1572 */
1573 if (ah->ah_version == AR5K_AR5210) {
1574 switch (ah->ah_txq[queue].tqi_type) {
1575 case AR5K_TX_QUEUE_DATA:
1576 tx_reg = AR5K_NOQCU_TXDP0;
1577 break;
1578 case AR5K_TX_QUEUE_BEACON:
1579 case AR5K_TX_QUEUE_CAB:
1580 tx_reg = AR5K_NOQCU_TXDP1;
1581 break;
1582 default:
1583 return -EINVAL;
1584 }
1585 } else {
1586 /*
1587 * Set the transmit queue descriptor pointer for
1588 * the selected queue on QCU for 5211+
1589 * (this won't work if the queue is still active)
1590 */
1591 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
1592 return -EIO;
1593
1594 tx_reg = AR5K_QUEUE_TXDP(queue);
1595 }
1596
1597 /* Set descriptor pointer */
1598 ath5k_hw_reg_write(ah, phys_addr, tx_reg);
1599
1600 return 0;
1601}
1602
1603/*
1604 * Update tx trigger level
1605 */
1606int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
1607{
1608 u32 trigger_level, imr;
1609 int ret = -EIO;
1610
1611 ATH5K_TRACE(ah->ah_sc);
1612
1613 /*
1614 * Disable interrupts by setting the mask
1615 */
1616 imr = ath5k_hw_set_intr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
1617
1618 /*TODO: Boundary check on trigger_level*/
1619 trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
1620 AR5K_TXCFG_TXFULL);
1621
1622 if (!increase) {
1623 if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
1624 goto done;
1625 } else
1626 trigger_level +=
1627 ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
1628
1629 /*
1630 * Update trigger level on success
1631 */
1632 if (ah->ah_version == AR5K_AR5210)
1633 ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
1634 else
1635 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
1636 AR5K_TXCFG_TXFULL, trigger_level);
1637
1638 ret = 0;
1639
1640done:
1641 /*
1642 * Restore interrupt mask
1643 */
1644 ath5k_hw_set_intr(ah, imr);
1645
1646 return ret;
1647}
1648
1649/*
1650 * Interrupt handling
1651 */
1652
1653/*
1654 * Check if we have pending interrupts
1655 */
/*
 * Check if we have pending interrupts
 *
 * Reads the INTPEND register; any non-zero value converts to true.
 */
bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	return ath5k_hw_reg_read(ah, AR5K_INTPEND);
}
1661
1662/*
1663 * Get interrupt mask (ISR)
1664 */
/*
 * Get interrupt mask (ISR)
 *
 * @interrupt_mask: out-parameter, filled with the abstract (driver
 *	level) interrupt mask derived from the chip's ISR bits
 *
 * Returns 0 on success, -ENODEV when the register reads back as
 * AR5K_INT_NOCARD (card gone).
 */
int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data;

	ATH5K_TRACE(ah->ah_sc);

	/*
	 * Read interrupt status from the Interrupt Status register
	 * on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		data = ath5k_hw_reg_read(ah, AR5K_ISR);
		if (unlikely(data == AR5K_INT_NOCARD)) {
			*interrupt_mask = data;
			return -ENODEV;
		}
	} else {
		/*
		 * Read interrupt status from the Read-And-Clear shadow register
		 * Note: PISR/SISR Not available on 5210
		 */
		data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
	}

	/*
	 * Get abstract interrupt mask (driver-compatible)
	 */
	/* Only report bits the driver actually asked for (ah_imr) */
	*interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;

	if (unlikely(data == AR5K_INT_NOCARD))
		return -ENODEV;

	/* Collapse the various RX/TX status bits into the driver's
	 * single RX and TX flags */
	if (data & (AR5K_ISR_RXOK | AR5K_ISR_RXERR))
		*interrupt_mask |= AR5K_INT_RX;

	if (data & (AR5K_ISR_TXOK | AR5K_ISR_TXERR
		| AR5K_ISR_TXDESC | AR5K_ISR_TXEOL))
		*interrupt_mask |= AR5K_INT_TX;

	if (ah->ah_version != AR5K_AR5210) {
		/*HIU = Host Interface Unit (PCI etc)*/
		if (unlikely(data & (AR5K_ISR_HIUERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*Beacon Not Ready*/
		if (unlikely(data & (AR5K_ISR_BNR)))
			*interrupt_mask |= AR5K_INT_BNR;
	}

	/*
	 * XXX: BMISS interrupts may occur after association.
	 * I found this on 5210 code but it needs testing. If this is
	 * true we should disable them before assoc and re-enable them
	 * after a successfull assoc + some jiffies.
	 */
#if 0
	interrupt_mask &= ~AR5K_INT_BMISS;
#endif

	/*
	 * In case we didn't handle anything,
	 * print the register value.
	 */
	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
		ATH5K_PRINTF("0x%08x\n", data);

	return 0;
}
1733
1734/*
1735 * Set interrupt mask
1736 */
1737enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask)
1738{
1739 enum ath5k_int old_mask, int_mask;
1740
1741 /*
1742 * Disable card interrupts to prevent any race conditions
1743 * (they will be re-enabled afterwards).
1744 */
1745 ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
1746 ath5k_hw_reg_read(ah, AR5K_IER);
1747
1748 old_mask = ah->ah_imr;
1749
1750 /*
1751 * Add additional, chipset-dependent interrupt mask flags
1752 * and write them to the IMR (interrupt mask register).
1753 */
1754 int_mask = new_mask & AR5K_INT_COMMON;
1755
1756 if (new_mask & AR5K_INT_RX)
1757 int_mask |= AR5K_IMR_RXOK | AR5K_IMR_RXERR | AR5K_IMR_RXORN |
1758 AR5K_IMR_RXDESC;
1759
1760 if (new_mask & AR5K_INT_TX)
1761 int_mask |= AR5K_IMR_TXOK | AR5K_IMR_TXERR | AR5K_IMR_TXDESC |
1762 AR5K_IMR_TXURN;
1763
1764 if (ah->ah_version != AR5K_AR5210) {
1765 if (new_mask & AR5K_INT_FATAL) {
1766 int_mask |= AR5K_IMR_HIUERR;
1767 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_MCABT |
1768 AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR);
1769 }
1770 }
1771
1772 ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
1773
1774 /* Store new interrupt mask */
1775 ah->ah_imr = new_mask;
1776
1777 /* ..re-enable interrupts */
1778 ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
1779 ath5k_hw_reg_read(ah, AR5K_IER);
1780
1781 return old_mask;
1782}
1783
1784
1785/*************************\
1786 EEPROM access functions
1787\*************************/
1788
1789/*
1790 * Read from eeprom
1791 */
1792static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
1793{
1794 u32 status, timeout;
1795
1796 ATH5K_TRACE(ah->ah_sc);
1797 /*
1798 * Initialize EEPROM access
1799 */
1800 if (ah->ah_version == AR5K_AR5210) {
1801 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
1802 (void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
1803 } else {
1804 ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
1805 AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
1806 AR5K_EEPROM_CMD_READ);
1807 }
1808
1809 for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
1810 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
1811 if (status & AR5K_EEPROM_STAT_RDDONE) {
1812 if (status & AR5K_EEPROM_STAT_RDERR)
1813 return -EIO;
1814 *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
1815 0xffff);
1816 return 0;
1817 }
1818 udelay(15);
1819 }
1820
1821 return -ETIMEDOUT;
1822}
1823
1824/*
1825 * Write to eeprom - currently disabled, use at your own risk
1826 */
1827#if 0
1828static int ath5k_hw_eeprom_write(struct ath5k_hw *ah, u32 offset, u16 data)
1829{
1830
1831 u32 status, timeout;
1832
1833 ATH5K_TRACE(ah->ah_sc);
1834
1835 /*
1836 * Initialize eeprom access
1837 */
1838
1839 if (ah->ah_version == AR5K_AR5210) {
1840 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
1841 } else {
1842 AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
1843 AR5K_EEPROM_CMD_RESET);
1844 }
1845
1846 /*
1847 * Write data to data register
1848 */
1849
1850 if (ah->ah_version == AR5K_AR5210) {
1851 ath5k_hw_reg_write(ah, data, AR5K_EEPROM_BASE + (4 * offset));
1852 } else {
1853 ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
1854 ath5k_hw_reg_write(ah, data, AR5K_EEPROM_DATA);
1855 AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
1856 AR5K_EEPROM_CMD_WRITE);
1857 }
1858
1859 /*
1860 * Check status
1861 */
1862
1863 for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
1864 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
1865 if (status & AR5K_EEPROM_STAT_WRDONE) {
1866 if (status & AR5K_EEPROM_STAT_WRERR)
1867 return EIO;
1868 return 0;
1869 }
1870 udelay(15);
1871 }
1872
1873 ATH5K_ERR(ah->ah_sc, "EEPROM Write is disabled!");
1874 return -EIO;
1875}
1876#endif
1877
1878/*
1879 * Translate binary channel representation in EEPROM to frequency
1880 */
1881static u16 ath5k_eeprom_bin2freq(struct ath5k_hw *ah, u16 bin, unsigned int mode)
1882{
1883 u16 val;
1884
1885 if (bin == AR5K_EEPROM_CHANNEL_DIS)
1886 return bin;
1887
1888 if (mode == AR5K_EEPROM_MODE_11A) {
1889 if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
1890 val = (5 * bin) + 4800;
1891 else
1892 val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 :
1893 (bin * 10) + 5100;
1894 } else {
1895 if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
1896 val = bin + 2300;
1897 else
1898 val = bin + 2400;
1899 }
1900
1901 return val;
1902}
1903
1904/*
1905 * Read antenna infos from eeprom
1906 */
1907static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
1908 unsigned int mode)
1909{
1910 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1911 u32 o = *offset;
1912 u16 val;
1913 int ret, i = 0;
1914
1915 AR5K_EEPROM_READ(o++, val);
1916 ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
1917 ee->ee_ant_tx_rx[mode] = (val >> 2) & 0x3f;
1918 ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
1919
1920 AR5K_EEPROM_READ(o++, val);
1921 ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf;
1922 ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
1923 ee->ee_ant_control[mode][i++] = val & 0x3f;
1924
1925 AR5K_EEPROM_READ(o++, val);
1926 ee->ee_ant_control[mode][i++] = (val >> 10) & 0x3f;
1927 ee->ee_ant_control[mode][i++] = (val >> 4) & 0x3f;
1928 ee->ee_ant_control[mode][i] = (val << 2) & 0x3f;
1929
1930 AR5K_EEPROM_READ(o++, val);
1931 ee->ee_ant_control[mode][i++] |= (val >> 14) & 0x3;
1932 ee->ee_ant_control[mode][i++] = (val >> 8) & 0x3f;
1933 ee->ee_ant_control[mode][i++] = (val >> 2) & 0x3f;
1934 ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
1935
1936 AR5K_EEPROM_READ(o++, val);
1937 ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf;
1938 ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
1939 ee->ee_ant_control[mode][i++] = val & 0x3f;
1940
1941 /* Get antenna modes */
1942 ah->ah_antenna[mode][0] =
1943 (ee->ee_ant_control[mode][0] << 4) | 0x1;
1944 ah->ah_antenna[mode][AR5K_ANT_FIXED_A] =
1945 ee->ee_ant_control[mode][1] |
1946 (ee->ee_ant_control[mode][2] << 6) |
1947 (ee->ee_ant_control[mode][3] << 12) |
1948 (ee->ee_ant_control[mode][4] << 18) |
1949 (ee->ee_ant_control[mode][5] << 24);
1950 ah->ah_antenna[mode][AR5K_ANT_FIXED_B] =
1951 ee->ee_ant_control[mode][6] |
1952 (ee->ee_ant_control[mode][7] << 6) |
1953 (ee->ee_ant_control[mode][8] << 12) |
1954 (ee->ee_ant_control[mode][9] << 18) |
1955 (ee->ee_ant_control[mode][10] << 24);
1956
1957 /* return new offset */
1958 *offset = o;
1959
1960 return 0;
1961}
1962
1963/*
1964 * Read supported modes from eeprom
1965 */
1966static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
1967 unsigned int mode)
1968{
1969 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1970 u32 o = *offset;
1971 u16 val;
1972 int ret;
1973
1974 AR5K_EEPROM_READ(o++, val);
1975 ee->ee_tx_end2xlna_enable[mode] = (val >> 8) & 0xff;
1976 ee->ee_thr_62[mode] = val & 0xff;
1977
1978 if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
1979 ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 15 : 28;
1980
1981 AR5K_EEPROM_READ(o++, val);
1982 ee->ee_tx_end2xpa_disable[mode] = (val >> 8) & 0xff;
1983 ee->ee_tx_frm2xpa_enable[mode] = val & 0xff;
1984
1985 AR5K_EEPROM_READ(o++, val);
1986 ee->ee_pga_desired_size[mode] = (val >> 8) & 0xff;
1987
1988 if ((val & 0xff) & 0x80)
1989 ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1);
1990 else
1991 ee->ee_noise_floor_thr[mode] = val & 0xff;
1992
1993 if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
1994 ee->ee_noise_floor_thr[mode] =
1995 mode == AR5K_EEPROM_MODE_11A ? -54 : -1;
1996
1997 AR5K_EEPROM_READ(o++, val);
1998 ee->ee_xlna_gain[mode] = (val >> 5) & 0xff;
1999 ee->ee_x_gain[mode] = (val >> 1) & 0xf;
2000 ee->ee_xpd[mode] = val & 0x1;
2001
2002 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0)
2003 ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
2004
2005 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
2006 AR5K_EEPROM_READ(o++, val);
2007 ee->ee_false_detect[mode] = (val >> 6) & 0x7f;
2008
2009 if (mode == AR5K_EEPROM_MODE_11A)
2010 ee->ee_xr_power[mode] = val & 0x3f;
2011 else {
2012 ee->ee_ob[mode][0] = val & 0x7;
2013 ee->ee_db[mode][0] = (val >> 3) & 0x7;
2014 }
2015 }
2016
2017 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) {
2018 ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN;
2019 ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA;
2020 } else {
2021 ee->ee_i_gain[mode] = (val >> 13) & 0x7;
2022
2023 AR5K_EEPROM_READ(o++, val);
2024 ee->ee_i_gain[mode] |= (val << 3) & 0x38;
2025
2026 if (mode == AR5K_EEPROM_MODE_11G)
2027 ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff;
2028 }
2029
2030 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
2031 mode == AR5K_EEPROM_MODE_11A) {
2032 ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
2033 ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
2034 }
2035
2036 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6 &&
2037 mode == AR5K_EEPROM_MODE_11G)
2038 ee->ee_scaled_cck_delta = (val >> 11) & 0x1f;
2039
2040 /* return new offset */
2041 *offset = o;
2042
2043 return 0;
2044}
2045
2046/*
2047 * Initialize eeprom & capabilities structs
2048 */
2049static int ath5k_eeprom_init(struct ath5k_hw *ah)
2050{
2051 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
2052 unsigned int mode, i;
2053 int ret;
2054 u32 offset;
2055 u16 val;
2056
2057 /* Initial TX thermal adjustment values */
2058 ee->ee_tx_clip = 4;
2059 ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
2060 ee->ee_gain_select = 1;
2061
2062 /*
2063 * Read values from EEPROM and store them in the capability structure
2064 */
2065 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
2066 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
2067 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
2068 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
2069 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);
2070
2071 /* Return if we have an old EEPROM */
2072 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
2073 return 0;
2074
2075#ifdef notyet
2076 /*
2077 * Validate the checksum of the EEPROM date. There are some
2078 * devices with invalid EEPROMs.
2079 */
2080 for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
2081 AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
2082 cksum ^= val;
2083 }
2084 if (cksum != AR5K_EEPROM_INFO_CKSUM) {
2085 ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
2086 return -EIO;
2087 }
2088#endif
2089
2090 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
2091 ee_ant_gain);
2092
2093 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
2094 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
2095 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);
2096 }
2097
2098 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
2099 AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val);
2100 ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7;
2101 ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;
2102
2103 AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val);
2104 ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7;
2105 ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
2106 }
2107
2108 /*
2109 * Get conformance test limit values
2110 */
2111 offset = AR5K_EEPROM_CTL(ah->ah_ee_version);
2112 ee->ee_ctls = AR5K_EEPROM_N_CTLS(ah->ah_ee_version);
2113
2114 for (i = 0; i < ee->ee_ctls; i++) {
2115 AR5K_EEPROM_READ(offset++, val);
2116 ee->ee_ctl[i] = (val >> 8) & 0xff;
2117 ee->ee_ctl[i + 1] = val & 0xff;
2118 }
2119
2120 /*
2121 * Get values for 802.11a (5GHz)
2122 */
2123 mode = AR5K_EEPROM_MODE_11A;
2124
2125 ee->ee_turbo_max_power[mode] =
2126 AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header);
2127
2128 offset = AR5K_EEPROM_MODES_11A(ah->ah_ee_version);
2129
2130 ret = ath5k_eeprom_read_ants(ah, &offset, mode);
2131 if (ret)
2132 return ret;
2133
2134 AR5K_EEPROM_READ(offset++, val);
2135 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
2136 ee->ee_ob[mode][3] = (val >> 5) & 0x7;
2137 ee->ee_db[mode][3] = (val >> 2) & 0x7;
2138 ee->ee_ob[mode][2] = (val << 1) & 0x7;
2139
2140 AR5K_EEPROM_READ(offset++, val);
2141 ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
2142 ee->ee_db[mode][2] = (val >> 12) & 0x7;
2143 ee->ee_ob[mode][1] = (val >> 9) & 0x7;
2144 ee->ee_db[mode][1] = (val >> 6) & 0x7;
2145 ee->ee_ob[mode][0] = (val >> 3) & 0x7;
2146 ee->ee_db[mode][0] = val & 0x7;
2147
2148 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
2149 if (ret)
2150 return ret;
2151
2152 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) {
2153 AR5K_EEPROM_READ(offset++, val);
2154 ee->ee_margin_tx_rx[mode] = val & 0x3f;
2155 }
2156
2157 /*
2158 * Get values for 802.11b (2.4GHz)
2159 */
2160 mode = AR5K_EEPROM_MODE_11B;
2161 offset = AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
2162
2163 ret = ath5k_eeprom_read_ants(ah, &offset, mode);
2164 if (ret)
2165 return ret;
2166
2167 AR5K_EEPROM_READ(offset++, val);
2168 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
2169 ee->ee_ob[mode][1] = (val >> 4) & 0x7;
2170 ee->ee_db[mode][1] = val & 0x7;
2171
2172 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
2173 if (ret)
2174 return ret;
2175
2176 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
2177 AR5K_EEPROM_READ(offset++, val);
2178 ee->ee_cal_pier[mode][0] =
2179 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
2180 ee->ee_cal_pier[mode][1] =
2181 ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
2182
2183 AR5K_EEPROM_READ(offset++, val);
2184 ee->ee_cal_pier[mode][2] =
2185 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
2186 }
2187
2188 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
2189 ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
2190
2191 /*
2192 * Get values for 802.11g (2.4GHz)
2193 */
2194 mode = AR5K_EEPROM_MODE_11G;
2195 offset = AR5K_EEPROM_MODES_11G(ah->ah_ee_version);
2196
2197 ret = ath5k_eeprom_read_ants(ah, &offset, mode);
2198 if (ret)
2199 return ret;
2200
2201 AR5K_EEPROM_READ(offset++, val);
2202 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
2203 ee->ee_ob[mode][1] = (val >> 4) & 0x7;
2204 ee->ee_db[mode][1] = val & 0x7;
2205
2206 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
2207 if (ret)
2208 return ret;
2209
2210 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
2211 AR5K_EEPROM_READ(offset++, val);
2212 ee->ee_cal_pier[mode][0] =
2213 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
2214 ee->ee_cal_pier[mode][1] =
2215 ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
2216
2217 AR5K_EEPROM_READ(offset++, val);
2218 ee->ee_turbo_max_power[mode] = val & 0x7f;
2219 ee->ee_xr_power[mode] = (val >> 7) & 0x3f;
2220
2221 AR5K_EEPROM_READ(offset++, val);
2222 ee->ee_cal_pier[mode][2] =
2223 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
2224
2225 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
2226 ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
2227
2228 AR5K_EEPROM_READ(offset++, val);
2229 ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
2230 ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
2231
2232 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
2233 AR5K_EEPROM_READ(offset++, val);
2234 ee->ee_cck_ofdm_gain_delta = val & 0xff;
2235 }
2236 }
2237
2238 /*
2239 * Read 5GHz EEPROM channels
2240 */
2241
2242 return 0;
2243}
2244
2245/*
2246 * Read the MAC address from eeprom
2247 */
2248static int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
2249{
2250 u8 mac_d[ETH_ALEN];
2251 u32 total, offset;
2252 u16 data;
2253 int octet, ret;
2254
2255 memset(mac, 0, ETH_ALEN);
2256 memset(mac_d, 0, ETH_ALEN);
2257
2258 ret = ath5k_hw_eeprom_read(ah, 0x20, &data);
2259 if (ret)
2260 return ret;
2261
2262 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
2263 ret = ath5k_hw_eeprom_read(ah, offset, &data);
2264 if (ret)
2265 return ret;
2266
2267 total += data;
2268 mac_d[octet + 1] = data & 0xff;
2269 mac_d[octet] = data >> 8;
2270 octet += 2;
2271 }
2272
2273 memcpy(mac, mac_d, ETH_ALEN);
2274
2275 if (!total || total == 3 * 0xffff)
2276 return -EINVAL;
2277
2278 return 0;
2279}
2280
2281/*
2282 * Fill the capabilities struct
2283 */
2284static int ath5k_hw_get_capabilities(struct ath5k_hw *ah)
2285{
2286 u16 ee_header;
2287
2288 ATH5K_TRACE(ah->ah_sc);
2289 /* Capabilities stored in the EEPROM */
2290 ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
2291
2292 if (ah->ah_version == AR5K_AR5210) {
2293 /*
2294 * Set radio capabilities
2295 * (The AR5110 only supports the middle 5GHz band)
2296 */
2297 ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
2298 ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
2299 ah->ah_capabilities.cap_range.range_2ghz_min = 0;
2300 ah->ah_capabilities.cap_range.range_2ghz_max = 0;
2301
2302 /* Set supported modes */
2303 __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
2304 __set_bit(AR5K_MODE_11A_TURBO, ah->ah_capabilities.cap_mode);
2305 } else {
2306 /*
2307 * XXX The tranceiver supports frequencies from 4920 to 6100GHz
2308 * XXX and from 2312 to 2732GHz. There are problems with the
2309 * XXX current ieee80211 implementation because the IEEE
2310 * XXX channel mapping does not support negative channel
2311 * XXX numbers (2312MHz is channel -19). Of course, this
2312 * XXX doesn't matter because these channels are out of range
2313 * XXX but some regulation domains like MKK (Japan) will
2314 * XXX support frequencies somewhere around 4.8GHz.
2315 */
2316
2317 /*
2318 * Set radio capabilities
2319 */
2320
2321 if (AR5K_EEPROM_HDR_11A(ee_header)) {
2322 ah->ah_capabilities.cap_range.range_5ghz_min = 5005; /* 4920 */
2323 ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
2324
2325 /* Set supported modes */
2326 __set_bit(AR5K_MODE_11A,
2327 ah->ah_capabilities.cap_mode);
2328 __set_bit(AR5K_MODE_11A_TURBO,
2329 ah->ah_capabilities.cap_mode);
2330 if (ah->ah_version == AR5K_AR5212)
2331 __set_bit(AR5K_MODE_11G_TURBO,
2332 ah->ah_capabilities.cap_mode);
2333 }
2334
2335 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is
2336 * connected */
2337 if (AR5K_EEPROM_HDR_11B(ee_header) ||
2338 AR5K_EEPROM_HDR_11G(ee_header)) {
2339 ah->ah_capabilities.cap_range.range_2ghz_min = 2412; /* 2312 */
2340 ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
2341
2342 if (AR5K_EEPROM_HDR_11B(ee_header))
2343 __set_bit(AR5K_MODE_11B,
2344 ah->ah_capabilities.cap_mode);
2345
2346 if (AR5K_EEPROM_HDR_11G(ee_header))
2347 __set_bit(AR5K_MODE_11G,
2348 ah->ah_capabilities.cap_mode);
2349 }
2350 }
2351
2352 /* GPIO */
2353 ah->ah_gpio_npins = AR5K_NUM_GPIO;
2354
2355 /* Set number of supported TX queues */
2356 if (ah->ah_version == AR5K_AR5210)
2357 ah->ah_capabilities.cap_queues.q_tx_num =
2358 AR5K_NUM_TX_QUEUES_NOQCU;
2359 else
2360 ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
2361
2362 return 0;
2363}
2364
2365/*********************************\
2366 Protocol Control Unit Functions
2367\*********************************/
2368
2369/*
2370 * Set Operation mode
2371 */
2372int ath5k_hw_set_opmode(struct ath5k_hw *ah)
2373{
2374 u32 pcu_reg, beacon_reg, low_id, high_id;
2375
2376 pcu_reg = 0;
2377 beacon_reg = 0;
2378
2379 ATH5K_TRACE(ah->ah_sc);
2380
2381 switch (ah->ah_op_mode) {
2382 case IEEE80211_IF_TYPE_IBSS:
2383 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_DESC_ANTENNA |
2384 (ah->ah_version == AR5K_AR5210 ?
2385 AR5K_STA_ID1_NO_PSPOLL : 0);
2386 beacon_reg |= AR5K_BCR_ADHOC;
2387 break;
2388
2389 case IEEE80211_IF_TYPE_AP:
2390 pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_RTS_DEF_ANTENNA |
2391 (ah->ah_version == AR5K_AR5210 ?
2392 AR5K_STA_ID1_NO_PSPOLL : 0);
2393 beacon_reg |= AR5K_BCR_AP;
2394 break;
2395
2396 case IEEE80211_IF_TYPE_STA:
2397 pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
2398 (ah->ah_version == AR5K_AR5210 ?
2399 AR5K_STA_ID1_PWR_SV : 0);
2400 case IEEE80211_IF_TYPE_MNTR:
2401 pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
2402 (ah->ah_version == AR5K_AR5210 ?
2403 AR5K_STA_ID1_NO_PSPOLL : 0);
2404 break;
2405
2406 default:
2407 return -EINVAL;
2408 }
2409
2410 /*
2411 * Set PCU registers
2412 */
2413 low_id = AR5K_LOW_ID(ah->ah_sta_id);
2414 high_id = AR5K_HIGH_ID(ah->ah_sta_id);
2415 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
2416 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
2417
2418 /*
2419 * Set Beacon Control Register on 5210
2420 */
2421 if (ah->ah_version == AR5K_AR5210)
2422 ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
2423
2424 return 0;
2425}
2426
2427/*
2428 * BSSID Functions
2429 */
2430
2431/*
2432 * Get station id
2433 */
2434void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
2435{
2436 ATH5K_TRACE(ah->ah_sc);
2437 memcpy(mac, ah->ah_sta_id, ETH_ALEN);
2438}
2439
2440/*
2441 * Set station id
2442 */
2443int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
2444{
2445 u32 low_id, high_id;
2446
2447 ATH5K_TRACE(ah->ah_sc);
2448 /* Set new station ID */
2449 memcpy(ah->ah_sta_id, mac, ETH_ALEN);
2450
2451 low_id = AR5K_LOW_ID(mac);
2452 high_id = AR5K_HIGH_ID(mac);
2453
2454 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
2455 ath5k_hw_reg_write(ah, high_id, AR5K_STA_ID1);
2456
2457 return 0;
2458}
2459
2460/*
2461 * Set BSSID
2462 */
2463void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id)
2464{
2465 u32 low_id, high_id;
2466 u16 tim_offset = 0;
2467
2468 /*
2469 * Set simple BSSID mask on 5212
2470 */
2471 if (ah->ah_version == AR5K_AR5212) {
2472 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM0);
2473 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM1);
2474 }
2475
2476 /*
2477 * Set BSSID which triggers the "SME Join" operation
2478 */
2479 low_id = AR5K_LOW_ID(bssid);
2480 high_id = AR5K_HIGH_ID(bssid);
2481 ath5k_hw_reg_write(ah, low_id, AR5K_BSS_ID0);
2482 ath5k_hw_reg_write(ah, high_id | ((assoc_id & 0x3fff) <<
2483 AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1);
2484
2485 if (assoc_id == 0) {
2486 ath5k_hw_disable_pspoll(ah);
2487 return;
2488 }
2489
2490 AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
2491 tim_offset ? tim_offset + 4 : 0);
2492
2493 ath5k_hw_enable_pspoll(ah, NULL, 0);
2494}
2495/**
2496 * ath5k_hw_set_bssid_mask - set common bits we should listen to
2497 *
2498 * The bssid_mask is a utility used by AR5212 hardware to inform the hardware
2499 * which bits of the interface's MAC address should be looked at when trying
2500 * to decide which packets to ACK. In station mode every bit matters. In AP
2501 * mode with a single BSS every bit matters as well. In AP mode with
2502 * multiple BSSes not every bit matters.
2503 *
2504 * @ah: the &struct ath5k_hw
2505 * @mask: the bssid_mask, a u8 array of size ETH_ALEN
2506 *
2507 * Note that this is a simple filter and *does* not filter out all
2508 * relevant frames. Some non-relevant frames will get through, probability
2509 * jocks are welcomed to compute.
2510 *
2511 * When handling multiple BSSes (or VAPs) you can get the BSSID mask by
2512 * computing the set of:
2513 *
2514 * ~ ( MAC XOR BSSID )
2515 *
2516 * When you do this you are essentially computing the common bits. Later it
2517 * is assumed the harware will "and" (&) the BSSID mask with the MAC address
2518 * to obtain the relevant bits which should match on the destination frame.
2519 *
2520 * Simple example: on your card you have have two BSSes you have created with
2521 * BSSID-01 and BSSID-02. Lets assume BSSID-01 will not use the MAC address.
2522 * There is another BSSID-03 but you are not part of it. For simplicity's sake,
2523 * assuming only 4 bits for a mac address and for BSSIDs you can then have:
2524 *
2525 * \
2526 * MAC: 0001 |
2527 * BSSID-01: 0100 | --> Belongs to us
2528 * BSSID-02: 1001 |
2529 * /
2530 * -------------------
2531 * BSSID-03: 0110 | --> External
2532 * -------------------
2533 *
2534 * Our bssid_mask would then be:
2535 *
2536 * On loop iteration for BSSID-01:
2537 * ~(0001 ^ 0100) -> ~(0101)
2538 * -> 1010
2539 * bssid_mask = 1010
2540 *
2541 * On loop iteration for BSSID-02:
2542 * bssid_mask &= ~(0001 ^ 1001)
2543 * bssid_mask = (1010) & ~(0001 ^ 1001)
2544 * bssid_mask = (1010) & ~(1001)
2545 * bssid_mask = (1010) & (0110)
2546 * bssid_mask = 0010
2547 *
2548 * A bssid_mask of 0010 means "only pay attention to the second least
2549 * significant bit". This is because its the only bit common
2550 * amongst the MAC and all BSSIDs we support. To findout what the real
2551 * common bit is we can simply "&" the bssid_mask now with any BSSID we have
2552 * or our MAC address (we assume the hardware uses the MAC address).
2553 *
2554 * Now, suppose there's an incoming frame for BSSID-03:
2555 *
2556 * IFRAME-01: 0110
2557 *
2558 * An easy eye-inspeciton of this already should tell you that this frame
2559 * will not pass our check. This is beacuse the bssid_mask tells the
2560 * hardware to only look at the second least significant bit and the
2561 * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
2562 * as 1, which does not match 0.
2563 *
2564 * So with IFRAME-01 we *assume* the hardware will do:
2565 *
2566 * allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
2567 * --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
2568 * --> allow = (0010) == 0000 ? 1 : 0;
2569 * --> allow = 0
2570 *
2571 * Lets now test a frame that should work:
2572 *
2573 * IFRAME-02: 0001 (we should allow)
2574 *
2575 * allow = (0001 & 1010) == 1010
2576 *
2577 * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
2578 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0;
2579 * --> allow = (0010) == (0010)
2580 * --> allow = 1
2581 *
2582 * Other examples:
2583 *
2584 * IFRAME-03: 0100 --> allowed
2585 * IFRAME-04: 1001 --> allowed
2586 * IFRAME-05: 1101 --> allowed but its not for us!!!
2587 *
2588 */
2589int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
2590{
2591 u32 low_id, high_id;
2592 ATH5K_TRACE(ah->ah_sc);
2593
2594 if (ah->ah_version == AR5K_AR5212) {
2595 low_id = AR5K_LOW_ID(mask);
2596 high_id = AR5K_HIGH_ID(mask);
2597
2598 ath5k_hw_reg_write(ah, low_id, AR5K_BSS_IDM0);
2599 ath5k_hw_reg_write(ah, high_id, AR5K_BSS_IDM1);
2600
2601 return 0;
2602 }
2603
2604 return -EIO;
2605}
2606
2607/*
2608 * Receive start/stop functions
2609 */
2610
2611/*
2612 * Start receive on PCU
2613 */
2614void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
2615{
2616 ATH5K_TRACE(ah->ah_sc);
2617 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
2618
2619 /* TODO: ANI Support */
2620}
2621
2622/*
2623 * Stop receive on PCU
2624 */
2625void ath5k_hw_stop_pcu_recv(struct ath5k_hw *ah)
2626{
2627 ATH5K_TRACE(ah->ah_sc);
2628 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
2629
2630 /* TODO: ANI Support */
2631}
2632
2633/*
2634 * RX Filter functions
2635 */
2636
2637/*
2638 * Set multicast filter
2639 */
2640void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
2641{
2642 ATH5K_TRACE(ah->ah_sc);
2643 /* Set the multicat filter */
2644 ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
2645 ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
2646}
2647
2648/*
2649 * Set multicast filter by index
2650 */
2651int ath5k_hw_set_mcast_filterindex(struct ath5k_hw *ah, u32 index)
2652{
2653
2654 ATH5K_TRACE(ah->ah_sc);
2655 if (index >= 64)
2656 return -EINVAL;
2657 else if (index >= 32)
2658 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
2659 (1 << (index - 32)));
2660 else
2661 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
2662
2663 return 0;
2664}
2665
2666/*
2667 * Clear Multicast filter by index
2668 */
2669int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
2670{
2671
2672 ATH5K_TRACE(ah->ah_sc);
2673 if (index >= 64)
2674 return -EINVAL;
2675 else if (index >= 32)
2676 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
2677 (1 << (index - 32)));
2678 else
2679 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
2680
2681 return 0;
2682}
2683
2684/*
2685 * Get current rx filter
2686 */
2687u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
2688{
2689 u32 data, filter = 0;
2690
2691 ATH5K_TRACE(ah->ah_sc);
2692 filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
2693
2694 /*Radar detection for 5212*/
2695 if (ah->ah_version == AR5K_AR5212) {
2696 data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);
2697
2698 if (data & AR5K_PHY_ERR_FIL_RADAR)
2699 filter |= AR5K_RX_FILTER_RADARERR;
2700 if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
2701 filter |= AR5K_RX_FILTER_PHYERR;
2702 }
2703
2704 return filter;
2705}
2706
2707/*
2708 * Set rx filter
2709 */
2710void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
2711{
2712 u32 data = 0;
2713
2714 ATH5K_TRACE(ah->ah_sc);
2715
2716 /* Set PHY error filter register on 5212*/
2717 if (ah->ah_version == AR5K_AR5212) {
2718 if (filter & AR5K_RX_FILTER_RADARERR)
2719 data |= AR5K_PHY_ERR_FIL_RADAR;
2720 if (filter & AR5K_RX_FILTER_PHYERR)
2721 data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
2722 }
2723
2724 /*
2725 * The AR5210 uses promiscous mode to detect radar activity
2726 */
2727 if (ah->ah_version == AR5K_AR5210 &&
2728 (filter & AR5K_RX_FILTER_RADARERR)) {
2729 filter &= ~AR5K_RX_FILTER_RADARERR;
2730 filter |= AR5K_RX_FILTER_PROM;
2731 }
2732
2733 /*Zero length DMA*/
2734 if (data)
2735 AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
2736 else
2737 AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
2738
2739 /*Write RX Filter register*/
2740 ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);
2741
2742 /*Write PHY error filter register on 5212*/
2743 if (ah->ah_version == AR5K_AR5212)
2744 ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);
2745
2746}
2747
2748/*
2749 * Beacon related functions
2750 */
2751
2752/*
2753 * Get a 32bit TSF
2754 */
2755u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
2756{
2757 ATH5K_TRACE(ah->ah_sc);
2758 return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
2759}
2760
2761/*
2762 * Get the full 64bit TSF
2763 */
2764u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
2765{
2766 u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
2767 ATH5K_TRACE(ah->ah_sc);
2768
2769 return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
2770}
2771
2772/*
2773 * Force a TSF reset
2774 */
2775void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
2776{
2777 ATH5K_TRACE(ah->ah_sc);
2778 AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_RESET_TSF);
2779}
2780
2781/*
2782 * Initialize beacon timers
2783 */
2784void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
2785{
2786 u32 timer1, timer2, timer3;
2787
2788 ATH5K_TRACE(ah->ah_sc);
2789 /*
2790 * Set the additional timers by mode
2791 */
2792 switch (ah->ah_op_mode) {
2793 case IEEE80211_IF_TYPE_STA:
2794 if (ah->ah_version == AR5K_AR5210) {
2795 timer1 = 0xffffffff;
2796 timer2 = 0xffffffff;
2797 } else {
2798 timer1 = 0x0000ffff;
2799 timer2 = 0x0007ffff;
2800 }
2801 break;
2802
2803 default:
2804 timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
2805 timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
2806 }
2807
2808 timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1);
2809
2810 /*
2811 * Set the beacon register and enable all timers.
2812 * (next beacon, DMA beacon, software beacon, ATIM window time)
2813 */
2814 ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
2815 ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
2816 ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
2817 ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);
2818
2819 ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
2820 AR5K_BEACON_RESET_TSF | AR5K_BEACON_ENABLE),
2821 AR5K_BEACON);
2822}
2823
2824#if 0
/*
 * Set beacon timers (CFP/PCF aware version)
 *
 * @ah: the &struct ath5k_hw
 * @state: beacon timer state (interval, DTIM period, CFP settings,
 *	TIM offset, beacon-miss threshold, sleep duration)
 *
 * Programs the CFP registers when state->bs_cfp_period is set, the
 * next-beacon timer (AR5K_TIMER0), the beacon period/TIM offset, the
 * beacon-miss threshold and the sleep control register.  On AR5212 the
 * enhanced sleep registers are programmed as well.
 *
 * Returns 0 on success or -EINVAL for an invalid beacon state.
 *
 * NOTE(review): this function sits inside an #if 0 block and is not
 * currently compiled.
 */
int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
	const struct ath5k_beacon_state *state)
{
	u32 cfp_period, next_cfp, dtim, interval, next_beacon;

	/*
	 * TODO: should be changed through *state
	 * review struct ath5k_beacon_state struct
	 *
	 * XXX: These are used for the cfp period below, are they
	 * ok ? Is it O.K. for tsf here to be 0 or should we use
	 * get_tsf ?
	 */
	u32 dtim_count = 0; /* XXX */
	u32 cfp_count = 0; /* XXX */
	u32 tsf = 0; /* XXX */

	ATH5K_TRACE(ah->ah_sc);
	/* Return on an invalid beacon state */
	if (state->bs_interval < 1)
		return -EINVAL;

	interval = state->bs_interval;
	dtim = state->bs_dtim_period;

	/*
	 * PCF support?
	 */
	if (state->bs_cfp_period > 0) {
		/*
		 * Enable PCF mode and set the CFP
		 * (Contention Free Period) and timer registers
		 */
		cfp_period = state->bs_cfp_period * state->bs_dtim_period *
			state->bs_interval;
		next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
			state->bs_interval;

		AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
				AR5K_STA_ID1_DEFAULT_ANTENNA |
				AR5K_STA_ID1_PCF);
		ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
		ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
				AR5K_CFP_DUR);
		/* Next CFP start in TU units, shifted into the 1/8 TU
		 * granularity the timer register expects */
		ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
				next_cfp)) << 3, AR5K_TIMER2);
	} else {
		/* Disable PCF mode */
		AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
				AR5K_STA_ID1_DEFAULT_ANTENNA |
				AR5K_STA_ID1_PCF);
	}

	/*
	 * Enable the beacon timer register
	 */
	ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);

	/*
	 * Start the beacon timers: keep the current AR5K_BEACON bits
	 * outside PERIOD/TIM, then merge in the new TIM offset (+4 when
	 * nonzero) and beacon period.
	 */
	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &~
		(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
		AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
		AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
		AR5K_BEACON_PERIOD), AR5K_BEACON);

	/*
	 * Write new beacon miss threshold, if it appears to be valid
	 * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
	 * and return if its not in range. We can test this by reading value and
	 * setting value to a largest value and seeing which values register.
	 */

	AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
			state->bs_bmiss_threshold);

	/*
	 * Set sleep control register
	 * XXX: Didn't find this in 5210 code but since this register
	 * exists also in ar5k's 5210 headers i leave it as common code.
	 */
	AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
			(state->bs_sleep_duration - 3) << 3);

	/*
	 * Set enhanced sleep registers on 5212
	 */
	if (ah->ah_version == AR5K_AR5212) {
		/* Stretch interval/dtim to the sleep duration when it is
		 * an exact multiple, so wakeups align with beacons */
		if (state->bs_sleep_duration > state->bs_interval &&
				roundup(state->bs_sleep_duration, interval) ==
				state->bs_sleep_duration)
			interval = state->bs_sleep_duration;

		if (state->bs_sleep_duration > dtim && (dtim == 0 ||
				roundup(state->bs_sleep_duration, dtim) ==
				state->bs_sleep_duration))
			dtim = state->bs_sleep_duration;

		/* A TIM period larger than the DTIM period is invalid */
		if (interval > dtim)
			return -EINVAL;

		next_beacon = interval == dtim ? state->bs_next_dtim :
			state->bs_next_beacon;

		ath5k_hw_reg_write(ah,
			AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
			AR5K_SLEEP0_NEXT_DTIM) |
			AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
			AR5K_SLEEP0_ENH_SLEEP_EN |
			AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);

		ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
			AR5K_SLEEP1_NEXT_TIM) |
			AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);

		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
			AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
	}

	return 0;
}
2951
/*
 * Reset beacon timers
 *
 * Disables the next-beacon timer (AR5K_TIMER0), clears the PCF and
 * default-antenna bits in STA_ID1, and writes the beacon control
 * register down to its period field only.
 *
 * NOTE(review): inside an #if 0 block, currently not compiled.
 */
void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	/*
	 * Disable beacon timer
	 */
	ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);

	/*
	 * Disable some beacon register values
	 */
	AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
			AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
	ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
}
2970
/*
 * Wait for the beacon queue to finish transmitting
 *
 * @ah: the &struct ath5k_hw
 * @phys_addr: DMA address of the beacon descriptor, used to re-arm the
 *	no-QCU beacon queue on AR5210 if the wait times out
 *
 * Returns 0 when the queue drained, -EIO on timeout or if the 5211+
 * beacon QCU is still enabled after the pending-frame wait.
 *
 * NOTE(review): inside an #if 0 block, currently not compiled.
 */
int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
{
	unsigned int i;
	int ret;

	ATH5K_TRACE(ah->ah_sc);

	/* 5210 doesn't have QCU*/
	if (ah->ah_version == AR5K_AR5210) {
		/*
		 * Wait for beacon queue to finish by checking
		 * Control Register and Beacon Status Register.
		 *
		 * NOTE(review): AR5K_BSR_TXQ1F is a BSR bit but is also
		 * masked against the AR5K_CR read below — verify the CR
		 * test uses the intended bit.
		 */
		for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
			if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
					||
			!(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
				break;
			udelay(10);
		}

		/* Timeout... (i is unsigned, so this is really i == 0) */
		if (i <= 0) {
			/*
			 * Re-schedule the beacon queue
			 */
			ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BCR);

			return -EIO;
		}
		ret = 0;
	} else {
		/*5211/5212*/
		ret = ath5k_hw_register_timeout(ah,
			AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
			AR5K_QCU_STS_FRMPENDCNT, 0, false);

		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
			return -EIO;
	}

	return ret;
}
3019#endif
3020
/*
 * Update MIB counters (statistics)
 *
 * @ah: the &struct ath5k_hw
 * @stats: accumulator for the low-level statistics
 *
 * The hardware MIB registers are read-and-clear: each read below both
 * fetches the count and zeroes the register, so the values are ADDED
 * to the running totals in @stats rather than assigned.
 */
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
		struct ieee80211_low_level_stats  *stats)
{
	ATH5K_TRACE(ah->ah_sc);

	/* Read-And-Clear */
	stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
	stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
	stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
	stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);

	/* XXX: Should we use this to track beacon count ?
	 * -we read it anyway to clear the register */
	ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);

	/* Reset profile count registers on 5212*/
	if (ah->ah_version == AR5K_AR5212) {
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
	}
}
3047
3048/** ath5k_hw_set_ack_bitrate - set bitrate for ACKs
3049 *
3050 * @ah: the &struct ath5k_hw
3051 * @high: determines if to use low bit rate or now
3052 */
3053void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
3054{
3055 if (ah->ah_version != AR5K_AR5212)
3056 return;
3057 else {
3058 u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
3059 if (high)
3060 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
3061 else
3062 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
3063 }
3064}
3065
3066
3067/*
3068 * ACK/CTS Timeouts
3069 */
3070
3071/*
3072 * Set ACK timeout on PCU
3073 */
3074int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
3075{
3076 ATH5K_TRACE(ah->ah_sc);
3077 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
3078 ah->ah_turbo) <= timeout)
3079 return -EINVAL;
3080
3081 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
3082 ath5k_hw_htoclock(timeout, ah->ah_turbo));
3083
3084 return 0;
3085}
3086
3087/*
3088 * Read the ACK timeout from PCU
3089 */
3090unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
3091{
3092 ATH5K_TRACE(ah->ah_sc);
3093
3094 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
3095 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
3096}
3097
3098/*
3099 * Set CTS timeout on PCU
3100 */
3101int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
3102{
3103 ATH5K_TRACE(ah->ah_sc);
3104 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
3105 ah->ah_turbo) <= timeout)
3106 return -EINVAL;
3107
3108 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
3109 ath5k_hw_htoclock(timeout, ah->ah_turbo));
3110
3111 return 0;
3112}
3113
3114/*
3115 * Read CTS timeout from PCU
3116 */
3117unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
3118{
3119 ATH5K_TRACE(ah->ah_sc);
3120 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
3121 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
3122}
3123
3124/*
3125 * Key table (WEP) functions
3126 */
3127
/*
 * Reset (clear) a key cache entry
 *
 * @ah: the &struct ath5k_hw
 * @entry: key table index, must be < AR5K_KEYTABLE_SIZE
 *
 * Zeroes every word of the entry, then marks the entry as NULL
 * encryption on AR5212+.  Always returns 0 (the entry bound is
 * enforced by AR5K_ASSERT_ENTRY).
 */
int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
{
	unsigned int i;

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);

	/* Clear all words of this key cache entry */
	for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
		ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));

	/*
	 * Set NULL encryption on AR5212+
	 *
	 * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5)
	 *       AR5K_KEYTABLE_TYPE_NULL -> 0x00000007
	 *
	 * Note2: Windows driver (ndiswrapper) sets this to
	 *        0x00000714 instead of 0x00000007
	 */
	if (ah->ah_version > AR5K_AR5211)
		ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
				AR5K_KEYTABLE_TYPE(entry));

	return 0;
}
3153
3154int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
3155{
3156 ATH5K_TRACE(ah->ah_sc);
3157 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
3158
3159 /* Check the validation flag at the end of the entry */
3160 return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
3161 AR5K_KEYTABLE_VALID;
3162}
3163
/*
 * Write a WEP key into a key cache entry
 *
 * @ah: the &struct ath5k_hw
 * @entry: key table index
 * @key: mac80211 key config; only 40/104/128-bit WEP lengths are handled
 * @mac: peer MAC address for this entry, or NULL for a broadcast key
 *
 * Packs the key bytes into five 32-bit words, writes them and the key
 * type to the entry, then delegates the MAC address / valid bit to
 * ath5k_hw_set_key_lladdr().
 *
 * Returns -EOPNOTSUPP for oversized keys, -EINVAL for unsupported
 * lengths, otherwise the result of ath5k_hw_set_key_lladdr().
 */
int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
		const struct ieee80211_key_conf *key, const u8 *mac)
{
	unsigned int i;
	__le32 key_v[5] = {};
	u32 keytype;

	ATH5K_TRACE(ah->ah_sc);

	/* key->keylen comes in from mac80211 in bytes */

	/* NOTE(review): the limit compares a byte length against
	 * AR5K_KEYTABLE_SIZE / 8 — confirm this constant really bounds
	 * key length and not the number of table entries. */
	if (key->keylen > AR5K_KEYTABLE_SIZE / 8)
		return -EOPNOTSUPP;

	/* Key bytes are packed into key_v below and each word is passed
	 * through le32_to_cpu() for the register writes that follow. */
	switch (key->keylen) {
	/* WEP 40-bit = 40-bit entered key + 24 bit IV = 64-bit */
	case 40 / 8:
		memcpy(&key_v[0], key->key, 5);
		keytype = AR5K_KEYTABLE_TYPE_40;
		break;

	/* WEP 104-bit = 104-bit entered key + 24-bit IV = 128-bit */
	case 104 / 8:
		memcpy(&key_v[0], &key->key[0], 6);
		memcpy(&key_v[2], &key->key[6], 6);
		memcpy(&key_v[4], &key->key[12], 1);
		keytype = AR5K_KEYTABLE_TYPE_104;
		break;
	/* WEP 128-bit = 128-bit entered key + 24 bit IV = 152-bit */
	case 128 / 8:
		memcpy(&key_v[0], &key->key[0], 6);
		memcpy(&key_v[2], &key->key[6], 6);
		memcpy(&key_v[4], &key->key[12], 4);
		keytype = AR5K_KEYTABLE_TYPE_128;
		break;

	default:
		return -EINVAL; /* shouldn't happen */
	}

	for (i = 0; i < ARRAY_SIZE(key_v); i++)
		ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
				AR5K_KEYTABLE_OFF(entry, i));

	ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));

	return ath5k_hw_set_key_lladdr(ah, entry, mac);
}
3212
/*
 * Set the MAC address and valid bit of a key cache entry
 *
 * @ah: the &struct ath5k_hw
 * @entry: key table index
 * @mac: peer MAC address, or NULL for a broadcast entry
 *
 * Always returns 0 (the entry bound is enforced by AR5K_ASSERT_ENTRY).
 */
int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
{
	u32 low_id, high_id;

	ATH5K_TRACE(ah->ah_sc);
	/* Invalid entry (key table overflow) */
	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);

	/* MAC may be NULL if it's a broadcast key. In this case no need
	 * to compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */
	if (unlikely(mac == NULL)) {
		low_id = 0xffffffff;
		high_id = 0xffff | AR5K_KEYTABLE_VALID;
	} else {
		low_id = AR5K_LOW_ID(mac);
		high_id = AR5K_HIGH_ID(mac) | AR5K_KEYTABLE_VALID;
	}

	ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
	ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry));

	return 0;
}
3236
3237
3238/********************************************\
3239Queue Control Unit, DFS Control Unit Functions
3240\********************************************/
3241
/*
 * Initialize a transmit queue
 *
 * @ah: the &struct ath5k_hw
 * @queue_type: which kind of queue to set up (data, beacon, CAB, ...)
 * @queue_info: optional initial properties, applied via
 *	ath5k_hw_setup_tx_queueprops() when non-NULL
 *
 * Maps the queue type to a hardware queue number (AR5210 only has the
 * two no-QCU queues), initializes the driver-side queue state and
 * records the queue in ah_txq_status for later SIMR setup.
 *
 * Returns the queue number on success or a negative errno.
 */
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
		struct ath5k_txq_info *queue_info)
{
	unsigned int queue;
	int ret;

	ATH5K_TRACE(ah->ah_sc);

	/*
	 * Get queue by type
	 */
	/*5210 only has 2 queues*/
	if (ah->ah_version == AR5K_AR5210) {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			/* Pick the first inactive data queue; fail when
			 * all data queue ids are taken */
			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
				ah->ah_txq[queue].tqi_type !=
				AR5K_TX_QUEUE_INACTIVE; queue++) {

				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
					return -EINVAL;
			}
			break;
		case AR5K_TX_QUEUE_UAPSD:
			queue = AR5K_TX_QUEUE_ID_UAPSD;
			break;
		case AR5K_TX_QUEUE_BEACON:
			queue = AR5K_TX_QUEUE_ID_BEACON;
			break;
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_CAB;
			break;
		case AR5K_TX_QUEUE_XR_DATA:
			/* NOTE(review): on non-5212 this only logs an
			 * error and still uses the XR queue id — verify
			 * whether it should return -EINVAL instead */
			if (ah->ah_version != AR5K_AR5212)
				ATH5K_ERR(ah->ah_sc,
					"XR data queues only supported in"
					" 5212!\n");
			queue = AR5K_TX_QUEUE_ID_XR_DATA;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Setup internal queue structure
	 */
	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
	ah->ah_txq[queue].tqi_type = queue_type;

	if (queue_info != NULL) {
		queue_info->tqi_type = queue_type;
		ret = ath5k_hw_setup_tx_queueprops(ah, queue, queue_info);
		if (ret)
			return ret;
	}
	/*
	 * We use ah_txq_status to hold a temp value for
	 * the Secondary interrupt mask registers on 5211+
	 * check out ath5k_hw_reset_tx_queue
	 */
	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

	return queue;
}
3322
3323/*
3324 * Setup a transmit queue
3325 */
3326int ath5k_hw_setup_tx_queueprops(struct ath5k_hw *ah, int queue,
3327 const struct ath5k_txq_info *queue_info)
3328{
3329 ATH5K_TRACE(ah->ah_sc);
3330 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
3331
3332 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
3333 return -EIO;
3334
3335 memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
3336
3337 /*XXX: Is this supported on 5210 ?*/
3338 if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
3339 ((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
3340 (queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
3341 queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
3342 ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
3343
3344 return 0;
3345}
3346
3347/*
3348 * Get properties for a specific transmit queue
3349 */
3350int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
3351 struct ath5k_txq_info *queue_info)
3352{
3353 ATH5K_TRACE(ah->ah_sc);
3354 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
3355 return 0;
3356}
3357
/*
 * Set a transmit queue inactive
 *
 * @ah: the &struct ath5k_hw
 * @queue: queue number; out-of-range values trigger WARN_ON and are
 *	ignored
 */
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	ATH5K_TRACE(ah->ah_sc);
	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
		return;

	/* This queue will be skipped in further operations */
	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
	/*For SIMR setup*/
	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
3372
3373/*
3374 * Set DFS params for a transmit queue
3375 */
3376int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
3377{
3378 u32 cw_min, cw_max, retry_lg, retry_sh;
3379 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
3380
3381 ATH5K_TRACE(ah->ah_sc);
3382 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
3383
3384 tq = &ah->ah_txq[queue];
3385
3386 if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
3387 return 0;
3388
3389 if (ah->ah_version == AR5K_AR5210) {
3390 /* Only handle data queues, others will be ignored */
3391 if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
3392 return 0;
3393
3394 /* Set Slot time */
3395 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3396 AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
3397 AR5K_SLOT_TIME);
3398 /* Set ACK_CTS timeout */
3399 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3400 AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
3401 AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
3402 /* Set Transmit Latency */
3403 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3404 AR5K_INIT_TRANSMIT_LATENCY_TURBO :
3405 AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
3406 /* Set IFS0 */
3407 if (ah->ah_turbo)
3408 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
3409 (ah->ah_aifs + tq->tqi_aifs) *
3410 AR5K_INIT_SLOT_TIME_TURBO) <<
3411 AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
3412 AR5K_IFS0);
3413 else
3414 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
3415 (ah->ah_aifs + tq->tqi_aifs) *
3416 AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
3417 AR5K_INIT_SIFS, AR5K_IFS0);
3418
3419 /* Set IFS1 */
3420 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3421 AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
3422 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
3423 /* Set AR5K_PHY_SETTLING */
3424 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3425 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
3426 | 0x38 :
3427 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
3428 | 0x1C,
3429 AR5K_PHY_SETTLING);
3430 /* Set Frame Control Register */
3431 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3432 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
3433 AR5K_PHY_TURBO_SHORT | 0x2020) :
3434 (AR5K_PHY_FRAME_CTL_INI | 0x1020),
3435 AR5K_PHY_FRAME_CTL_5210);
3436 }
3437
3438 /*
3439 * Calculate cwmin/max by channel mode
3440 */
3441 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
3442 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
3443 ah->ah_aifs = AR5K_TUNE_AIFS;
3444 /*XR is only supported on 5212*/
3445 if (IS_CHAN_XR(ah->ah_current_channel) &&
3446 ah->ah_version == AR5K_AR5212) {
3447 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
3448 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
3449 ah->ah_aifs = AR5K_TUNE_AIFS_XR;
3450 /*B mode is not supported on 5210*/
3451 } else if (IS_CHAN_B(ah->ah_current_channel) &&
3452 ah->ah_version != AR5K_AR5210) {
3453 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
3454 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
3455 ah->ah_aifs = AR5K_TUNE_AIFS_11B;
3456 }
3457
3458 cw_min = 1;
3459 while (cw_min < ah->ah_cw_min)
3460 cw_min = (cw_min << 1) | 1;
3461
3462 cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
3463 ((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
3464 cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
3465 ((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
3466
3467 /*
3468 * Calculate and set retry limits
3469 */
3470 if (ah->ah_software_retry) {
3471 /* XXX Need to test this */
3472 retry_lg = ah->ah_limit_tx_retries;
3473 retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
3474 AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
3475 } else {
3476 retry_lg = AR5K_INIT_LG_RETRY;
3477 retry_sh = AR5K_INIT_SH_RETRY;
3478 }
3479
3480 /*No QCU/DCU [5210]*/
3481 if (ah->ah_version == AR5K_AR5210) {
3482 ath5k_hw_reg_write(ah,
3483 (cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
3484 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
3485 AR5K_NODCU_RETRY_LMT_SLG_RETRY)
3486 | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
3487 AR5K_NODCU_RETRY_LMT_SSH_RETRY)
3488 | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
3489 | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
3490 AR5K_NODCU_RETRY_LMT);
3491 } else {
3492 /*QCU/DCU [5211+]*/
3493 ath5k_hw_reg_write(ah,
3494 AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
3495 AR5K_DCU_RETRY_LMT_SLG_RETRY) |
3496 AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
3497 AR5K_DCU_RETRY_LMT_SSH_RETRY) |
3498 AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
3499 AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
3500 AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
3501
3502 /*===Rest is also for QCU/DCU only [5211+]===*/
3503
3504 /*
3505 * Set initial content window (cw_min/cw_max)
3506 * and arbitrated interframe space (aifs)...
3507 */
3508 ath5k_hw_reg_write(ah,
3509 AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
3510 AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
3511 AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
3512 AR5K_DCU_LCL_IFS_AIFS),
3513 AR5K_QUEUE_DFS_LOCAL_IFS(queue));
3514
3515 /*
3516 * Set misc registers
3517 */
3518 ath5k_hw_reg_write(ah, AR5K_QCU_MISC_DCU_EARLY,
3519 AR5K_QUEUE_MISC(queue));
3520
3521 if (tq->tqi_cbr_period) {
3522 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
3523 AR5K_QCU_CBRCFG_INTVAL) |
3524 AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
3525 AR5K_QCU_CBRCFG_ORN_THRES),
3526 AR5K_QUEUE_CBRCFG(queue));
3527 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
3528 AR5K_QCU_MISC_FRSHED_CBR);
3529 if (tq->tqi_cbr_overflow_limit)
3530 AR5K_REG_ENABLE_BITS(ah,
3531 AR5K_QUEUE_MISC(queue),
3532 AR5K_QCU_MISC_CBR_THRES_ENABLE);
3533 }
3534
3535 if (tq->tqi_ready_time)
3536 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
3537 AR5K_QCU_RDYTIMECFG_INTVAL) |
3538 AR5K_QCU_RDYTIMECFG_ENABLE,
3539 AR5K_QUEUE_RDYTIMECFG(queue));
3540
3541 if (tq->tqi_burst_time) {
3542 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
3543 AR5K_DCU_CHAN_TIME_DUR) |
3544 AR5K_DCU_CHAN_TIME_ENABLE,
3545 AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
3546
3547 if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
3548 AR5K_REG_ENABLE_BITS(ah,
3549 AR5K_QUEUE_MISC(queue),
3550 AR5K_QCU_MISC_RDY_VEOL_POLICY);
3551 }
3552
3553 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
3554 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
3555 AR5K_QUEUE_DFS_MISC(queue));
3556
3557 if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
3558 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
3559 AR5K_QUEUE_DFS_MISC(queue));
3560
3561 /*
3562 * Set registers by queue type
3563 */
3564 switch (tq->tqi_type) {
3565 case AR5K_TX_QUEUE_BEACON:
3566 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
3567 AR5K_QCU_MISC_FRSHED_DBA_GT |
3568 AR5K_QCU_MISC_CBREXP_BCN |
3569 AR5K_QCU_MISC_BCN_ENABLE);
3570
3571 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
3572 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
3573 AR5K_DCU_MISC_ARBLOCK_CTL_S) |
3574 AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
3575 AR5K_DCU_MISC_BCN_ENABLE);
3576
3577 ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
3578 (AR5K_TUNE_SW_BEACON_RESP -
3579 AR5K_TUNE_DMA_BEACON_RESP) -
3580 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
3581 AR5K_QCU_RDYTIMECFG_ENABLE,
3582 AR5K_QUEUE_RDYTIMECFG(queue));
3583 break;
3584
3585 case AR5K_TX_QUEUE_CAB:
3586 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
3587 AR5K_QCU_MISC_FRSHED_DBA_GT |
3588 AR5K_QCU_MISC_CBREXP |
3589 AR5K_QCU_MISC_CBREXP_BCN);
3590
3591 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
3592 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
3593 AR5K_DCU_MISC_ARBLOCK_CTL_S));
3594 break;
3595
3596 case AR5K_TX_QUEUE_UAPSD:
3597 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
3598 AR5K_QCU_MISC_CBREXP);
3599 break;
3600
3601 case AR5K_TX_QUEUE_DATA:
3602 default:
3603 break;
3604 }
3605
3606 /*
3607 * Enable interrupts for this tx queue
3608 * in the secondary interrupt mask registers
3609 */
3610 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
3611 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
3612
3613 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
3614 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
3615
3616 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
3617 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
3618
3619 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
3620 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
3621
3622 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
3623 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
3624
3625
3626 /* Update secondary interrupt mask registers */
3627 ah->ah_txq_imr_txok &= ah->ah_txq_status;
3628 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
3629 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
3630 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
3631 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
3632
3633 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
3634 AR5K_SIMR0_QCU_TXOK) |
3635 AR5K_REG_SM(ah->ah_txq_imr_txdesc,
3636 AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
3637 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
3638 AR5K_SIMR1_QCU_TXERR) |
3639 AR5K_REG_SM(ah->ah_txq_imr_txeol,
3640 AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
3641 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txurn,
3642 AR5K_SIMR2_QCU_TXURN), AR5K_SIMR2);
3643 }
3644
3645 return 0;
3646}
3647
3648/*
3649 * Get number of pending frames
3650 * for a specific queue [5211+]
3651 */
3652u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) {
3653 ATH5K_TRACE(ah->ah_sc);
3654 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
3655
3656 /* Return if queue is declared inactive */
3657 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
3658 return false;
3659
3660 /* XXX: How about AR5K_CFG_TXCNT ? */
3661 if (ah->ah_version == AR5K_AR5210)
3662 return false;
3663
3664 return AR5K_QUEUE_STATUS(queue) & AR5K_QCU_STS_FRMPENDCNT;
3665}
3666
3667/*
3668 * Set slot time
3669 */
3670int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
3671{
3672 ATH5K_TRACE(ah->ah_sc);
3673 if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
3674 return -EINVAL;
3675
3676 if (ah->ah_version == AR5K_AR5210)
3677 ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
3678 ah->ah_turbo), AR5K_SLOT_TIME);
3679 else
3680 ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
3681
3682 return 0;
3683}
3684
3685/*
3686 * Get slot time
3687 */
3688unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
3689{
3690 ATH5K_TRACE(ah->ah_sc);
3691 if (ah->ah_version == AR5K_AR5210)
3692 return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
3693 AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
3694 else
3695 return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
3696}
3697
3698
3699/******************************\
3700 Hardware Descriptor Functions
3701\******************************/
3702
3703/*
3704 * TX Descriptor
3705 */
3706
3707/*
3708 * Initialize the 2-word tx descriptor on 5210/5211
3709 */
3710static int
3711ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3712 unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type,
3713 unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
3714 unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
3715 unsigned int rtscts_rate, unsigned int rtscts_duration)
3716{
3717 u32 frame_type;
3718 struct ath5k_hw_2w_tx_ctl *tx_ctl;
3719 unsigned int frame_len;
3720
3721 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
3722
3723 /*
3724 * Validate input
3725 * - Zero retries don't make sense.
3726 * - A zero rate will put the HW into a mode where it continously sends
3727 * noise on the channel, so it is important to avoid this.
3728 */
3729 if (unlikely(tx_tries0 == 0)) {
3730 ATH5K_ERR(ah->ah_sc, "zero retries\n");
3731 WARN_ON(1);
3732 return -EINVAL;
3733 }
3734 if (unlikely(tx_rate0 == 0)) {
3735 ATH5K_ERR(ah->ah_sc, "zero rate\n");
3736 WARN_ON(1);
3737 return -EINVAL;
3738 }
3739
3740 /* Clear descriptor */
3741 memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
3742
3743 /* Setup control descriptor */
3744
3745 /* Verify and set frame length */
3746
3747 /* remove padding we might have added before */
3748 frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
3749
3750 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
3751 return -EINVAL;
3752
3753 tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
3754
3755 /* Verify and set buffer length */
3756
3757 /* NB: beacon's BufLen must be a multiple of 4 bytes */
3758 if(type == AR5K_PKT_TYPE_BEACON)
3759 pkt_len = roundup(pkt_len, 4);
3760
3761 if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
3762 return -EINVAL;
3763
3764 tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
3765
3766 /*
3767 * Verify and set header length
3768 * XXX: I only found that on 5210 code, does it work on 5211 ?
3769 */
3770 if (ah->ah_version == AR5K_AR5210) {
3771 if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN)
3772 return -EINVAL;
3773 tx_ctl->tx_control_0 |=
3774 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
3775 }
3776
3777 /*Diferences between 5210-5211*/
3778 if (ah->ah_version == AR5K_AR5210) {
3779 switch (type) {
3780 case AR5K_PKT_TYPE_BEACON:
3781 case AR5K_PKT_TYPE_PROBE_RESP:
3782 frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
3783 case AR5K_PKT_TYPE_PIFS:
3784 frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
3785 default:
3786 frame_type = type /*<< 2 ?*/;
3787 }
3788
3789 tx_ctl->tx_control_0 |=
3790 AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) |
3791 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
3792 } else {
3793 tx_ctl->tx_control_0 |=
3794 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
3795 AR5K_REG_SM(antenna_mode, AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
3796 tx_ctl->tx_control_1 |=
3797 AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE);
3798 }
3799#define _TX_FLAGS(_c, _flag) \
3800 if (flags & AR5K_TXDESC_##_flag) \
3801 tx_ctl->tx_control_##_c |= \
3802 AR5K_2W_TX_DESC_CTL##_c##_##_flag
3803
3804 _TX_FLAGS(0, CLRDMASK);
3805 _TX_FLAGS(0, VEOL);
3806 _TX_FLAGS(0, INTREQ);
3807 _TX_FLAGS(0, RTSENA);
3808 _TX_FLAGS(1, NOACK);
3809
3810#undef _TX_FLAGS
3811
3812 /*
3813 * WEP crap
3814 */
3815 if (key_index != AR5K_TXKEYIX_INVALID) {
3816 tx_ctl->tx_control_0 |=
3817 AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
3818 tx_ctl->tx_control_1 |=
3819 AR5K_REG_SM(key_index,
3820 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
3821 }
3822
3823 /*
3824 * RTS/CTS Duration [5210 ?]
3825 */
3826 if ((ah->ah_version == AR5K_AR5210) &&
3827 (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
3828 tx_ctl->tx_control_1 |= rtscts_duration &
3829 AR5K_2W_TX_DESC_CTL1_RTS_DURATION;
3830
3831 return 0;
3832}
3833
/*
 * Initialize the 4-word tx descriptor on 5212
 *
 * Fills the four control words of a 5212 tx descriptor: frame/buffer
 * lengths, tx power, antenna, frame type, tries/rate for series 0,
 * flags, optional encryption key index and RTS/CTS settings.
 *
 * Returns 0 on success, -EINVAL for invalid parameters, lengths that
 * do not fit their fields, or when both RTSENA and CTSENA are set.
 */
static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
	struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
	enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
	unsigned int tx_tries0, unsigned int key_index,
	unsigned int antenna_mode, unsigned int flags, unsigned int rtscts_rate,
	unsigned int rtscts_duration)
{
	struct ath5k_hw_4w_tx_ctl *tx_ctl;
	unsigned int frame_len;

	ATH5K_TRACE(ah->ah_sc);
	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;

	/*
	 * Validate input
	 * - Zero retries don't make sense.
	 * - A zero rate will put the HW into a mode where it continously sends
	 *   noise on the channel, so it is important to avoid this.
	 */
	if (unlikely(tx_tries0 == 0)) {
		ATH5K_ERR(ah->ah_sc, "zero retries\n");
		WARN_ON(1);
		return -EINVAL;
	}
	if (unlikely(tx_rate0 == 0)) {
		ATH5K_ERR(ah->ah_sc, "zero rate\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Clear descriptor */
	memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));

	/* Setup control descriptor */

	/* Verify and set frame length */

	/* remove padding we might have added before */
	frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;

	if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
		return -EINVAL;

	tx_ctl->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;

	/* Verify and set buffer length */

	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	if(type == AR5K_PKT_TYPE_BEACON)
		pkt_len = roundup(pkt_len, 4);

	if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
		return -EINVAL;

	tx_ctl->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;

	/* Power, antenna, type, series-0 tries and rate
	 * (AR5K_TUNE_HWTXTRIES is added on top of the caller's tries) */
	tx_ctl->tx_control_0 |=
		AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
		AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
	tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
					AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
	tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
					AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
	tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;

#define _TX_FLAGS(_c, _flag)					\
	if (flags & AR5K_TXDESC_##_flag)			\
		tx_ctl->tx_control_##_c |=			\
			AR5K_4W_TX_DESC_CTL##_c##_##_flag

	_TX_FLAGS(0, CLRDMASK);
	_TX_FLAGS(0, VEOL);
	_TX_FLAGS(0, INTREQ);
	_TX_FLAGS(0, RTSENA);
	_TX_FLAGS(0, CTSENA);
	_TX_FLAGS(1, NOACK);

#undef _TX_FLAGS

	/*
	 * WEP crap
	 */
	if (key_index != AR5K_TXKEYIX_INVALID) {
		tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
		tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
				AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
	}

	/*
	 * RTS/CTS — mutually exclusive; reject when both are requested
	 */
	if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
		if ((flags & AR5K_TXDESC_RTSENA) &&
				(flags & AR5K_TXDESC_CTSENA))
			return -EINVAL;
		tx_ctl->tx_control_2 |= rtscts_duration &
				AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
		tx_ctl->tx_control_3 |= AR5K_REG_SM(rtscts_rate,
				AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
	}

	return 0;
}
3940
3941/*
3942 * Initialize a 4-word multirate tx descriptor on 5212
3943 */
3944static int
3945ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3946 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, u_int tx_tries2,
3947 unsigned int tx_rate3, u_int tx_tries3)
3948{
3949 struct ath5k_hw_4w_tx_ctl *tx_ctl;
3950
3951 /*
3952 * Rates can be 0 as long as the retry count is 0 too.
3953 * A zero rate and nonzero retry count will put the HW into a mode where
3954 * it continously sends noise on the channel, so it is important to
3955 * avoid this.
3956 */
3957 if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
3958 (tx_rate2 == 0 && tx_tries2 != 0) ||
3959 (tx_rate3 == 0 && tx_tries3 != 0))) {
3960 ATH5K_ERR(ah->ah_sc, "zero rate\n");
3961 WARN_ON(1);
3962 return -EINVAL;
3963 }
3964
3965 if (ah->ah_version == AR5K_AR5212) {
3966 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
3967
3968#define _XTX_TRIES(_n) \
3969 if (tx_tries##_n) { \
3970 tx_ctl->tx_control_2 |= \
3971 AR5K_REG_SM(tx_tries##_n, \
3972 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \
3973 tx_ctl->tx_control_3 |= \
3974 AR5K_REG_SM(tx_rate##_n, \
3975 AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \
3976 }
3977
3978 _XTX_TRIES(1);
3979 _XTX_TRIES(2);
3980 _XTX_TRIES(3);
3981
3982#undef _XTX_TRIES
3983
3984 return 1;
3985 }
3986
3987 return 0;
3988}
3989
3990/*
3991 * Proccess the tx status descriptor on 5210/5211
3992 */
3993static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
3994 struct ath5k_desc *desc, struct ath5k_tx_status *ts)
3995{
3996 struct ath5k_hw_2w_tx_ctl *tx_ctl;
3997 struct ath5k_hw_tx_status *tx_status;
3998
3999 ATH5K_TRACE(ah->ah_sc);
4000
4001 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
4002 tx_status = &desc->ud.ds_tx5210.tx_stat;
4003
4004 /* No frame has been send or error */
4005 if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
4006 return -EINPROGRESS;
4007
4008 /*
4009 * Get descriptor status
4010 */
4011 ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
4012 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
4013 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
4014 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
4015 ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
4016 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
4017 /*TODO: ts->ts_virtcol + test*/
4018 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
4019 AR5K_DESC_TX_STATUS1_SEQ_NUM);
4020 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
4021 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
4022 ts->ts_antenna = 1;
4023 ts->ts_status = 0;
4024 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_0,
4025 AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
4026
4027 if ((tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK) == 0){
4028 if (tx_status->tx_status_0 &
4029 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
4030 ts->ts_status |= AR5K_TXERR_XRETRY;
4031
4032 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
4033 ts->ts_status |= AR5K_TXERR_FIFO;
4034
4035 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
4036 ts->ts_status |= AR5K_TXERR_FILT;
4037 }
4038
4039 return 0;
4040}
4041
4042/*
4043 * Proccess a tx descriptor on 5212
4044 */
4045static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
4046 struct ath5k_desc *desc, struct ath5k_tx_status *ts)
4047{
4048 struct ath5k_hw_4w_tx_ctl *tx_ctl;
4049 struct ath5k_hw_tx_status *tx_status;
4050
4051 ATH5K_TRACE(ah->ah_sc);
4052
4053 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
4054 tx_status = &desc->ud.ds_tx5212.tx_stat;
4055
4056 /* No frame has been send or error */
4057 if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
4058 return -EINPROGRESS;
4059
4060 /*
4061 * Get descriptor status
4062 */
4063 ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
4064 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
4065 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
4066 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
4067 ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
4068 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
4069 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
4070 AR5K_DESC_TX_STATUS1_SEQ_NUM);
4071 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
4072 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
4073 ts->ts_antenna = (tx_status->tx_status_1 &
4074 AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1;
4075 ts->ts_status = 0;
4076
4077 switch (AR5K_REG_MS(tx_status->tx_status_1,
4078 AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX)) {
4079 case 0:
4080 ts->ts_rate = tx_ctl->tx_control_3 &
4081 AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
4082 break;
4083 case 1:
4084 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
4085 AR5K_4W_TX_DESC_CTL3_XMIT_RATE1);
4086 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
4087 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
4088 break;
4089 case 2:
4090 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
4091 AR5K_4W_TX_DESC_CTL3_XMIT_RATE2);
4092 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
4093 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2);
4094 break;
4095 case 3:
4096 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
4097 AR5K_4W_TX_DESC_CTL3_XMIT_RATE3);
4098 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
4099 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3);
4100 break;
4101 }
4102
4103 if ((tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK) == 0){
4104 if (tx_status->tx_status_0 &
4105 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
4106 ts->ts_status |= AR5K_TXERR_XRETRY;
4107
4108 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
4109 ts->ts_status |= AR5K_TXERR_FIFO;
4110
4111 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
4112 ts->ts_status |= AR5K_TXERR_FILT;
4113 }
4114
4115 return 0;
4116}
4117
4118/*
4119 * RX Descriptor
4120 */
4121
4122/*
4123 * Initialize an rx descriptor
4124 */
4125int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
4126 u32 size, unsigned int flags)
4127{
4128 struct ath5k_hw_rx_ctl *rx_ctl;
4129
4130 ATH5K_TRACE(ah->ah_sc);
4131 rx_ctl = &desc->ud.ds_rx.rx_ctl;
4132
4133 /*
4134 * Clear the descriptor
4135 * If we don't clean the status descriptor,
4136 * while scanning we get too many results,
4137 * most of them virtual, after some secs
4138 * of scanning system hangs. M.F.
4139 */
4140 memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
4141
4142 /* Setup descriptor */
4143 rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
4144 if (unlikely(rx_ctl->rx_control_1 != size))
4145 return -EINVAL;
4146
4147 if (flags & AR5K_RXDESC_INTREQ)
4148 rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
4149
4150 return 0;
4151}
4152
4153/*
4154 * Proccess the rx status descriptor on 5210/5211
4155 */
4156static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
4157 struct ath5k_desc *desc, struct ath5k_rx_status *rs)
4158{
4159 struct ath5k_hw_rx_status *rx_status;
4160
4161 rx_status = &desc->ud.ds_rx.u.rx_stat;
4162
4163 /* No frame received / not ready */
4164 if (unlikely((rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_DONE)
4165 == 0))
4166 return -EINPROGRESS;
4167
4168 /*
4169 * Frame receive status
4170 */
4171 rs->rs_datalen = rx_status->rx_status_0 &
4172 AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
4173 rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
4174 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
4175 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
4176 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
4177 rs->rs_antenna = rx_status->rx_status_0 &
4178 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA;
4179 rs->rs_more = rx_status->rx_status_0 &
4180 AR5K_5210_RX_DESC_STATUS0_MORE;
4181 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */
4182 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
4183 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
4184 rs->rs_status = 0;
4185 rs->rs_phyerr = 0;
4186
4187 /*
4188 * Key table status
4189 */
4190 if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
4191 rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
4192 AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
4193 else
4194 rs->rs_keyix = AR5K_RXKEYIX_INVALID;
4195
4196 /*
4197 * Receive/descriptor errors
4198 */
4199 if ((rx_status->rx_status_1 &
4200 AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK) == 0) {
4201 if (rx_status->rx_status_1 &
4202 AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
4203 rs->rs_status |= AR5K_RXERR_CRC;
4204
4205 if (rx_status->rx_status_1 &
4206 AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN)
4207 rs->rs_status |= AR5K_RXERR_FIFO;
4208
4209 if (rx_status->rx_status_1 &
4210 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
4211 rs->rs_status |= AR5K_RXERR_PHY;
4212 rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1,
4213 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
4214 }
4215
4216 if (rx_status->rx_status_1 &
4217 AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
4218 rs->rs_status |= AR5K_RXERR_DECRYPT;
4219 }
4220
4221 return 0;
4222}
4223
4224/*
4225 * Proccess the rx status descriptor on 5212
4226 */
4227static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
4228 struct ath5k_desc *desc, struct ath5k_rx_status *rs)
4229{
4230 struct ath5k_hw_rx_status *rx_status;
4231 struct ath5k_hw_rx_error *rx_err;
4232
4233 ATH5K_TRACE(ah->ah_sc);
4234 rx_status = &desc->ud.ds_rx.u.rx_stat;
4235
4236 /* Overlay on error */
4237 rx_err = &desc->ud.ds_rx.u.rx_err;
4238
4239 /* No frame received / not ready */
4240 if (unlikely((rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_DONE)
4241 == 0))
4242 return -EINPROGRESS;
4243
4244 /*
4245 * Frame receive status
4246 */
4247 rs->rs_datalen = rx_status->rx_status_0 &
4248 AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
4249 rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
4250 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
4251 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
4252 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
4253 rs->rs_antenna = rx_status->rx_status_0 &
4254 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA;
4255 rs->rs_more = rx_status->rx_status_0 &
4256 AR5K_5212_RX_DESC_STATUS0_MORE;
4257 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
4258 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
4259 rs->rs_status = 0;
4260 rs->rs_phyerr = 0;
4261
4262 /*
4263 * Key table status
4264 */
4265 if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
4266 rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
4267 AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
4268 else
4269 rs->rs_keyix = AR5K_RXKEYIX_INVALID;
4270
4271 /*
4272 * Receive/descriptor errors
4273 */
4274 if ((rx_status->rx_status_1 &
4275 AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK) == 0) {
4276 if (rx_status->rx_status_1 &
4277 AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
4278 rs->rs_status |= AR5K_RXERR_CRC;
4279
4280 if (rx_status->rx_status_1 &
4281 AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
4282 rs->rs_status |= AR5K_RXERR_PHY;
4283 rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
4284 AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
4285 }
4286
4287 if (rx_status->rx_status_1 &
4288 AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
4289 rs->rs_status |= AR5K_RXERR_DECRYPT;
4290
4291 if (rx_status->rx_status_1 &
4292 AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
4293 rs->rs_status |= AR5K_RXERR_MIC;
4294 }
4295
4296 return 0;
4297}
4298
4299
4300/****************\
4301 GPIO Functions
4302\****************/
4303
4304/*
4305 * Set led state
4306 */
4307void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
4308{
4309 u32 led;
4310 /*5210 has different led mode handling*/
4311 u32 led_5210;
4312
4313 ATH5K_TRACE(ah->ah_sc);
4314
4315 /*Reset led status*/
4316 if (ah->ah_version != AR5K_AR5210)
4317 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
4318 AR5K_PCICFG_LEDMODE | AR5K_PCICFG_LED);
4319 else
4320 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED);
4321
4322 /*
4323 * Some blinking values, define at your wish
4324 */
4325 switch (state) {
4326 case AR5K_LED_SCAN:
4327 case AR5K_LED_AUTH:
4328 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND;
4329 led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL;
4330 break;
4331
4332 case AR5K_LED_INIT:
4333 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE;
4334 led_5210 = AR5K_PCICFG_LED_PEND;
4335 break;
4336
4337 case AR5K_LED_ASSOC:
4338 case AR5K_LED_RUN:
4339 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC;
4340 led_5210 = AR5K_PCICFG_LED_ASSOC;
4341 break;
4342
4343 default:
4344 led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE;
4345 led_5210 = AR5K_PCICFG_LED_PEND;
4346 break;
4347 }
4348
4349 /*Write new status to the register*/
4350 if (ah->ah_version != AR5K_AR5210)
4351 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led);
4352 else
4353 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
4354}
4355
4356/*
4357 * Set GPIO outputs
4358 */
4359int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
4360{
4361 ATH5K_TRACE(ah->ah_sc);
4362 if (gpio > AR5K_NUM_GPIO)
4363 return -EINVAL;
4364
4365 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &~
4366 AR5K_GPIOCR_OUT(gpio)) | AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR);
4367
4368 return 0;
4369}
4370
4371/*
4372 * Set GPIO inputs
4373 */
4374int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
4375{
4376 ATH5K_TRACE(ah->ah_sc);
4377 if (gpio > AR5K_NUM_GPIO)
4378 return -EINVAL;
4379
4380 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &~
4381 AR5K_GPIOCR_OUT(gpio)) | AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR);
4382
4383 return 0;
4384}
4385
4386/*
4387 * Get GPIO state
4388 */
4389u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
4390{
4391 ATH5K_TRACE(ah->ah_sc);
4392 if (gpio > AR5K_NUM_GPIO)
4393 return 0xffffffff;
4394
4395 /* GPIO input magic */
4396 return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) &
4397 0x1;
4398}
4399
4400/*
4401 * Set GPIO state
4402 */
4403int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
4404{
4405 u32 data;
4406 ATH5K_TRACE(ah->ah_sc);
4407
4408 if (gpio > AR5K_NUM_GPIO)
4409 return -EINVAL;
4410
4411 /* GPIO output magic */
4412 data = ath5k_hw_reg_read(ah, AR5K_GPIODO);
4413
4414 data &= ~(1 << gpio);
4415 data |= (val & 1) << gpio;
4416
4417 ath5k_hw_reg_write(ah, data, AR5K_GPIODO);
4418
4419 return 0;
4420}
4421
4422/*
4423 * Initialize the GPIO interrupt (RFKill switch)
4424 */
4425void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
4426 u32 interrupt_level)
4427{
4428 u32 data;
4429
4430 ATH5K_TRACE(ah->ah_sc);
4431 if (gpio > AR5K_NUM_GPIO)
4432 return;
4433
4434 /*
4435 * Set the GPIO interrupt
4436 */
4437 data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &
4438 ~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH |
4439 AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) |
4440 (AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA);
4441
4442 ath5k_hw_reg_write(ah, interrupt_level ? data :
4443 (data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR);
4444
4445 ah->ah_imr |= AR5K_IMR_GPIO;
4446
4447 /* Enable GPIO interrupts */
4448 AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO);
4449}
4450
4451
4452
4453
4454/****************\
4455 Misc functions
4456\****************/
4457
4458int ath5k_hw_get_capability(struct ath5k_hw *ah,
4459 enum ath5k_capability_type cap_type,
4460 u32 capability, u32 *result)
4461{
4462 ATH5K_TRACE(ah->ah_sc);
4463
4464 switch (cap_type) {
4465 case AR5K_CAP_NUM_TXQUEUES:
4466 if (result) {
4467 if (ah->ah_version == AR5K_AR5210)
4468 *result = AR5K_NUM_TX_QUEUES_NOQCU;
4469 else
4470 *result = AR5K_NUM_TX_QUEUES;
4471 goto yes;
4472 }
4473 case AR5K_CAP_VEOL:
4474 goto yes;
4475 case AR5K_CAP_COMPRESSION:
4476 if (ah->ah_version == AR5K_AR5212)
4477 goto yes;
4478 else
4479 goto no;
4480 case AR5K_CAP_BURST:
4481 goto yes;
4482 case AR5K_CAP_TPC:
4483 goto yes;
4484 case AR5K_CAP_BSSIDMASK:
4485 if (ah->ah_version == AR5K_AR5212)
4486 goto yes;
4487 else
4488 goto no;
4489 case AR5K_CAP_XR:
4490 if (ah->ah_version == AR5K_AR5212)
4491 goto yes;
4492 else
4493 goto no;
4494 default:
4495 goto no;
4496 }
4497
4498no:
4499 return -EINVAL;
4500yes:
4501 return 0;
4502}
4503
4504static int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
4505 u16 assoc_id)
4506{
4507 ATH5K_TRACE(ah->ah_sc);
4508
4509 if (ah->ah_version == AR5K_AR5210) {
4510 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
4511 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
4512 return 0;
4513 }
4514
4515 return -EIO;
4516}
4517
4518static int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
4519{
4520 ATH5K_TRACE(ah->ah_sc);
4521
4522 if (ah->ah_version == AR5K_AR5210) {
4523 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
4524 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
4525 return 0;
4526 }
4527
4528 return -EIO;
4529}
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 2806b21bf90b..ea2e1a20b499 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * Initial register settings functions 2 * Initial register settings functions
3 * 3 *
4 * Copyright (c) 2004, 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org> 4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2006, 2007 Nick Kossifidis <mickflemm@gmail.com> 5 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
6 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> 6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
7 * 7 *
8 * Permission to use, copy, modify, and distribute this software for any 8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above 9 * purpose with or without fee is hereby granted, provided that the above
@@ -20,13 +20,9 @@
20 */ 20 */
21 21
22#include "ath5k.h" 22#include "ath5k.h"
23#include "base.h"
24#include "reg.h" 23#include "reg.h"
25 24#include "debug.h"
26/* 25#include "base.h"
27 * MAC/PHY REGISTERS
28 */
29
30 26
31/* 27/*
32 * Mode-independent initial register writes 28 * Mode-independent initial register writes
@@ -65,10 +61,10 @@ static const struct ath5k_ini ar5210_ini[] = {
65 { AR5K_TXCFG, AR5K_DMASIZE_128B }, 61 { AR5K_TXCFG, AR5K_DMASIZE_128B },
66 { AR5K_RXCFG, AR5K_DMASIZE_128B }, 62 { AR5K_RXCFG, AR5K_DMASIZE_128B },
67 { AR5K_CFG, AR5K_INIT_CFG }, 63 { AR5K_CFG, AR5K_INIT_CFG },
68 { AR5K_TOPS, AR5K_INIT_TOPS }, 64 { AR5K_TOPS, 8 },
69 { AR5K_RXNOFRM, AR5K_INIT_RXNOFRM }, 65 { AR5K_RXNOFRM, 8 },
70 { AR5K_RPGTO, AR5K_INIT_RPGTO }, 66 { AR5K_RPGTO, 0 },
71 { AR5K_TXNOFRM, AR5K_INIT_TXNOFRM }, 67 { AR5K_TXNOFRM, 0 },
72 { AR5K_SFR, 0 }, 68 { AR5K_SFR, 0 },
73 { AR5K_MIBC, 0 }, 69 { AR5K_MIBC, 0 },
74 { AR5K_MISC, 0 }, 70 { AR5K_MISC, 0 },
diff --git a/drivers/net/wireless/ath5k/pcu.c b/drivers/net/wireless/ath5k/pcu.c
new file mode 100644
index 000000000000..c77cee2a5582
--- /dev/null
+++ b/drivers/net/wireless/ath5k/pcu.c
@@ -0,0 +1,1002 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007-2008 Matthew W. S. Bell <mentor@madwifi.org>
5 * Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
6 * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
7 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 *
21 */
22
23/*********************************\
24* Protocol Control Unit Functions *
25\*********************************/
26
27#include "ath5k.h"
28#include "reg.h"
29#include "debug.h"
30#include "base.h"
31
32/*******************\
33* Generic functions *
34\*******************/
35
36/**
37 * ath5k_hw_set_opmode - Set PCU operating mode
38 *
39 * @ah: The &struct ath5k_hw
40 *
41 * Initialize PCU for the various operating modes (AP/STA etc)
42 *
43 * NOTE: ah->ah_op_mode must be set before calling this.
44 */
45int ath5k_hw_set_opmode(struct ath5k_hw *ah)
46{
47 u32 pcu_reg, beacon_reg, low_id, high_id;
48
49 pcu_reg = 0;
50 beacon_reg = 0;
51
52 ATH5K_TRACE(ah->ah_sc);
53
54 switch (ah->ah_op_mode) {
55 case NL80211_IFTYPE_ADHOC:
56 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_DESC_ANTENNA |
57 (ah->ah_version == AR5K_AR5210 ?
58 AR5K_STA_ID1_NO_PSPOLL : 0);
59 beacon_reg |= AR5K_BCR_ADHOC;
60 break;
61
62 case NL80211_IFTYPE_AP:
63 case NL80211_IFTYPE_MESH_POINT:
64 pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_RTS_DEF_ANTENNA |
65 (ah->ah_version == AR5K_AR5210 ?
66 AR5K_STA_ID1_NO_PSPOLL : 0);
67 beacon_reg |= AR5K_BCR_AP;
68 break;
69
70 case NL80211_IFTYPE_STATION:
71 pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
72 (ah->ah_version == AR5K_AR5210 ?
73 AR5K_STA_ID1_PWR_SV : 0);
74 case NL80211_IFTYPE_MONITOR:
75 pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
76 (ah->ah_version == AR5K_AR5210 ?
77 AR5K_STA_ID1_NO_PSPOLL : 0);
78 break;
79
80 default:
81 return -EINVAL;
82 }
83
84 /*
85 * Set PCU registers
86 */
87 low_id = AR5K_LOW_ID(ah->ah_sta_id);
88 high_id = AR5K_HIGH_ID(ah->ah_sta_id);
89 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
90 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
91
92 /*
93 * Set Beacon Control Register on 5210
94 */
95 if (ah->ah_version == AR5K_AR5210)
96 ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
97
98 return 0;
99}
100
101/**
102 * ath5k_hw_update - Update mib counters (mac layer statistics)
103 *
104 * @ah: The &struct ath5k_hw
105 * @stats: The &struct ieee80211_low_level_stats we use to track
106 * statistics on the driver
107 *
108 * Reads MIB counters from PCU and updates sw statistics. Must be
109 * called after a MIB interrupt.
110 */
111void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
112 struct ieee80211_low_level_stats *stats)
113{
114 ATH5K_TRACE(ah->ah_sc);
115
116 /* Read-And-Clear */
117 stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
118 stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
119 stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
120 stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
121
122 /* XXX: Should we use this to track beacon count ?
123 * -we read it anyway to clear the register */
124 ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
125
126 /* Reset profile count registers on 5212*/
127 if (ah->ah_version == AR5K_AR5212) {
128 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
129 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
130 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
131 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
132 }
133}
134
135/**
136 * ath5k_hw_set_ack_bitrate - set bitrate for ACKs
137 *
138 * @ah: The &struct ath5k_hw
139 * @high: Flag to determine if we want to use high transmition rate
140 * for ACKs or not
141 *
142 * If high flag is set, we tell hw to use a set of control rates based on
143 * the current transmition rate (check out control_rates array inside reset.c).
144 * If not hw just uses the lowest rate available for the current modulation
145 * scheme being used (1Mbit for CCK and 6Mbits for OFDM).
146 */
147void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
148{
149 if (ah->ah_version != AR5K_AR5212)
150 return;
151 else {
152 u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
153 if (high)
154 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
155 else
156 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
157 }
158}
159
160
161/******************\
162* ACK/CTS Timeouts *
163\******************/
164
165/**
166 * ath5k_hw_het_ack_timeout - Get ACK timeout from PCU in usec
167 *
168 * @ah: The &struct ath5k_hw
169 */
170unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
171{
172 ATH5K_TRACE(ah->ah_sc);
173
174 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
175 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
176}
177
178/**
179 * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
180 *
181 * @ah: The &struct ath5k_hw
182 * @timeout: Timeout in usec
183 */
184int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
185{
186 ATH5K_TRACE(ah->ah_sc);
187 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
188 ah->ah_turbo) <= timeout)
189 return -EINVAL;
190
191 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
192 ath5k_hw_htoclock(timeout, ah->ah_turbo));
193
194 return 0;
195}
196
197/**
198 * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec
199 *
200 * @ah: The &struct ath5k_hw
201 */
202unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
203{
204 ATH5K_TRACE(ah->ah_sc);
205 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
206 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
207}
208
209/**
210 * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
211 *
212 * @ah: The &struct ath5k_hw
213 * @timeout: Timeout in usec
214 */
215int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
216{
217 ATH5K_TRACE(ah->ah_sc);
218 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
219 ah->ah_turbo) <= timeout)
220 return -EINVAL;
221
222 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
223 ath5k_hw_htoclock(timeout, ah->ah_turbo));
224
225 return 0;
226}
227
228
229/****************\
230* BSSID handling *
231\****************/
232
233/**
234 * ath5k_hw_get_lladdr - Get station id
235 *
236 * @ah: The &struct ath5k_hw
237 * @mac: The card's mac address
238 *
239 * Initialize ah->ah_sta_id using the mac address provided
240 * (just a memcpy).
241 *
242 * TODO: Remove it once we merge ath5k_softc and ath5k_hw
243 */
244void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
245{
246 ATH5K_TRACE(ah->ah_sc);
247 memcpy(mac, ah->ah_sta_id, ETH_ALEN);
248}
249
250/**
251 * ath5k_hw_set_lladdr - Set station id
252 *
253 * @ah: The &struct ath5k_hw
254 * @mac: The card's mac address
255 *
256 * Set station id on hw using the provided mac address
257 */
258int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
259{
260 u32 low_id, high_id;
261
262 ATH5K_TRACE(ah->ah_sc);
263 /* Set new station ID */
264 memcpy(ah->ah_sta_id, mac, ETH_ALEN);
265
266 low_id = AR5K_LOW_ID(mac);
267 high_id = AR5K_HIGH_ID(mac);
268
269 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
270 ath5k_hw_reg_write(ah, high_id, AR5K_STA_ID1);
271
272 return 0;
273}
274
275/**
276 * ath5k_hw_set_associd - Set BSSID for association
277 *
278 * @ah: The &struct ath5k_hw
279 * @bssid: BSSID
280 * @assoc_id: Assoc id
281 *
282 * Sets the BSSID which trigers the "SME Join" operation
283 */
284void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id)
285{
286 u32 low_id, high_id;
287 u16 tim_offset = 0;
288
289 /*
290 * Set simple BSSID mask on 5212
291 */
292 if (ah->ah_version == AR5K_AR5212) {
293 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM0);
294 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM1);
295 }
296
297 /*
298 * Set BSSID which triggers the "SME Join" operation
299 */
300 low_id = AR5K_LOW_ID(bssid);
301 high_id = AR5K_HIGH_ID(bssid);
302 ath5k_hw_reg_write(ah, low_id, AR5K_BSS_ID0);
303 ath5k_hw_reg_write(ah, high_id | ((assoc_id & 0x3fff) <<
304 AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1);
305
306 if (assoc_id == 0) {
307 ath5k_hw_disable_pspoll(ah);
308 return;
309 }
310
311 AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
312 tim_offset ? tim_offset + 4 : 0);
313
314 ath5k_hw_enable_pspoll(ah, NULL, 0);
315}
316
317/**
318 * ath5k_hw_set_bssid_mask - filter out bssids we listen
319 *
320 * @ah: the &struct ath5k_hw
321 * @mask: the bssid_mask, a u8 array of size ETH_ALEN
322 *
323 * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
324 * which bits of the interface's MAC address should be looked at when trying
325 * to decide which packets to ACK. In station mode and AP mode with a single
326 * BSS every bit matters since we lock to only one BSS. In AP mode with
327 * multiple BSSes (virtual interfaces) not every bit matters because hw must
328 * accept frames for all BSSes and so we tweak some bits of our mac address
329 * in order to have multiple BSSes.
330 *
331 * NOTE: This is a simple filter and does *not* filter out all
332 * relevant frames. Some frames that are not for us might get ACKed from us
333 * by PCU because they just match the mask.
334 *
335 * When handling multiple BSSes you can get the BSSID mask by computing the
336 * set of ~ ( MAC XOR BSSID ) for all bssids we handle.
337 *
338 * When you do this you are essentially computing the common bits of all your
 * BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
340 * the MAC address to obtain the relevant bits and compare the result with
341 * (frame's BSSID & mask) to see if they match.
342 */
343/*
 * Simple example: on your card you have two BSSes you have created with
345 * BSSID-01 and BSSID-02. Lets assume BSSID-01 will not use the MAC address.
346 * There is another BSSID-03 but you are not part of it. For simplicity's sake,
347 * assuming only 4 bits for a mac address and for BSSIDs you can then have:
348 *
349 * \
350 * MAC: 0001 |
351 * BSSID-01: 0100 | --> Belongs to us
352 * BSSID-02: 1001 |
353 * /
354 * -------------------
355 * BSSID-03: 0110 | --> External
356 * -------------------
357 *
358 * Our bssid_mask would then be:
359 *
360 * On loop iteration for BSSID-01:
361 * ~(0001 ^ 0100) -> ~(0101)
362 * -> 1010
363 * bssid_mask = 1010
364 *
365 * On loop iteration for BSSID-02:
366 * bssid_mask &= ~(0001 ^ 1001)
367 * bssid_mask = (1010) & ~(0001 ^ 1001)
368 * bssid_mask = (1010) & ~(1001)
369 * bssid_mask = (1010) & (0110)
370 * bssid_mask = 0010
371 *
372 * A bssid_mask of 0010 means "only pay attention to the second least
373 * significant bit". This is because its the only bit common
 * amongst the MAC and all BSSIDs we support. To find out what the real
375 * common bit is we can simply "&" the bssid_mask now with any BSSID we have
376 * or our MAC address (we assume the hardware uses the MAC address).
377 *
378 * Now, suppose there's an incoming frame for BSSID-03:
379 *
380 * IFRAME-01: 0110
381 *
 * An easy eye-inspection of this already should tell you that this frame
 * will not pass our check. This is because the bssid_mask tells the
384 * hardware to only look at the second least significant bit and the
385 * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
386 * as 1, which does not match 0.
387 *
388 * So with IFRAME-01 we *assume* the hardware will do:
389 *
390 * allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
391 * --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
392 * --> allow = (0010) == 0000 ? 1 : 0;
393 * --> allow = 0
394 *
395 * Lets now test a frame that should work:
396 *
397 * IFRAME-02: 0001 (we should allow)
398 *
399 * allow = (0001 & 1010) == 1010
400 *
401 * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
402 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0;
403 * --> allow = (0010) == (0010)
404 * --> allow = 1
405 *
406 * Other examples:
407 *
408 * IFRAME-03: 0100 --> allowed
409 * IFRAME-04: 1001 --> allowed
410 * IFRAME-05: 1101 --> allowed but its not for us!!!
411 *
412 */
413int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
414{
415 u32 low_id, high_id;
416 ATH5K_TRACE(ah->ah_sc);
417
418 if (ah->ah_version == AR5K_AR5212) {
419 low_id = AR5K_LOW_ID(mask);
420 high_id = AR5K_HIGH_ID(mask);
421
422 ath5k_hw_reg_write(ah, low_id, AR5K_BSS_IDM0);
423 ath5k_hw_reg_write(ah, high_id, AR5K_BSS_IDM1);
424
425 return 0;
426 }
427
428 return -EIO;
429}
430
431
432/************\
433* RX Control *
434\************/
435
436/**
437 * ath5k_hw_start_rx_pcu - Start RX engine
438 *
439 * @ah: The &struct ath5k_hw
440 *
441 * Starts RX engine on PCU so that hw can process RXed frames
442 * (ACK etc).
443 *
444 * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
445 * TODO: Init ANI here
446 */
void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	/* Clear the RX-disable bit in the PCU diagnostic register */
	AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
452
453/**
 * ath5k_hw_stop_rx_pcu - Stop RX engine
455 *
456 * @ah: The &struct ath5k_hw
457 *
458 * Stops RX engine on PCU
459 *
460 * TODO: Detach ANI here
461 */
void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	/* Set the RX-disable bit in the PCU diagnostic register */
	AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
467
468/*
469 * Set multicast filter
470 */
void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
	ATH5K_TRACE(ah->ah_sc);
	/* Program both 32-bit halves of the 64-bit multicast filter */
	ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
	ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
478
479/*
480 * Set multicast filter by index
481 */
482int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
483{
484
485 ATH5K_TRACE(ah->ah_sc);
486 if (index >= 64)
487 return -EINVAL;
488 else if (index >= 32)
489 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
490 (1 << (index - 32)));
491 else
492 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
493
494 return 0;
495}
496
497/*
498 * Clear Multicast filter by index
499 */
500int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
501{
502
503 ATH5K_TRACE(ah->ah_sc);
504 if (index >= 64)
505 return -EINVAL;
506 else if (index >= 32)
507 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
508 (1 << (index - 32)));
509 else
510 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
511
512 return 0;
513}
514
515/**
516 * ath5k_hw_get_rx_filter - Get current rx filter
517 *
518 * @ah: The &struct ath5k_hw
519 *
520 * Returns the RX filter by reading rx filter and
521 * phy error filter registers. RX filter is used
522 * to set the allowed frame types that PCU will accept
523 * and pass to the driver. For a list of frame types
524 * check out reg.h.
525 */
526u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
527{
528 u32 data, filter = 0;
529
530 ATH5K_TRACE(ah->ah_sc);
531 filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
532
533 /*Radar detection for 5212*/
534 if (ah->ah_version == AR5K_AR5212) {
535 data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);
536
537 if (data & AR5K_PHY_ERR_FIL_RADAR)
538 filter |= AR5K_RX_FILTER_RADARERR;
539 if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
540 filter |= AR5K_RX_FILTER_PHYERR;
541 }
542
543 return filter;
544}
545
546/**
547 * ath5k_hw_set_rx_filter - Set rx filter
548 *
549 * @ah: The &struct ath5k_hw
550 * @filter: RX filter mask (see reg.h)
551 *
552 * Sets RX filter register and also handles PHY error filter
553 * register on 5212 and newer chips so that we have proper PHY
554 * error reporting.
555 */
void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
	u32 data = 0;

	ATH5K_TRACE(ah->ah_sc);

	/* Translate the radar/PHY-error filter bits into the separate
	 * PHY error filter register value used on 5212 */
	if (ah->ah_version == AR5K_AR5212) {
		if (filter & AR5K_RX_FILTER_RADARERR)
			data |= AR5K_PHY_ERR_FIL_RADAR;
		if (filter & AR5K_RX_FILTER_PHYERR)
			data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
	}

	/*
	 * The AR5210 uses promiscuous mode to detect radar activity
	 */
	if (ah->ah_version == AR5K_AR5210 &&
			(filter & AR5K_RX_FILTER_RADARERR)) {
		filter &= ~AR5K_RX_FILTER_RADARERR;
		filter |= AR5K_RX_FILTER_PROM;
	}

	/* Zero-length DMA: enabled only when PHY error reporting is on
	 * (presumably because PHY-error frames may be zero-length --
	 * TODO confirm against hardware docs) */
	if (data)
		AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
	else
		AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);

	/* Write RX Filter register (only the low 8 bits are used here) */
	ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);

	/* Write PHY error filter register on 5212 */
	if (ah->ah_version == AR5K_AR5212)
		ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);

}
593
594
595/****************\
596* Beacon control *
597\****************/
598
599/**
600 * ath5k_hw_get_tsf32 - Get a 32bit TSF
601 *
602 * @ah: The &struct ath5k_hw
603 *
604 * Returns lower 32 bits of current TSF
605 */
u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	/* Lower 32 bits of the hardware TSF counter */
	return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
}
611
612/**
613 * ath5k_hw_get_tsf64 - Get the full 64bit TSF
614 *
615 * @ah: The &struct ath5k_hw
616 *
617 * Returns the current TSF
618 */
619u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
620{
621 u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
622 ATH5K_TRACE(ah->ah_sc);
623
624 return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
625}
626
627/**
628 * ath5k_hw_reset_tsf - Force a TSF reset
629 *
630 * @ah: The &struct ath5k_hw
631 *
632 * Forces a TSF reset on PCU
633 */
void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	/* Setting this bit makes the hardware zero the TSF counter
	 * (presumably self-clearing -- TODO confirm) */
	AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_RESET_TSF);
}
639
640/*
641 * Initialize beacon timers
642 */
void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
	u32 timer1, timer2, timer3;

	ATH5K_TRACE(ah->ah_sc);
	/*
	 * Set the additional timers by mode
	 */
	switch (ah->ah_op_mode) {
	case NL80211_IFTYPE_STATION:
		/* Station mode: park the DMA/SW beacon-response timers
		 * at large values so they never fire */
		if (ah->ah_version == AR5K_AR5210) {
			timer1 = 0xffffffff;
			timer2 = 0xffffffff;
		} else {
			timer1 = 0x0000ffff;
			timer2 = 0x0007ffff;
		}
		break;

	default:
		/* AP/IBSS: fire a fixed lead time before the next
		 * beacon (<< 3: hardware units appear to be 1/8 TU --
		 * TODO confirm) */
		timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
		timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
	}

	/* ATIM window opens right after the next beacon; a zero window
	 * is coerced to 1 */
	timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1);

	/*
	 * Set the beacon register and enable all timers.
	 * (next beacon, DMA beacon, software beacon, ATIM window time)
	 */
	ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
	ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
	ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
	ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);

	ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
			AR5K_BEACON_RESET_TSF | AR5K_BEACON_ENABLE),
			AR5K_BEACON);
}
682
#if 0
/*
 * Set beacon timers (currently compiled out)
 */
int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
		const struct ath5k_beacon_state *state)
{
	u32 cfp_period, next_cfp, dtim, interval, next_beacon;

	/*
	 * TODO: should be changed through *state
	 * review struct ath5k_beacon_state struct
	 *
	 * XXX: These are used for cfp period below, are they
	 * ok ? Is it O.K. for tsf here to be 0 or should we use
	 * get_tsf ?
	 */
	u32 dtim_count = 0; /* XXX */
	u32 cfp_count = 0; /* XXX */
	u32 tsf = 0; /* XXX */

	ATH5K_TRACE(ah->ah_sc);
	/* Return on an invalid beacon state */
	if (state->bs_interval < 1)
		return -EINVAL;

	interval = state->bs_interval;
	dtim = state->bs_dtim_period;

	/*
	 * PCF support?
	 */
	if (state->bs_cfp_period > 0) {
		/*
		 * Enable PCF mode and set the CFP
		 * (Contention Free Period) and timer registers
		 */
		cfp_period = state->bs_cfp_period * state->bs_dtim_period *
			state->bs_interval;
		next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
			state->bs_interval;

		AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
				AR5K_STA_ID1_DEFAULT_ANTENNA |
				AR5K_STA_ID1_PCF);
		ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
		ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
				AR5K_CFP_DUR);
		/* << 3: timer granularity appears to be 1/8 TU --
		 * TODO confirm */
		ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
				next_cfp)) << 3, AR5K_TIMER2);
	} else {
		/* Disable PCF mode */
		AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
				AR5K_STA_ID1_DEFAULT_ANTENNA |
				AR5K_STA_ID1_PCF);
	}

	/*
	 * Enable the beacon timer register
	 */
	ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);

	/*
	 * Start the beacon timers
	 */
	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &
		~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
		AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
		AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
		AR5K_BEACON_PERIOD), AR5K_BEACON);

	/*
	 * Write new beacon miss threshold, if it appears to be valid
	 * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
	 * and return if its not in range. We can test this by reading value and
	 * setting value to a largest value and seeing which values register.
	 */

	AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
			state->bs_bmiss_threshold);

	/*
	 * Set sleep control register
	 * XXX: Didn't find this in 5210 code but since this register
	 * exists also in ar5k's 5210 headers i leave it as common code.
	 */
	AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
			(state->bs_sleep_duration - 3) << 3);

	/*
	 * Set enhanced sleep registers on 5212
	 */
	if (ah->ah_version == AR5K_AR5212) {
		/* Stretch the TIM interval to the sleep duration when
		 * the latter is an exact multiple of it */
		if (state->bs_sleep_duration > state->bs_interval &&
				roundup(state->bs_sleep_duration, interval) ==
				state->bs_sleep_duration)
			interval = state->bs_sleep_duration;

		/* Same for the DTIM interval */
		if (state->bs_sleep_duration > dtim && (dtim == 0 ||
				roundup(state->bs_sleep_duration, dtim) ==
				state->bs_sleep_duration))
			dtim = state->bs_sleep_duration;

		/* The TIM interval can never exceed the DTIM interval */
		if (interval > dtim)
			return -EINVAL;

		next_beacon = interval == dtim ? state->bs_next_dtim :
			state->bs_next_beacon;

		ath5k_hw_reg_write(ah,
			AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
			AR5K_SLEEP0_NEXT_DTIM) |
			AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
			AR5K_SLEEP0_ENH_SLEEP_EN |
			AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);

		ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
			AR5K_SLEEP1_NEXT_TIM) |
			AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);

		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
			AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
	}

	return 0;
}

/*
 * Reset beacon timers (currently compiled out)
 */
void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	/*
	 * Disable beacon timer
	 */
	ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);

	/*
	 * Disable some beacon register values
	 */
	AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
			AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
	ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
}

/*
 * Wait for beacon queue to finish (currently compiled out)
 */
int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
{
	unsigned int i;
	int ret;

	ATH5K_TRACE(ah->ah_sc);

	/* 5210 doesn't have QCU*/
	if (ah->ah_version == AR5K_AR5210) {
		/*
		 * Wait for beacon queue to finish by checking
		 * Control Register and Beacon Status Register.
		 *
		 * NOTE(review): the second read masks AR5K_CR with
		 * AR5K_BSR_TXQ1F, a BSR bit -- looks like a
		 * copy/paste; confirm the intended CR bit.
		 */
		for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
			if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
					||
					!(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
				break;
			udelay(10);
		}

		/* Timeout...
		 * NOTE(review): i is unsigned, so "i <= 0" only matches
		 * i == 0 -- works, but reads oddly */
		if (i <= 0) {
			/*
			 * Re-schedule the beacon queue
			 */
			ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BCR);

			return -EIO;
		}
		ret = 0;
	} else {
		/*5211/5212*/
		ret = ath5k_hw_register_timeout(ah,
			AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
			AR5K_QCU_STS_FRMPENDCNT, 0, false);

		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
			return -EIO;
	}

	return ret;
}
#endif
879
880
881/*********************\
882* Key table functions *
883\*********************/
884
885/*
886 * Reset a key entry on the table
887 */
int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
{
	unsigned int i;

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);

	/* Zero out every word of the key-cache entry */
	for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
		ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));

	/*
	 * Set NULL encryption on AR5212+
	 *
	 * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5)
	 *       AR5K_KEYTABLE_TYPE_NULL -> 0x00000007
	 *
	 * Note2: Windows driver (ndiswrapper) sets this to
	 *        0x00000714 instead of 0x00000007
	 */
	if (ah->ah_version > AR5K_AR5211)
		ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
				AR5K_KEYTABLE_TYPE(entry));

	return 0;
}
913
914/*
915 * Check if a table entry is valid
916 */
int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
{
	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);

	/* Check the validation flag at the end of the entry;
	 * non-zero means the entry holds a valid key */
	return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
		AR5K_KEYTABLE_VALID;
}
926
927/*
928 * Set a key entry on the table
929 */
int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
		const struct ieee80211_key_conf *key, const u8 *mac)
{
	unsigned int i;
	__le32 key_v[5] = {};
	u32 keytype;

	ATH5K_TRACE(ah->ah_sc);

	/* key->keylen comes in from mac80211 in bytes */

	if (key->keylen > AR5K_KEYTABLE_SIZE / 8)
		return -EOPNOTSUPP;

	switch (key->keylen) {
	/* WEP 40-bit = 40-bit entered key + 24 bit IV = 64-bit */
	case 40 / 8:
		memcpy(&key_v[0], key->key, 5);
		keytype = AR5K_KEYTABLE_TYPE_40;
		break;

	/* WEP 104-bit = 104-bit entered key + 24-bit IV = 128-bit */
	case 104 / 8:
		/* NOTE(review): key bytes are packed 6/6/1 across the
		 * __le32 words, leaving 2 bytes of padding after each
		 * 6-byte group -- presumably the hardware key-cache
		 * layout; confirm against register documentation */
		memcpy(&key_v[0], &key->key[0], 6);
		memcpy(&key_v[2], &key->key[6], 6);
		memcpy(&key_v[4], &key->key[12], 1);
		keytype = AR5K_KEYTABLE_TYPE_104;
		break;
	/* WEP 128-bit = 128-bit entered key + 24 bit IV = 152-bit */
	case 128 / 8:
		memcpy(&key_v[0], &key->key[0], 6);
		memcpy(&key_v[2], &key->key[6], 6);
		memcpy(&key_v[4], &key->key[12], 4);
		keytype = AR5K_KEYTABLE_TYPE_128;
		break;

	default:
		return -EINVAL; /* shouldn't happen */
	}

	/* Write the key words into the key-cache entry */
	for (i = 0; i < ARRAY_SIZE(key_v); i++)
		ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
				AR5K_KEYTABLE_OFF(entry, i));

	ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));

	/* Associate the station MAC (or broadcast) and mark entry valid */
	return ath5k_hw_set_key_lladdr(ah, entry, mac);
}
978
979int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
980{
981 u32 low_id, high_id;
982
983 ATH5K_TRACE(ah->ah_sc);
984 /* Invalid entry (key table overflow) */
985 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
986
987 /* MAC may be NULL if it's a broadcast key. In this case no need to
988 * to compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */
989 if (unlikely(mac == NULL)) {
990 low_id = 0xffffffff;
991 high_id = 0xffff | AR5K_KEYTABLE_VALID;
992 } else {
993 low_id = AR5K_LOW_ID(mac);
994 high_id = AR5K_HIGH_ID(mac) | AR5K_KEYTABLE_VALID;
995 }
996
997 ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
998 ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry));
999
1000 return 0;
1001}
1002
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index fa0d47faf574..1ea8ed962d26 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * PHY functions 2 * PHY functions
3 * 3 *
4 * Copyright (c) 2004, 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org> 4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2006, 2007 Nick Kossifidis <mickflemm@gmail.com> 5 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
6 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> 6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
7 * 7 *
8 * Permission to use, copy, modify, and distribute this software for any 8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above 9 * purpose with or without fee is hereby granted, provided that the above
@@ -19,6 +19,8 @@
19 * 19 *
20 */ 20 */
21 21
22#define _ATH5K_PHY
23
22#include <linux/delay.h> 24#include <linux/delay.h>
23 25
24#include "ath5k.h" 26#include "ath5k.h"
@@ -2501,3 +2503,5 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power)
2501 2503
2502 return ath5k_hw_txpower(ah, channel, power); 2504 return ath5k_hw_txpower(ah, channel, power);
2503} 2505}
2506
2507#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath5k/qcu.c b/drivers/net/wireless/ath5k/qcu.c
new file mode 100644
index 000000000000..2e20f7816ca7
--- /dev/null
+++ b/drivers/net/wireless/ath5k/qcu.c
@@ -0,0 +1,488 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/********************************************\
20Queue Control Unit, DFS Control Unit Functions
21\********************************************/
22
23#include "ath5k.h"
24#include "reg.h"
25#include "debug.h"
26#include "base.h"
27
28/*
29 * Get properties for a transmit queue
30 */
31int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
32 struct ath5k_txq_info *queue_info)
33{
34 ATH5K_TRACE(ah->ah_sc);
35 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
36 return 0;
37}
38
39/*
40 * Set properties for a transmit queue
41 */
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
		const struct ath5k_txq_info *queue_info)
{
	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Properties may only be set on a queue that is already active */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));

	/* Disable post-frame backoff for voice/video data queues and
	 * for U-APSD queues.
	 * XXX: Is this supported on 5210 ? */
	if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
			((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
			(queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
			queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
		ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;

	return 0;
}
62
63/*
64 * Initialize a transmit queue
65 */
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
		struct ath5k_txq_info *queue_info)
{
	unsigned int queue;
	int ret;

	ATH5K_TRACE(ah->ah_sc);

	/*
	 * Get queue by type
	 */
	/*5210 only has 2 queues*/
	if (ah->ah_version == AR5K_AR5210) {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			/* Grab the first inactive data queue.
			 * NOTE(review): the loop condition inspects
			 * ah_txq[queue] before the range check in the
			 * body, so it reads one slot past DATA_MAX
			 * before bailing out -- confirm ah_txq[] is
			 * large enough for that read */
			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
				ah->ah_txq[queue].tqi_type !=
				AR5K_TX_QUEUE_INACTIVE; queue++) {

				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
					return -EINVAL;
			}
			break;
		case AR5K_TX_QUEUE_UAPSD:
			queue = AR5K_TX_QUEUE_ID_UAPSD;
			break;
		case AR5K_TX_QUEUE_BEACON:
			queue = AR5K_TX_QUEUE_ID_BEACON;
			break;
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_CAB;
			break;
		case AR5K_TX_QUEUE_XR_DATA:
			if (ah->ah_version != AR5K_AR5212)
				ATH5K_ERR(ah->ah_sc,
					"XR data queues only supported in"
					" 5212!\n");
			queue = AR5K_TX_QUEUE_ID_XR_DATA;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Setup internal queue structure
	 */
	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
	ah->ah_txq[queue].tqi_type = queue_type;

	if (queue_info != NULL) {
		queue_info->tqi_type = queue_type;
		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
		if (ret)
			return ret;
	}

	/*
	 * We use ah_txq_status to hold a temp value for
	 * the Secondary interrupt mask registers on 5211+
	 * check out ath5k_hw_reset_tx_queue
	 */
	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

	/* On success the caller gets the queue number, not 0 */
	return queue;
}
144
145/*
146 * Get number of pending frames
147 * for a specific queue [5211+]
148 */
149u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
150{
151 ATH5K_TRACE(ah->ah_sc);
152 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
153
154 /* Return if queue is declared inactive */
155 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
156 return false;
157
158 /* XXX: How about AR5K_CFG_TXCNT ? */
159 if (ah->ah_version == AR5K_AR5210)
160 return false;
161
162 return AR5K_QUEUE_STATUS(queue) & AR5K_QCU_STS_FRMPENDCNT;
163}
164
165/*
166 * Set a transmit queue inactive
167 */
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	ATH5K_TRACE(ah->ah_sc);
	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
		return;

	/* This queue will be skipped in further operations */
	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
	/* Clear the queue's bit in the secondary-IMR setup mask */
	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
179
180/*
181 * Set DFS properties for a transmit queue on DCU
182 */
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	u32 cw_min, cw_max, retry_lg, retry_sh;
	struct ath5k_txq_info *tq = &ah->ah_txq[queue];

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* NOTE(review): redundant -- tq already points here via its
	 * initializer above */
	tq = &ah->ah_txq[queue];

	/* Nothing to program for a queue that is not in use */
	if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return 0;

	if (ah->ah_version == AR5K_AR5210) {
		/* Only handle data queues, others will be ignored */
		if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
			return 0;

		/* Set Slot time */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
			AR5K_SLOT_TIME);
		/* Set ACK_CTS timeout.
		 * NOTE(review): this writes to AR5K_SLOT_TIME again --
		 * looks like a copy/paste; confirm intended register */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
			AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
		/* Set Transmit Latency */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_TRANSMIT_LATENCY_TURBO :
			AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);

		/* Set IFS0: DIFS (derived from AIFS and slot time) in
		 * the upper field, SIFS in the lower */
		if (ah->ah_turbo) {
			ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
				(ah->ah_aifs + tq->tqi_aifs) *
				AR5K_INIT_SLOT_TIME_TURBO) <<
				AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
				AR5K_IFS0);
		} else {
			ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
				(ah->ah_aifs + tq->tqi_aifs) *
				AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
				AR5K_INIT_SIFS, AR5K_IFS0);
		}

		/* Set IFS1 */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
			AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
		/* Set AR5K_PHY_SETTLING (only the low 7 bits change) */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			(ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
			| 0x38 :
			(ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
			| 0x1C,
			AR5K_PHY_SETTLING);
		/* Set Frame Control Register */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			(AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
			AR5K_PHY_TURBO_SHORT | 0x2020) :
			(AR5K_PHY_FRAME_CTL_INI | 0x1020),
			AR5K_PHY_FRAME_CTL_5210);
	}

	/*
	 * Calculate cwmin/max by channel mode
	 */
	cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
	cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
	ah->ah_aifs = AR5K_TUNE_AIFS;
	/*XR is only supported on 5212*/
	if (IS_CHAN_XR(ah->ah_current_channel) &&
			ah->ah_version == AR5K_AR5212) {
		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
		ah->ah_aifs = AR5K_TUNE_AIFS_XR;
	/*B mode is not supported on 5210*/
	} else if (IS_CHAN_B(ah->ah_current_channel) &&
			ah->ah_version != AR5K_AR5210) {
		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
		ah->ah_aifs = AR5K_TUNE_AIFS_11B;
	}

	/* Round cw_min up to the next (2^n - 1) value */
	cw_min = 1;
	while (cw_min < ah->ah_cw_min)
		cw_min = (cw_min << 1) | 1;

	/* Apply the queue's per-queue cw shift (negative shifts
	 * right, positive shifts left keeping the all-ones form) */
	cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
		((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
	cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
		((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);

	/*
	 * Calculate and set retry limits
	 */
	if (ah->ah_software_retry) {
		/* XXX Need to test this */
		retry_lg = ah->ah_limit_tx_retries;
		retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
			AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
	} else {
		retry_lg = AR5K_INIT_LG_RETRY;
		retry_sh = AR5K_INIT_SH_RETRY;
	}

	/*No QCU/DCU [5210]*/
	if (ah->ah_version == AR5K_AR5210) {
		ath5k_hw_reg_write(ah,
			(cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
			| AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
				AR5K_NODCU_RETRY_LMT_SLG_RETRY)
			| AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
				AR5K_NODCU_RETRY_LMT_SSH_RETRY)
			| AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
			| AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
			AR5K_NODCU_RETRY_LMT);
	} else {
		/*QCU/DCU [5211+]*/
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
				AR5K_DCU_RETRY_LMT_SLG_RETRY) |
			AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
				AR5K_DCU_RETRY_LMT_SSH_RETRY) |
			AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
			AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));

		/*===Rest is also for QCU/DCU only [5211+]===*/

		/*
		 * Set initial content window (cw_min/cw_max)
		 * and arbitrated interframe space (aifs)...
		 */
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
			AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
			AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
				AR5K_DCU_LCL_IFS_AIFS),
			AR5K_QUEUE_DFS_LOCAL_IFS(queue));

		/*
		 * Set misc registers
		 */
		ath5k_hw_reg_write(ah, AR5K_QCU_MISC_DCU_EARLY,
			AR5K_QUEUE_MISC(queue));

		/* Constant-bit-rate scheduling, if requested */
		if (tq->tqi_cbr_period) {
			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
				AR5K_QCU_CBRCFG_INTVAL) |
				AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
				AR5K_QCU_CBRCFG_ORN_THRES),
				AR5K_QUEUE_CBRCFG(queue));
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_CBR);
			if (tq->tqi_cbr_overflow_limit)
				AR5K_REG_ENABLE_BITS(ah,
					AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_CBR_THRES_ENABLE);
		}

		if (tq->tqi_ready_time)
			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
				AR5K_QCU_RDYTIMECFG_INTVAL) |
				AR5K_QCU_RDYTIMECFG_ENABLE,
				AR5K_QUEUE_RDYTIMECFG(queue));

		if (tq->tqi_burst_time) {
			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
				AR5K_DCU_CHAN_TIME_DUR) |
				AR5K_DCU_CHAN_TIME_ENABLE,
				AR5K_QUEUE_DFS_CHANNEL_TIME(queue));

			if (tq->tqi_flags
			& AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
				AR5K_REG_ENABLE_BITS(ah,
					AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_RDY_VEOL_POLICY);
		}

		if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
			ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
				AR5K_QUEUE_DFS_MISC(queue));

		if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
			ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
				AR5K_QUEUE_DFS_MISC(queue));

		/*
		 * Set registers by queue type
		 */
		switch (tq->tqi_type) {
		case AR5K_TX_QUEUE_BEACON:
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_DBA_GT |
				AR5K_QCU_MISC_CBREXP_BCN |
				AR5K_QCU_MISC_BCN_ENABLE);

			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S) |
				AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
				AR5K_DCU_MISC_BCN_ENABLE);

			ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
				(AR5K_TUNE_SW_BEACON_RESP -
				AR5K_TUNE_DMA_BEACON_RESP) -
				AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
				AR5K_QCU_RDYTIMECFG_ENABLE,
				AR5K_QUEUE_RDYTIMECFG(queue));
			break;

		case AR5K_TX_QUEUE_CAB:
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_DBA_GT |
				AR5K_QCU_MISC_CBREXP |
				AR5K_QCU_MISC_CBREXP_BCN);

			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S));
			break;

		case AR5K_TX_QUEUE_UAPSD:
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_CBREXP);
			break;

		case AR5K_TX_QUEUE_DATA:
		default:
			break;
		}

		/*
		 * Enable interrupts for this tx queue
		 * in the secondary interrupt mask registers
		 */
		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);

		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);


		/* Update secondary interrupt mask registers:
		 * only queues that are still active may interrupt */
		ah->ah_txq_imr_txok &= ah->ah_txq_status;
		ah->ah_txq_imr_txerr &= ah->ah_txq_status;
		ah->ah_txq_imr_txurn &= ah->ah_txq_status;
		ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
		ah->ah_txq_imr_txeol &= ah->ah_txq_status;

		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
			AR5K_SIMR0_QCU_TXOK) |
			AR5K_REG_SM(ah->ah_txq_imr_txdesc,
			AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
			AR5K_SIMR1_QCU_TXERR) |
			AR5K_REG_SM(ah->ah_txq_imr_txeol,
			AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txurn,
			AR5K_SIMR2_QCU_TXURN), AR5K_SIMR2);
	}

	return 0;
}
457
458/*
459 * Get slot time from DCU
460 */
461unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
462{
463 ATH5K_TRACE(ah->ah_sc);
464 if (ah->ah_version == AR5K_AR5210)
465 return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
466 AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
467 else
468 return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
469}
470
471/*
472 * Set slot time on DCU
473 */
474int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
475{
476 ATH5K_TRACE(ah->ah_sc);
477 if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
478 return -EINVAL;
479
480 if (ah->ah_version == AR5K_AR5210)
481 ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
482 ah->ah_turbo), AR5K_SLOT_TIME);
483 else
484 ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
485
486 return 0;
487}
488
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 7562bf173d3e..a98832364448 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (c) 2007 Nick Kossifidis <mickflemm@gmail.com> 2 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
3 * Copyright (c) 2004, 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org> 3 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
4 * Copyright (c) 2007 Michael Taylor <mike.taylor@apprion.com> 4 * Copyright (c) 2007-2008 Michael Taylor <mike.taylor@apprion.com>
5 * 5 *
6 * Permission to use, copy, modify, and distribute this software for any 6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above 7 * purpose with or without fee is hereby granted, provided that the above
@@ -977,98 +977,6 @@
977#define AR5K_EEPROM_BASE 0x6000 977#define AR5K_EEPROM_BASE 0x6000
978 978
979/* 979/*
980 * Common ar5xxx EEPROM data offsets (set these on AR5K_EEPROM_BASE)
981 */
982#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
983#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
984#define AR5K_EEPROM_MAGIC_5212 0x0000145c /* 5212 */
985#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
986#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
987
988#define AR5K_EEPROM_PROTECT 0x003f /* EEPROM protect status */
989#define AR5K_EEPROM_PROTECT_RD_0_31 0x0001 /* Read protection bit for offsets 0x0 - 0x1f */
990#define AR5K_EEPROM_PROTECT_WR_0_31 0x0002 /* Write protection bit for offsets 0x0 - 0x1f */
991#define AR5K_EEPROM_PROTECT_RD_32_63 0x0004 /* 0x20 - 0x3f */
992#define AR5K_EEPROM_PROTECT_WR_32_63 0x0008
993#define AR5K_EEPROM_PROTECT_RD_64_127 0x0010 /* 0x40 - 0x7f */
994#define AR5K_EEPROM_PROTECT_WR_64_127 0x0020
995#define AR5K_EEPROM_PROTECT_RD_128_191 0x0040 /* 0x80 - 0xbf (regdom) */
996#define AR5K_EEPROM_PROTECT_WR_128_191 0x0080
997#define AR5K_EEPROM_PROTECT_RD_192_207 0x0100 /* 0xc0 - 0xcf */
998#define AR5K_EEPROM_PROTECT_WR_192_207 0x0200
999#define AR5K_EEPROM_PROTECT_RD_208_223 0x0400 /* 0xd0 - 0xdf */
1000#define AR5K_EEPROM_PROTECT_WR_208_223 0x0800
1001#define AR5K_EEPROM_PROTECT_RD_224_239 0x1000 /* 0xe0 - 0xef */
1002#define AR5K_EEPROM_PROTECT_WR_224_239 0x2000
1003#define AR5K_EEPROM_PROTECT_RD_240_255 0x4000 /* 0xf0 - 0xff */
1004#define AR5K_EEPROM_PROTECT_WR_240_255 0x8000
1005#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
1006#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
1007#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
1008#define AR5K_EEPROM_INFO_CKSUM 0xffff
1009#define AR5K_EEPROM_INFO(_n) (AR5K_EEPROM_INFO_BASE + (_n))
1010
1011#define AR5K_EEPROM_VERSION AR5K_EEPROM_INFO(1) /* EEPROM Version */
1012#define AR5K_EEPROM_VERSION_3_0 0x3000 /* No idea what's going on before this version */
1013#define AR5K_EEPROM_VERSION_3_1 0x3001 /* ob/db values for 2Ghz (ar5211_rfregs) */
1014#define AR5K_EEPROM_VERSION_3_2 0x3002 /* different frequency representation (eeprom_bin2freq) */
1015#define AR5K_EEPROM_VERSION_3_3 0x3003 /* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */
1016#define AR5K_EEPROM_VERSION_3_4 0x3004 /* has ee_i_gain ee_cck_ofdm_power_delta (eeprom_read_modes) */
1017#define AR5K_EEPROM_VERSION_4_0 0x4000 /* has ee_misc*, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */
1018#define AR5K_EEPROM_VERSION_4_1 0x4001 /* has ee_margin_tx_rx (eeprom_init) */
1019#define AR5K_EEPROM_VERSION_4_2 0x4002 /* has ee_cck_ofdm_gain_delta (eeprom_init) */
1020#define AR5K_EEPROM_VERSION_4_3 0x4003
1021#define AR5K_EEPROM_VERSION_4_4 0x4004
1022#define AR5K_EEPROM_VERSION_4_5 0x4005
1023#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */
1024#define AR5K_EEPROM_VERSION_4_7 0x4007
1025
1026#define AR5K_EEPROM_MODE_11A 0
1027#define AR5K_EEPROM_MODE_11B 1
1028#define AR5K_EEPROM_MODE_11G 2
1029
1030#define AR5K_EEPROM_HDR AR5K_EEPROM_INFO(2) /* Header that contains the device caps */
1031#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
1032#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
1033#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
1034#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */
1035#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */
1036#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7)
1037#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz (?) */
1038#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
1039
1040#define AR5K_EEPROM_RFKILL_GPIO_SEL 0x0000001c
1041#define AR5K_EEPROM_RFKILL_GPIO_SEL_S 2
1042#define AR5K_EEPROM_RFKILL_POLARITY 0x00000002
1043#define AR5K_EEPROM_RFKILL_POLARITY_S 1
1044
1045/* Newer EEPROMs are using a different offset */
1046#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \
1047 (((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0)
1048
1049#define AR5K_EEPROM_ANT_GAIN(_v) AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3)
1050#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v) ((int8_t)(((_v) >> 8) & 0xff))
1051#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v) ((int8_t)((_v) & 0xff))
1052
1053/* calibration settings */
1054#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
1055#define AR5K_EEPROM_MODES_11B(_v) AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2)
1056#define AR5K_EEPROM_MODES_11G(_v) AR5K_EEPROM_OFF(_v, 0x00da, 0x010d)
1057#define AR5K_EEPROM_CTL(_v) AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128) /* Conformance test limits */
1058
1059/* [3.1 - 3.3] */
1060#define AR5K_EEPROM_OBDB0_2GHZ 0x00ec
1061#define AR5K_EEPROM_OBDB1_2GHZ 0x00ed
1062
1063/* Misc values available since EEPROM 4.0 */
1064#define AR5K_EEPROM_MISC0 0x00c4
1065#define AR5K_EEPROM_EARSTART(_v) ((_v) & 0xfff)
1066#define AR5K_EEPROM_EEMAP(_v) (((_v) >> 14) & 0x3)
1067#define AR5K_EEPROM_MISC1 0x00c5
1068#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
1069#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1)
1070
1071/*
1072 * EEPROM data register 980 * EEPROM data register
1073 */ 981 */
1074#define AR5K_EEPROM_DATA_5211 0x6004 982#define AR5K_EEPROM_DATA_5211 0x6004
@@ -1950,13 +1858,13 @@
1950#define AR5K_PHY_GAIN_OFFSET_RXTX_FLAG 0x00020000 /* RX-TX flag (?) */ 1858#define AR5K_PHY_GAIN_OFFSET_RXTX_FLAG 0x00020000 /* RX-TX flag (?) */
1951 1859
1952/* 1860/*
1953 * Desired size register 1861 * Desired ADC/PGA size register
1954 * (for more infos read ANI patent) 1862 * (for more infos read ANI patent)
1955 */ 1863 */
1956#define AR5K_PHY_DESIRED_SIZE 0x9850 /* Register Address */ 1864#define AR5K_PHY_DESIRED_SIZE 0x9850 /* Register Address */
1957#define AR5K_PHY_DESIRED_SIZE_ADC 0x000000ff /* Mask for ADC desired size */ 1865#define AR5K_PHY_DESIRED_SIZE_ADC 0x000000ff /* Mask for ADC desired size */
1958#define AR5K_PHY_DESIRED_SIZE_PGA 0x0000ff00 /* Mask for PGA desired size */ 1866#define AR5K_PHY_DESIRED_SIZE_PGA 0x0000ff00 /* Mask for PGA desired size */
1959#define AR5K_PHY_DESIRED_SIZE_TOT 0x0ff00000 /* Mask for Total desired size (?) */ 1867#define AR5K_PHY_DESIRED_SIZE_TOT 0x0ff00000 /* Mask for Total desired size */
1960 1868
1961/* 1869/*
1962 * PHY signal register 1870 * PHY signal register
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
new file mode 100644
index 000000000000..f5c3de890cdb
--- /dev/null
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -0,0 +1,925 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
5 * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 *
20 */
21
22#define _ATH5K_RESET
23
24/*****************************\
25 Reset functions and helpers
26\*****************************/
27
28#include <linux/pci.h>
29#include "ath5k.h"
30#include "reg.h"
31#include "base.h"
32#include "debug.h"
33
34/**
35 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
36 *
37 * @ah: the &struct ath5k_hw
38 * @channel: the currently set channel upon reset
39 *
40 * Write the OFDM timings for the AR5212 upon reset. This is a helper for
41 * ath5k_hw_reset(). This seems to tune the PLL a specified frequency
42 * depending on the bandwidth of the channel.
43 *
44 */
45static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
46 struct ieee80211_channel *channel)
47{
48 /* Get exponent and mantissa and set it */
49 u32 coef_scaled, coef_exp, coef_man,
50 ds_coef_exp, ds_coef_man, clock;
51
52 if (!(ah->ah_version == AR5K_AR5212) ||
53 !(channel->hw_value & CHANNEL_OFDM))
54 BUG();
55
56 /* Seems there are two PLLs, one for baseband sampling and one
57 * for tuning. Tuning basebands are 40 MHz or 80MHz when in
58 * turbo. */
59 clock = channel->hw_value & CHANNEL_TURBO ? 80 : 40;
60 coef_scaled = ((5 * (clock << 24)) / 2) /
61 channel->center_freq;
62
63 for (coef_exp = 31; coef_exp > 0; coef_exp--)
64 if ((coef_scaled >> coef_exp) & 0x1)
65 break;
66
67 if (!coef_exp)
68 return -EINVAL;
69
70 coef_exp = 14 - (coef_exp - 24);
71 coef_man = coef_scaled +
72 (1 << (24 - coef_exp - 1));
73 ds_coef_man = coef_man >> (24 - coef_exp);
74 ds_coef_exp = coef_exp - 16;
75
76 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
77 AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
78 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
79 AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);
80
81 return 0;
82}
83
84
85/*
86 * index into rates for control rates, we can set it up like this because
87 * this is only used for AR5212 and we know it supports G mode
88 */
89static int control_rates[] =
90 { 0, 1, 1, 1, 4, 4, 6, 6, 8, 8, 8, 8 };
91
92/**
93 * ath5k_hw_write_rate_duration - set rate duration during hw resets
94 *
95 * @ah: the &struct ath5k_hw
96 * @mode: one of enum ath5k_driver_mode
97 *
98 * Write the rate duration table upon hw reset. This is a helper for
99 * ath5k_hw_reset(). It seems all this is doing is setting an ACK timeout for
100 * the hardware for the current mode for each rate. The rates which are capable
101 * of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have another
102 * register for the short preamble ACK timeout calculation.
103 */
104static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
105 unsigned int mode)
106{
107 struct ath5k_softc *sc = ah->ah_sc;
108 struct ieee80211_rate *rate;
109 unsigned int i;
110
111 /* Write rate duration table */
112 for (i = 0; i < sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates; i++) {
113 u32 reg;
114 u16 tx_time;
115
116 rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[control_rates[i]];
117
118 /* Set ACK timeout */
119 reg = AR5K_RATE_DUR(rate->hw_value);
120
121 /* An ACK frame consists of 10 bytes. If you add the FCS,
122 * which ieee80211_generic_frame_duration() adds,
123 * its 14 bytes. Note we use the control rate and not the
124 * actual rate for this rate. See mac80211 tx.c
125 * ieee80211_duration() for a brief description of
126 * what rate we should choose to TX ACKs. */
127 tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw,
128 sc->vif, 10, rate));
129
130 ath5k_hw_reg_write(ah, tx_time, reg);
131
132 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
133 continue;
134
135 /*
136 * We're not distinguishing short preamble here,
137 * This is true, all we'll get is a longer value here
138 * which is not necessarilly bad. We could use
139 * export ieee80211_frame_duration() but that needs to be
140 * fixed first to be properly used by mac802111 drivers:
141 *
142 * - remove erp stuff and let the routine figure ofdm
143 * erp rates
144 * - remove passing argument ieee80211_local as
145 * drivers don't have access to it
146 * - move drivers using ieee80211_generic_frame_duration()
147 * to this
148 */
149 ath5k_hw_reg_write(ah, tx_time,
150 reg + (AR5K_SET_SHORT_PREAMBLE << 2));
151 }
152}
153
154/*
155 * Reset chipset
156 */
157static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
158{
159 int ret;
160 u32 mask = val ? val : ~0U;
161
162 ATH5K_TRACE(ah->ah_sc);
163
164 /* Read-and-clear RX Descriptor Pointer*/
165 ath5k_hw_reg_read(ah, AR5K_RXDP);
166
167 /*
168 * Reset the device and wait until success
169 */
170 ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
171
172 /* Wait at least 128 PCI clocks */
173 udelay(15);
174
175 if (ah->ah_version == AR5K_AR5210) {
176 val &= AR5K_RESET_CTL_CHIP;
177 mask &= AR5K_RESET_CTL_CHIP;
178 } else {
179 val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
180 mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
181 }
182
183 ret = ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, val, false);
184
185 /*
186 * Reset configuration register (for hw byte-swap). Note that this
187 * is only set for big endian. We do the necessary magic in
188 * AR5K_INIT_CFG.
189 */
190 if ((val & AR5K_RESET_CTL_PCU) == 0)
191 ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
192
193 return ret;
194}
195
196/*
197 * Sleep control
198 */
199int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
200 bool set_chip, u16 sleep_duration)
201{
202 unsigned int i;
203 u32 staid, data;
204
205 ATH5K_TRACE(ah->ah_sc);
206 staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
207
208 switch (mode) {
209 case AR5K_PM_AUTO:
210 staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
211 /* fallthrough */
212 case AR5K_PM_NETWORK_SLEEP:
213 if (set_chip)
214 ath5k_hw_reg_write(ah,
215 AR5K_SLEEP_CTL_SLE_ALLOW |
216 sleep_duration,
217 AR5K_SLEEP_CTL);
218
219 staid |= AR5K_STA_ID1_PWR_SV;
220 break;
221
222 case AR5K_PM_FULL_SLEEP:
223 if (set_chip)
224 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP,
225 AR5K_SLEEP_CTL);
226
227 staid |= AR5K_STA_ID1_PWR_SV;
228 break;
229
230 case AR5K_PM_AWAKE:
231
232 staid &= ~AR5K_STA_ID1_PWR_SV;
233
234 if (!set_chip)
235 goto commit;
236
237 /* Preserve sleep duration */
238 data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
239 if (data & 0xffc00000)
240 data = 0;
241 else
242 data = data & 0xfffcffff;
243
244 ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
245 udelay(15);
246
247 for (i = 50; i > 0; i--) {
248 /* Check if the chip did wake up */
249 if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
250 AR5K_PCICFG_SPWR_DN) == 0)
251 break;
252
253 /* Wait a bit and retry */
254 udelay(200);
255 ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
256 }
257
258 /* Fail if the chip didn't wake up */
259 if (i <= 0)
260 return -EIO;
261
262 break;
263
264 default:
265 return -EINVAL;
266 }
267
268commit:
269 ah->ah_power_mode = mode;
270 ath5k_hw_reg_write(ah, staid, AR5K_STA_ID1);
271
272 return 0;
273}
274
275/*
276 * Bring up MAC + PHY Chips
277 */
278int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
279{
280 struct pci_dev *pdev = ah->ah_sc->pdev;
281 u32 turbo, mode, clock, bus_flags;
282 int ret;
283
284 turbo = 0;
285 mode = 0;
286 clock = 0;
287
288 ATH5K_TRACE(ah->ah_sc);
289
290 /* Wakeup the device */
291 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
292 if (ret) {
293 ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
294 return ret;
295 }
296
297 if (ah->ah_version != AR5K_AR5210) {
298 /*
299 * Get channel mode flags
300 */
301
302 if (ah->ah_radio >= AR5K_RF5112) {
303 mode = AR5K_PHY_MODE_RAD_RF5112;
304 clock = AR5K_PHY_PLL_RF5112;
305 } else {
306 mode = AR5K_PHY_MODE_RAD_RF5111; /*Zero*/
307 clock = AR5K_PHY_PLL_RF5111; /*Zero*/
308 }
309
310 if (flags & CHANNEL_2GHZ) {
311 mode |= AR5K_PHY_MODE_FREQ_2GHZ;
312 clock |= AR5K_PHY_PLL_44MHZ;
313
314 if (flags & CHANNEL_CCK) {
315 mode |= AR5K_PHY_MODE_MOD_CCK;
316 } else if (flags & CHANNEL_OFDM) {
317 /* XXX Dynamic OFDM/CCK is not supported by the
318 * AR5211 so we set MOD_OFDM for plain g (no
319 * CCK headers) operation. We need to test
320 * this, 5211 might support ofdm-only g after
321 * all, there are also initial register values
322 * in the code for g mode (see initvals.c). */
323 if (ah->ah_version == AR5K_AR5211)
324 mode |= AR5K_PHY_MODE_MOD_OFDM;
325 else
326 mode |= AR5K_PHY_MODE_MOD_DYN;
327 } else {
328 ATH5K_ERR(ah->ah_sc,
329 "invalid radio modulation mode\n");
330 return -EINVAL;
331 }
332 } else if (flags & CHANNEL_5GHZ) {
333 mode |= AR5K_PHY_MODE_FREQ_5GHZ;
334 clock |= AR5K_PHY_PLL_40MHZ;
335
336 if (flags & CHANNEL_OFDM)
337 mode |= AR5K_PHY_MODE_MOD_OFDM;
338 else {
339 ATH5K_ERR(ah->ah_sc,
340 "invalid radio modulation mode\n");
341 return -EINVAL;
342 }
343 } else {
344 ATH5K_ERR(ah->ah_sc, "invalid radio frequency mode\n");
345 return -EINVAL;
346 }
347
348 if (flags & CHANNEL_TURBO)
349 turbo = AR5K_PHY_TURBO_MODE | AR5K_PHY_TURBO_SHORT;
350 } else { /* Reset the device */
351
352 /* ...enable Atheros turbo mode if requested */
353 if (flags & CHANNEL_TURBO)
354 ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE,
355 AR5K_PHY_TURBO);
356 }
357
358 /* reseting PCI on PCI-E cards results card to hang
359 * and always return 0xffff... so we ingore that flag
360 * for PCI-E cards */
361 bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI;
362
363 /* Reset chipset */
364 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
365 AR5K_RESET_CTL_BASEBAND | bus_flags);
366 if (ret) {
367 ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n");
368 return -EIO;
369 }
370
371 if (ah->ah_version == AR5K_AR5210)
372 udelay(2300);
373
374 /* ...wakeup again!*/
375 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
376 if (ret) {
377 ATH5K_ERR(ah->ah_sc, "failed to resume the MAC Chip\n");
378 return ret;
379 }
380
381 /* ...final warm reset */
382 if (ath5k_hw_nic_reset(ah, 0)) {
383 ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
384 return -EIO;
385 }
386
387 if (ah->ah_version != AR5K_AR5210) {
388 /* ...set the PHY operating mode */
389 ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
390 udelay(300);
391
392 ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE);
393 ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO);
394 }
395
396 return 0;
397}
398
399/*
400 * Main reset function
401 */
402int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
403 struct ieee80211_channel *channel, bool change_channel)
404{
405 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
406 struct pci_dev *pdev = ah->ah_sc->pdev;
407 u32 data, s_seq, s_ant, s_led[3], dma_size;
408 unsigned int i, mode, freq, ee_mode, ant[2];
409 int ret;
410
411 ATH5K_TRACE(ah->ah_sc);
412
413 s_seq = 0;
414 s_ant = 0;
415 ee_mode = 0;
416 freq = 0;
417 mode = 0;
418
419 /*
420 * Save some registers before a reset
421 */
422 /*DCU/Antenna selection not available on 5210*/
423 if (ah->ah_version != AR5K_AR5210) {
424 if (change_channel) {
425 /* Seq number for queue 0 -do this for all queues ? */
426 s_seq = ath5k_hw_reg_read(ah,
427 AR5K_QUEUE_DFS_SEQNUM(0));
428 /*Default antenna*/
429 s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
430 }
431 }
432
433 /*GPIOs*/
434 s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) & AR5K_PCICFG_LEDSTATE;
435 s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
436 s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
437
438 if (change_channel && ah->ah_rf_banks != NULL)
439 ath5k_hw_get_rf_gain(ah);
440
441
442 /*Wakeup the device*/
443 ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
444 if (ret)
445 return ret;
446
447 /*
448 * Initialize operating mode
449 */
450 ah->ah_op_mode = op_mode;
451
452 /*
453 * 5111/5112 Settings
454 * 5210 only comes with RF5110
455 */
456 if (ah->ah_version != AR5K_AR5210) {
457 if (ah->ah_radio != AR5K_RF5111 &&
458 ah->ah_radio != AR5K_RF5112 &&
459 ah->ah_radio != AR5K_RF5413 &&
460 ah->ah_radio != AR5K_RF2413 &&
461 ah->ah_radio != AR5K_RF2425) {
462 ATH5K_ERR(ah->ah_sc,
463 "invalid phy radio: %u\n", ah->ah_radio);
464 return -EINVAL;
465 }
466
467 switch (channel->hw_value & CHANNEL_MODES) {
468 case CHANNEL_A:
469 mode = AR5K_MODE_11A;
470 freq = AR5K_INI_RFGAIN_5GHZ;
471 ee_mode = AR5K_EEPROM_MODE_11A;
472 break;
473 case CHANNEL_G:
474 mode = AR5K_MODE_11G;
475 freq = AR5K_INI_RFGAIN_2GHZ;
476 ee_mode = AR5K_EEPROM_MODE_11G;
477 break;
478 case CHANNEL_B:
479 mode = AR5K_MODE_11B;
480 freq = AR5K_INI_RFGAIN_2GHZ;
481 ee_mode = AR5K_EEPROM_MODE_11B;
482 break;
483 case CHANNEL_T:
484 mode = AR5K_MODE_11A_TURBO;
485 freq = AR5K_INI_RFGAIN_5GHZ;
486 ee_mode = AR5K_EEPROM_MODE_11A;
487 break;
488 /*Is this ok on 5211 too ?*/
489 case CHANNEL_TG:
490 mode = AR5K_MODE_11G_TURBO;
491 freq = AR5K_INI_RFGAIN_2GHZ;
492 ee_mode = AR5K_EEPROM_MODE_11G;
493 break;
494 case CHANNEL_XR:
495 if (ah->ah_version == AR5K_AR5211) {
496 ATH5K_ERR(ah->ah_sc,
497 "XR mode not available on 5211");
498 return -EINVAL;
499 }
500 mode = AR5K_MODE_XR;
501 freq = AR5K_INI_RFGAIN_5GHZ;
502 ee_mode = AR5K_EEPROM_MODE_11A;
503 break;
504 default:
505 ATH5K_ERR(ah->ah_sc,
506 "invalid channel: %d\n", channel->center_freq);
507 return -EINVAL;
508 }
509
510 /* PHY access enable */
511 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
512
513 }
514
515 ret = ath5k_hw_write_initvals(ah, mode, change_channel);
516 if (ret)
517 return ret;
518
519 /*
520 * 5211/5212 Specific
521 */
522 if (ah->ah_version != AR5K_AR5210) {
523 /*
524 * Write initial RF gain settings
525 * This should work for both 5111/5112
526 */
527 ret = ath5k_hw_rfgain(ah, freq);
528 if (ret)
529 return ret;
530
531 mdelay(1);
532
533 /*
534 * Write some more initial register settings
535 */
536 if (ah->ah_version == AR5K_AR5212) {
537 ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
538
539 if (channel->hw_value == CHANNEL_G)
540 if (ah->ah_mac_srev < AR5K_SREV_VER_AR2413)
541 ath5k_hw_reg_write(ah, 0x00f80d80,
542 0x994c);
543 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2424)
544 ath5k_hw_reg_write(ah, 0x00380140,
545 0x994c);
546 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2425)
547 ath5k_hw_reg_write(ah, 0x00fc0ec0,
548 0x994c);
549 else /* 2425 */
550 ath5k_hw_reg_write(ah, 0x00fc0fc0,
551 0x994c);
552 else
553 ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
554
555 /* Some bits are disabled here, we know nothing about
556 * register 0xa228 yet, most of the times this ends up
557 * with a value 0x9b5 -haven't seen any dump with
558 * a different value- */
559 /* Got this from decompiling binary HAL */
560 data = ath5k_hw_reg_read(ah, 0xa228);
561 data &= 0xfffffdff;
562 ath5k_hw_reg_write(ah, data, 0xa228);
563
564 data = ath5k_hw_reg_read(ah, 0xa228);
565 data &= 0xfffe03ff;
566 ath5k_hw_reg_write(ah, data, 0xa228);
567 data = 0;
568
569 /* Just write 0x9b5 ? */
570 /* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
571 ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
572 ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
573 ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL);
574 }
575
576 /* Fix for first revision of the RF5112 RF chipset */
577 if (ah->ah_radio >= AR5K_RF5112 &&
578 ah->ah_radio_5ghz_revision <
579 AR5K_SREV_RAD_5112A) {
580 ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
581 AR5K_PHY_CCKTXCTL);
582 if (channel->hw_value & CHANNEL_5GHZ)
583 data = 0xffb81020;
584 else
585 data = 0xffb80d20;
586 ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
587 data = 0;
588 }
589
590 /*
591 * Set TX power (FIXME)
592 */
593 ret = ath5k_hw_txpower(ah, channel, AR5K_TUNE_DEFAULT_TXPOWER);
594 if (ret)
595 return ret;
596
597 /* Write rate duration table only on AR5212 and if
598 * virtual interface has already been brought up
599 * XXX: rethink this after new mode changes to
600 * mac80211 are integrated */
601 if (ah->ah_version == AR5K_AR5212 &&
602 ah->ah_sc->vif != NULL)
603 ath5k_hw_write_rate_duration(ah, mode);
604
605 /*
606 * Write RF registers
607 */
608 ret = ath5k_hw_rfregs(ah, channel, mode);
609 if (ret)
610 return ret;
611
612 /*
613 * Configure additional registers
614 */
615
616 /* Write OFDM timings on 5212*/
617 if (ah->ah_version == AR5K_AR5212 &&
618 channel->hw_value & CHANNEL_OFDM) {
619 ret = ath5k_hw_write_ofdm_timings(ah, channel);
620 if (ret)
621 return ret;
622 }
623
624 /*Enable/disable 802.11b mode on 5111
625 (enable 2111 frequency converter + CCK)*/
626 if (ah->ah_radio == AR5K_RF5111) {
627 if (mode == AR5K_MODE_11B)
628 AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
629 AR5K_TXCFG_B_MODE);
630 else
631 AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
632 AR5K_TXCFG_B_MODE);
633 }
634
635 /*
636 * Set channel and calibrate the PHY
637 */
638 ret = ath5k_hw_channel(ah, channel);
639 if (ret)
640 return ret;
641
642 /* Set antenna mode */
643 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_ANT_CTL,
644 ah->ah_antenna[ee_mode][0], 0xfffffc06);
645
646 /*
647 * In case a fixed antenna was set as default
648 * write the same settings on both AR5K_PHY_ANT_SWITCH_TABLE
649 * registers.
650 */
651 if (s_ant != 0) {
652 if (s_ant == AR5K_ANT_FIXED_A) /* 1 - Main */
653 ant[0] = ant[1] = AR5K_ANT_FIXED_A;
654 else /* 2 - Aux */
655 ant[0] = ant[1] = AR5K_ANT_FIXED_B;
656 } else {
657 ant[0] = AR5K_ANT_FIXED_A;
658 ant[1] = AR5K_ANT_FIXED_B;
659 }
660
661 ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[0]],
662 AR5K_PHY_ANT_SWITCH_TABLE_0);
663 ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[1]],
664 AR5K_PHY_ANT_SWITCH_TABLE_1);
665
666 /* Commit values from EEPROM */
667 if (ah->ah_radio == AR5K_RF5111)
668 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
669 AR5K_PHY_FRAME_CTL_TX_CLIP, ee->ee_tx_clip);
670
671 ath5k_hw_reg_write(ah,
672 AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
673 AR5K_PHY_NFTHRES);
674
675 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_SETTLING,
676 (ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
677 0xffffc07f);
678 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
679 (ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000,
680 0xfffc0fff);
681 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
682 (ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
683 ((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00),
684 0xffff0000);
685
686 ath5k_hw_reg_write(ah,
687 (ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
688 (ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
689 (ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
690 (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
691
692 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_RF_CTL3,
693 ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff);
694 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_NF,
695 (ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff);
696 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_OFDM_SELFCORR, 4, 0xffffff01);
697
698 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
699 AR5K_PHY_IQ_CORR_ENABLE |
700 (ee->ee_i_cal[ee_mode] << AR5K_PHY_IQ_CORR_Q_I_COFF_S) |
701 ee->ee_q_cal[ee_mode]);
702
703 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
704 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
705 AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
706 ee->ee_margin_tx_rx[ee_mode]);
707
708 } else {
709 mdelay(1);
710 /* Disable phy and wait */
711 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
712 mdelay(1);
713 }
714
715 /*
716 * Restore saved values
717 */
718 /*DCU/Antenna selection not available on 5210*/
719 if (ah->ah_version != AR5K_AR5210) {
720 ath5k_hw_reg_write(ah, s_seq, AR5K_QUEUE_DFS_SEQNUM(0));
721 ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA);
722 }
723 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]);
724 ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
725 ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
726
727 /*
728 * Misc
729 */
730 /* XXX: add ah->aid once mac80211 gives this to us */
731 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
732
733 ath5k_hw_set_opmode(ah);
734 /*PISR/SISR Not available on 5210*/
735 if (ah->ah_version != AR5K_AR5210) {
736 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
737 /* If we later allow tuning for this, store into sc structure */
738 data = AR5K_TUNE_RSSI_THRES |
739 AR5K_TUNE_BMISS_THRES << AR5K_RSSI_THR_BMISS_S;
740 ath5k_hw_reg_write(ah, data, AR5K_RSSI_THR);
741 }
742
743 /*
744 * Set Rx/Tx DMA Configuration
745 *
746 * Set maximum DMA size (512) except for PCI-E cards since
747 * it causes rx overruns and tx errors (tested on 5424 but since
748 * rx overruns also occur on 5416/5418 with madwifi we set 128
749 * for all PCI-E cards to be safe).
750 *
751 * In dumps this is 128 for allchips.
752 *
753 * XXX: need to check 5210 for this
754 * TODO: Check out tx triger level, it's always 64 on dumps but I
755 * guess we can tweak it and see how it goes ;-)
756 */
757 dma_size = (pdev->is_pcie) ? AR5K_DMASIZE_128B : AR5K_DMASIZE_512B;
758 if (ah->ah_version != AR5K_AR5210) {
759 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
760 AR5K_TXCFG_SDMAMR, dma_size);
761 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
762 AR5K_RXCFG_SDMAMW, dma_size);
763 }
764
765 /*
766 * Enable the PHY and wait until completion
767 */
768 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
769
770 /*
771 * On 5211+ read activation -> rx delay
772 * and use it.
773 */
774 if (ah->ah_version != AR5K_AR5210) {
775 data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
776 AR5K_PHY_RX_DELAY_M;
777 data = (channel->hw_value & CHANNEL_CCK) ?
778 ((data << 2) / 22) : (data / 10);
779
780 udelay(100 + (2 * data));
781 data = 0;
782 } else {
783 mdelay(1);
784 }
785
786 /*
787 * Perform ADC test (?)
788 */
789 data = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
790 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
791 for (i = 0; i <= 20; i++) {
792 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
793 break;
794 udelay(200);
795 }
796 ath5k_hw_reg_write(ah, data, AR5K_PHY_TST1);
797 data = 0;
798
799 /*
800 * Start automatic gain calibration
801 *
802 * During AGC calibration RX path is re-routed to
803 * a signal detector so we don't receive anything.
804 *
805 * This method is used to calibrate some static offsets
806 * used together with on-the fly I/Q calibration (the
807 * one performed via ath5k_hw_phy_calibrate), that doesn't
808 * interrupt rx path.
809 *
810 * If we are in a noisy environment AGC calibration may time
811 * out.
812 */
813 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
814 AR5K_PHY_AGCCTL_CAL);
815
816 /* At the same time start I/Q calibration for QAM constellation
817 * -no need for CCK- */
818 ah->ah_calibration = false;
819 if (!(mode == AR5K_MODE_11B)) {
820 ah->ah_calibration = true;
821 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
822 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
823 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
824 AR5K_PHY_IQ_RUN);
825 }
826
827 /* Wait for gain calibration to finish (we check for I/Q calibration
828 * during ath5k_phy_calibrate) */
829 if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
830 AR5K_PHY_AGCCTL_CAL, 0, false)) {
831 ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
832 channel->center_freq);
833 return -EAGAIN;
834 }
835
836 /*
837 * Start noise floor calibration
838 *
839 * If we run NF calibration before AGC, it always times out.
840 * Binary HAL starts NF and AGC calibration at the same time
841 * and only waits for AGC to finish. I believe that's wrong because
842 * during NF calibration, rx path is also routed to a detector, so if
843 * it doesn't finish we won't have RX.
844 *
845 * XXX: Find an interval that's OK for all cards...
846 */
847 ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
848 if (ret)
849 return ret;
850
851 /*
852 * Reset queues and start beacon timers at the end of the reset routine
853 */
854 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
855 /*No QCU on 5210*/
856 if (ah->ah_version != AR5K_AR5210)
857 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(i), i);
858
859 ret = ath5k_hw_reset_tx_queue(ah, i);
860 if (ret) {
861 ATH5K_ERR(ah->ah_sc,
862 "failed to reset TX queue #%d\n", i);
863 return ret;
864 }
865 }
866
867 /* Pre-enable interrupts on 5211/5212*/
868 if (ah->ah_version != AR5K_AR5210)
869 ath5k_hw_set_imr(ah, AR5K_INT_RX | AR5K_INT_TX |
870 AR5K_INT_FATAL);
871
872 /*
873 * Set RF kill flags if supported by the device (read from the EEPROM)
874 * Disable gpio_intr for now since it results system hang.
875 * TODO: Handle this in ath5k_intr
876 */
877#if 0
878 if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) {
879 ath5k_hw_set_gpio_input(ah, 0);
880 ah->ah_gpio[0] = ath5k_hw_get_gpio(ah, 0);
881 if (ah->ah_gpio[0] == 0)
882 ath5k_hw_set_gpio_intr(ah, 0, 1);
883 else
884 ath5k_hw_set_gpio_intr(ah, 0, 0);
885 }
886#endif
887
888 /*
889 * Set the 32MHz reference clock on 5212 phy clock sleep register
890 *
891 * TODO: Find out how to switch to external 32Khz clock to save power
892 */
893 if (ah->ah_version == AR5K_AR5212) {
894 ath5k_hw_reg_write(ah, AR5K_PHY_SCR_32MHZ, AR5K_PHY_SCR);
895 ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
896 ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ, AR5K_PHY_SCAL);
897 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
898 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
899 ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);
900
901 data = ath5k_hw_reg_read(ah, AR5K_USEC_5211) & 0xffffc07f ;
902 data |= (ah->ah_phy_spending == AR5K_PHY_SPENDING_18) ?
903 0x00000f80 : 0x00001380 ;
904 ath5k_hw_reg_write(ah, data, AR5K_USEC_5211);
905 data = 0;
906 }
907
908 if (ah->ah_version == AR5K_AR5212) {
909 ath5k_hw_reg_write(ah, 0x000100aa, 0x8118);
910 ath5k_hw_reg_write(ah, 0x00003210, 0x811c);
911 ath5k_hw_reg_write(ah, 0x00000052, 0x8108);
912 if (ah->ah_mac_srev >= AR5K_SREV_VER_AR2413)
913 ath5k_hw_reg_write(ah, 0x00000004, 0x8120);
914 }
915
916 /*
917 * Disable beacons and reset the register
918 */
919 AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE |
920 AR5K_BEACON_RESET_TSF);
921
922 return 0;
923}
924
925#undef _ATH5K_RESET
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig
index 9e19dcceb3a2..80a692430413 100644
--- a/drivers/net/wireless/ath9k/Kconfig
+++ b/drivers/net/wireless/ath9k/Kconfig
@@ -1,6 +1,9 @@
1config ATH9K 1config ATH9K
2 tristate "Atheros 802.11n wireless cards support" 2 tristate "Atheros 802.11n wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211 3 depends on PCI && MAC80211 && WLAN_80211
4 select MAC80211_LEDS
5 select LEDS_CLASS
6 select NEW_LEDS
4 ---help--- 7 ---help---
5 This module adds support for wireless adapters based on 8 This module adds support for wireless adapters based on
6 Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets. 9 Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets.
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index d1b0fbae5a32..0e897c276858 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -144,6 +144,7 @@ struct ath_desc {
144#define ATH9K_TXDESC_EXT_AND_CTL 0x0080 144#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
145#define ATH9K_TXDESC_VMF 0x0100 145#define ATH9K_TXDESC_VMF 0x0100
146#define ATH9K_TXDESC_FRAG_IS_ON 0x0200 146#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
147#define ATH9K_TXDESC_CAB 0x0400
147 148
148#define ATH9K_RXDESC_INTREQ 0x0020 149#define ATH9K_RXDESC_INTREQ 0x0020
149 150
@@ -564,8 +565,6 @@ enum ath9k_cipher {
564#define CTL_5GHT40 8 565#define CTL_5GHT40 8
565 566
566#define AR_EEPROM_MAC(i) (0x1d+(i)) 567#define AR_EEPROM_MAC(i) (0x1d+(i))
567#define EEP_SCALE 100
568#define EEP_DELTA 10
569 568
570#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c 569#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c
571#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2 570#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2
@@ -606,9 +605,6 @@ struct ath9k_country_entry {
606#define REG_CLR_BIT(_a, _r, _f) \ 605#define REG_CLR_BIT(_a, _r, _f) \
607 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f) 606 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
608 607
609#define ATH9K_COMP_BUF_MAX_SIZE 9216
610#define ATH9K_COMP_BUF_ALIGN_SIZE 512
611
612#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001 608#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
613 609
614#define INIT_AIFS 2 610#define INIT_AIFS 2
@@ -632,12 +628,6 @@ struct ath9k_country_entry {
632 (IEEE80211_WEP_IVLEN + \ 628 (IEEE80211_WEP_IVLEN + \
633 IEEE80211_WEP_KIDLEN + \ 629 IEEE80211_WEP_KIDLEN + \
634 IEEE80211_WEP_CRCLEN)) 630 IEEE80211_WEP_CRCLEN))
635#define IEEE80211_MAX_LEN (2300 + FCS_LEN + \
636 (IEEE80211_WEP_IVLEN + \
637 IEEE80211_WEP_KIDLEN + \
638 IEEE80211_WEP_CRCLEN))
639
640#define MAX_REG_ADD_COUNT 129
641#define MAX_RATE_POWER 63 631#define MAX_RATE_POWER 63
642 632
643enum ath9k_power_mode { 633enum ath9k_power_mode {
@@ -707,13 +697,6 @@ enum phytype {
707}; 697};
708#define PHY_CCK PHY_DS 698#define PHY_CCK PHY_DS
709 699
710enum start_adhoc_option {
711 START_ADHOC_NO_11A,
712 START_ADHOC_PER_11D,
713 START_ADHOC_IN_11A,
714 START_ADHOC_IN_11B,
715};
716
717enum ath9k_tp_scale { 700enum ath9k_tp_scale {
718 ATH9K_TP_SCALE_MAX = 0, 701 ATH9K_TP_SCALE_MAX = 0,
719 ATH9K_TP_SCALE_50, 702 ATH9K_TP_SCALE_50,
@@ -769,14 +752,11 @@ struct ath9k_node_stats {
769 752
770#define ATH9K_RSSI_EP_MULTIPLIER (1<<7) 753#define ATH9K_RSSI_EP_MULTIPLIER (1<<7)
771 754
772enum ath9k_gpio_output_mux_type { 755#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
773 ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT, 756#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
774 ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED, 757#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
775 ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED, 758#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
776 ATH9K_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED, 759#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
777 ATH9K_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
778 ATH9K_GPIO_OUTPUT_MUX_NUM_ENTRIES
779};
780 760
781enum { 761enum {
782 ATH9K_RESET_POWER_ON, 762 ATH9K_RESET_POWER_ON,
@@ -790,19 +770,20 @@ struct ath_hal {
790 u32 ah_magic; 770 u32 ah_magic;
791 u16 ah_devid; 771 u16 ah_devid;
792 u16 ah_subvendorid; 772 u16 ah_subvendorid;
793 struct ath_softc *ah_sc;
794 void __iomem *ah_sh;
795 u16 ah_countryCode;
796 u32 ah_macVersion; 773 u32 ah_macVersion;
797 u16 ah_macRev; 774 u16 ah_macRev;
798 u16 ah_phyRev; 775 u16 ah_phyRev;
799 u16 ah_analog5GhzRev; 776 u16 ah_analog5GhzRev;
800 u16 ah_analog2GhzRev; 777 u16 ah_analog2GhzRev;
801 u8 ah_decompMask[ATH9K_DECOMP_MASK_SIZE]; 778
802 u32 ah_flags; 779 void __iomem *ah_sh;
780 struct ath_softc *ah_sc;
803 enum ath9k_opmode ah_opmode; 781 enum ath9k_opmode ah_opmode;
804 struct ath9k_ops_config ah_config; 782 struct ath9k_ops_config ah_config;
805 struct ath9k_hw_capabilities ah_caps; 783 struct ath9k_hw_capabilities ah_caps;
784
785 u16 ah_countryCode;
786 u32 ah_flags;
806 int16_t ah_powerLimit; 787 int16_t ah_powerLimit;
807 u16 ah_maxPowerLevel; 788 u16 ah_maxPowerLevel;
808 u32 ah_tpScale; 789 u32 ah_tpScale;
@@ -812,15 +793,17 @@ struct ath_hal {
812 u16 ah_currentRD5G; 793 u16 ah_currentRD5G;
813 u16 ah_currentRD2G; 794 u16 ah_currentRD2G;
814 char ah_iso[4]; 795 char ah_iso[4];
815 enum start_adhoc_option ah_adHocMode; 796
816 bool ah_commonMode;
817 struct ath9k_channel ah_channels[150]; 797 struct ath9k_channel ah_channels[150];
818 u32 ah_nchan;
819 struct ath9k_channel *ah_curchan; 798 struct ath9k_channel *ah_curchan;
820 u16 ah_rfsilent; 799 u32 ah_nchan;
821 bool ah_rfkillEnabled; 800
822 bool ah_isPciExpress; 801 bool ah_isPciExpress;
823 u16 ah_txTrigLevel; 802 u16 ah_txTrigLevel;
803 u16 ah_rfsilent;
804 u32 ah_rfkill_gpio;
805 u32 ah_rfkill_polarity;
806
824#ifndef ATH_NF_PER_CHAN 807#ifndef ATH_NF_PER_CHAN
825 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; 808 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
826#endif 809#endif
@@ -853,7 +836,7 @@ bool ath9k_regd_init_channels(struct ath_hal *ah,
853u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags); 836u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
854enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, 837enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah,
855 enum ath9k_int ints); 838 enum ath9k_int ints);
856bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode, 839bool ath9k_hw_reset(struct ath_hal *ah,
857 struct ath9k_channel *chan, 840 struct ath9k_channel *chan,
858 enum ath9k_ht_macmode macmode, 841 enum ath9k_ht_macmode macmode,
859 u8 txchainmask, u8 rxchainmask, 842 u8 txchainmask, u8 rxchainmask,
@@ -1018,4 +1001,9 @@ void ath9k_hw_get_channel_centers(struct ath_hal *ah,
1018bool ath9k_get_channel_edges(struct ath_hal *ah, 1001bool ath9k_get_channel_edges(struct ath_hal *ah,
1019 u16 flags, u16 *low, 1002 u16 flags, u16 *low,
1020 u16 *high); 1003 u16 *high);
1004void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
1005 u32 ah_signal_type);
1006void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 value);
1007u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio);
1008void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio);
1021#endif 1009#endif
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index caf569401a34..c43fd5861163 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -33,7 +33,7 @@ static int ath_beaconq_config(struct ath_softc *sc)
33 struct ath9k_tx_queue_info qi; 33 struct ath9k_tx_queue_info qi;
34 34
35 ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi); 35 ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi);
36 if (sc->sc_opmode == ATH9K_M_HOSTAP) { 36 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) {
37 /* Always burst out beacon and CAB traffic. */ 37 /* Always burst out beacon and CAB traffic. */
38 qi.tqi_aifs = 1; 38 qi.tqi_aifs = 1;
39 qi.tqi_cwmin = 0; 39 qi.tqi_cwmin = 0;
@@ -85,7 +85,7 @@ static void ath_beacon_setup(struct ath_softc *sc,
85 85
86 flags = ATH9K_TXDESC_NOACK; 86 flags = ATH9K_TXDESC_NOACK;
87 87
88 if (sc->sc_opmode == ATH9K_M_IBSS && 88 if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS &&
89 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 89 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
90 ds->ds_link = bf->bf_daddr; /* self-linked */ 90 ds->ds_link = bf->bf_daddr; /* self-linked */
91 flags |= ATH9K_TXDESC_VEOL; 91 flags |= ATH9K_TXDESC_VEOL;
@@ -111,24 +111,24 @@ static void ath_beacon_setup(struct ath_softc *sc,
111 rix = 0; 111 rix = 0;
112 rt = sc->sc_currates; 112 rt = sc->sc_currates;
113 rate = rt->info[rix].rateCode; 113 rate = rt->info[rix].rateCode;
114 if (sc->sc_flags & ATH_PREAMBLE_SHORT) 114 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
115 rate |= rt->info[rix].shortPreamble; 115 rate |= rt->info[rix].shortPreamble;
116 116
117 ath9k_hw_set11n_txdesc(ah, ds 117 ath9k_hw_set11n_txdesc(ah, ds,
118 , skb->len + FCS_LEN /* frame length */ 118 skb->len + FCS_LEN, /* frame length */
119 , ATH9K_PKT_TYPE_BEACON /* Atheros packet type */ 119 ATH9K_PKT_TYPE_BEACON, /* Atheros packet type */
120 , avp->av_btxctl.txpower /* txpower XXX */ 120 avp->av_btxctl.txpower, /* txpower XXX */
121 , ATH9K_TXKEYIX_INVALID /* no encryption */ 121 ATH9K_TXKEYIX_INVALID, /* no encryption */
122 , ATH9K_KEY_TYPE_CLEAR /* no encryption */ 122 ATH9K_KEY_TYPE_CLEAR, /* no encryption */
123 , flags /* no ack, veol for beacons */ 123 flags /* no ack, veol for beacons */
124 ); 124 );
125 125
126 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 126 /* NB: beacon's BufLen must be a multiple of 4 bytes */
127 ath9k_hw_filltxdesc(ah, ds 127 ath9k_hw_filltxdesc(ah, ds,
128 , roundup(skb->len, 4) /* buffer length */ 128 roundup(skb->len, 4), /* buffer length */
129 , true /* first segment */ 129 true, /* first segment */
130 , true /* last segment */ 130 true, /* last segment */
131 , ds /* first descriptor */ 131 ds /* first descriptor */
132 ); 132 );
133 133
134 memzero(series, sizeof(struct ath9k_11n_rate_series) * 4); 134 memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
@@ -140,55 +140,6 @@ static void ath_beacon_setup(struct ath_softc *sc,
140 ctsrate, ctsduration, series, 4, 0); 140 ctsrate, ctsduration, series, 4, 0);
141} 141}
142 142
143/* Move everything from the vap's mcast queue to the hardware cab queue.
144 * Caller must hold mcasq lock and cabq lock
145 * XXX MORE_DATA bit?
146 */
147static void empty_mcastq_into_cabq(struct ath_hal *ah,
148 struct ath_txq *mcastq, struct ath_txq *cabq)
149{
150 struct ath_buf *bfmcast;
151
152 BUG_ON(list_empty(&mcastq->axq_q));
153
154 bfmcast = list_first_entry(&mcastq->axq_q, struct ath_buf, list);
155
156 /* link the descriptors */
157 if (!cabq->axq_link)
158 ath9k_hw_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr);
159 else
160 *cabq->axq_link = bfmcast->bf_daddr;
161
162 /* append the private vap mcast list to the cabq */
163
164 cabq->axq_depth += mcastq->axq_depth;
165 cabq->axq_totalqueued += mcastq->axq_totalqueued;
166 cabq->axq_linkbuf = mcastq->axq_linkbuf;
167 cabq->axq_link = mcastq->axq_link;
168 list_splice_tail_init(&mcastq->axq_q, &cabq->axq_q);
169 mcastq->axq_depth = 0;
170 mcastq->axq_totalqueued = 0;
171 mcastq->axq_linkbuf = NULL;
172 mcastq->axq_link = NULL;
173}
174
175/* This is only run at DTIM. We move everything from the vap's mcast queue
176 * to the hardware cab queue. Caller must hold the mcastq lock. */
177static void trigger_mcastq(struct ath_hal *ah,
178 struct ath_txq *mcastq, struct ath_txq *cabq)
179{
180 spin_lock_bh(&cabq->axq_lock);
181
182 if (!list_empty(&mcastq->axq_q))
183 empty_mcastq_into_cabq(ah, mcastq, cabq);
184
185 /* cabq is gated by beacon so it is safe to start here */
186 if (!list_empty(&cabq->axq_q))
187 ath9k_hw_txstart(ah, cabq->axq_qnum);
188
189 spin_unlock_bh(&cabq->axq_lock);
190}
191
192/* 143/*
193 * Generate beacon frame and queue cab data for a vap. 144 * Generate beacon frame and queue cab data for a vap.
194 * 145 *
@@ -199,19 +150,14 @@ static void trigger_mcastq(struct ath_hal *ah,
199*/ 150*/
200static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id) 151static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
201{ 152{
202 struct ath_hal *ah = sc->sc_ah;
203 struct ath_buf *bf; 153 struct ath_buf *bf;
204 struct ath_vap *avp; 154 struct ath_vap *avp;
205 struct sk_buff *skb; 155 struct sk_buff *skb;
206 int cabq_depth; 156 int cabq_depth;
207 int mcastq_depth;
208 int is_beacon_dtim = 0;
209 unsigned int curlen;
210 struct ath_txq *cabq; 157 struct ath_txq *cabq;
211 struct ath_txq *mcastq; 158 struct ieee80211_tx_info *info;
212 avp = sc->sc_vaps[if_id]; 159 avp = sc->sc_vaps[if_id];
213 160
214 mcastq = &avp->av_mcastq;
215 cabq = sc->sc_cabq; 161 cabq = sc->sc_cabq;
216 162
217 ASSERT(avp); 163 ASSERT(avp);
@@ -223,32 +169,33 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
223 } 169 }
224 bf = avp->av_bcbuf; 170 bf = avp->av_bcbuf;
225 skb = (struct sk_buff *) bf->bf_mpdu; 171 skb = (struct sk_buff *) bf->bf_mpdu;
172 if (skb) {
173 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
174 skb_end_pointer(skb) - skb->head,
175 PCI_DMA_TODEVICE);
176 }
226 177
227 /* 178 skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
228 * Update dynamic beacon contents. If this returns 179 bf->bf_mpdu = skb;
229 * non-zero then we need to remap the memory because 180 if (skb == NULL)
230 * the beacon frame changed size (probably because 181 return NULL;
231 * of the TIM bitmap). 182 info = IEEE80211_SKB_CB(skb);
232 */ 183 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
233 curlen = skb->len; 184 /*
234 185 * TODO: make sure the seq# gets assigned properly (vs. other
235 /* XXX: spin_lock_bh should not be used here, but sparse bitches 186 * TX frames)
236 * otherwise. We should fix sparse :) */ 187 */
237 spin_lock_bh(&mcastq->axq_lock); 188 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
238 mcastq_depth = avp->av_mcastq.axq_depth; 189 sc->seq_no += 0x10;
239 190 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
240 if (ath_update_beacon(sc, if_id, &avp->av_boff, skb, mcastq_depth) == 191 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
241 1) {
242 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
243 get_dma_mem_context(bf, bf_dmacontext));
244 bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
245 get_dma_mem_context(bf, bf_dmacontext));
246 } else {
247 pci_dma_sync_single_for_cpu(sc->pdev,
248 bf->bf_buf_addr,
249 skb_tailroom(skb),
250 PCI_DMA_TODEVICE);
251 } 192 }
193 bf->bf_buf_addr = bf->bf_dmacontext =
194 pci_map_single(sc->pdev, skb->data,
195 skb_end_pointer(skb) - skb->head,
196 PCI_DMA_TODEVICE);
197
198 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
252 199
253 /* 200 /*
254 * if the CABQ traffic from previous DTIM is pending and the current 201 * if the CABQ traffic from previous DTIM is pending and the current
@@ -262,9 +209,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
262 cabq_depth = cabq->axq_depth; 209 cabq_depth = cabq->axq_depth;
263 spin_unlock_bh(&cabq->axq_lock); 210 spin_unlock_bh(&cabq->axq_lock);
264 211
265 is_beacon_dtim = avp->av_boff.bo_tim[4] & 1; 212 if (skb && cabq_depth) {
266
267 if (mcastq_depth && is_beacon_dtim && cabq_depth) {
268 /* 213 /*
269 * Unlock the cabq lock as ath_tx_draintxq acquires 214 * Unlock the cabq lock as ath_tx_draintxq acquires
270 * the lock again which is a common function and that 215 * the lock again which is a common function and that
@@ -284,10 +229,11 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
284 * Enable the CAB queue before the beacon queue to 229 * Enable the CAB queue before the beacon queue to
285 * insure cab frames are triggered by this beacon. 230 * insure cab frames are triggered by this beacon.
286 */ 231 */
287 if (is_beacon_dtim) 232 while (skb) {
288 trigger_mcastq(ah, mcastq, cabq); 233 ath_tx_cabq(sc, skb);
234 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
235 }
289 236
290 spin_unlock_bh(&mcastq->axq_lock);
291 return bf; 237 return bf;
292} 238}
293 239
@@ -375,7 +321,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
375 struct ath_buf, list); 321 struct ath_buf, list);
376 list_del(&avp->av_bcbuf->list); 322 list_del(&avp->av_bcbuf->list);
377 323
378 if (sc->sc_opmode == ATH9K_M_HOSTAP || 324 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP ||
379 !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 325 !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
380 int slot; 326 int slot;
381 /* 327 /*
@@ -408,8 +354,9 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
408 bf = avp->av_bcbuf; 354 bf = avp->av_bcbuf;
409 if (bf->bf_mpdu != NULL) { 355 if (bf->bf_mpdu != NULL) {
410 skb = (struct sk_buff *)bf->bf_mpdu; 356 skb = (struct sk_buff *)bf->bf_mpdu;
411 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE, 357 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
412 get_dma_mem_context(bf, bf_dmacontext)); 358 skb_end_pointer(skb) - skb->head,
359 PCI_DMA_TODEVICE);
413 dev_kfree_skb_any(skb); 360 dev_kfree_skb_any(skb);
414 bf->bf_mpdu = NULL; 361 bf->bf_mpdu = NULL;
415 } 362 }
@@ -418,7 +365,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
418 * NB: the beacon data buffer must be 32-bit aligned; 365 * NB: the beacon data buffer must be 32-bit aligned;
419 * we assume the wbuf routines will return us something 366 * we assume the wbuf routines will return us something
420 * with this alignment (perhaps should assert). 367 * with this alignment (perhaps should assert).
421 * FIXME: Fill avp->av_boff.bo_tim,avp->av_btxctl.txpower and 368 * FIXME: Fill avp->av_btxctl.txpower and
422 * avp->av_btxctl.shortPreamble 369 * avp->av_btxctl.shortPreamble
423 */ 370 */
424 skb = ieee80211_beacon_get(sc->hw, avp->av_if_data); 371 skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
@@ -439,9 +386,8 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
439 __le64 val; 386 __le64 val;
440 int intval; 387 int intval;
441 388
442 /* FIXME: Use default value for now: Sujith */ 389 intval = sc->hw->conf.beacon_int ?
443 390 sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
444 intval = ATH_DEFAULT_BINTVAL;
445 391
446 /* 392 /*
447 * The beacon interval is in TU's; the TSF in usecs. 393 * The beacon interval is in TU's; the TSF in usecs.
@@ -466,8 +412,10 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
466 memcpy(&wh[1], &val, sizeof(val)); 412 memcpy(&wh[1], &val, sizeof(val));
467 } 413 }
468 414
469 bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE, 415 bf->bf_buf_addr = bf->bf_dmacontext =
470 get_dma_mem_context(bf, bf_dmacontext)); 416 pci_map_single(sc->pdev, skb->data,
417 skb_end_pointer(skb) - skb->head,
418 PCI_DMA_TODEVICE);
471 bf->bf_mpdu = skb; 419 bf->bf_mpdu = skb;
472 420
473 return 0; 421 return 0;
@@ -493,8 +441,9 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
493 bf = avp->av_bcbuf; 441 bf = avp->av_bcbuf;
494 if (bf->bf_mpdu != NULL) { 442 if (bf->bf_mpdu != NULL) {
495 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 443 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
496 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE, 444 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
497 get_dma_mem_context(bf, bf_dmacontext)); 445 skb_end_pointer(skb) - skb->head,
446 PCI_DMA_TODEVICE);
498 dev_kfree_skb_any(skb); 447 dev_kfree_skb_any(skb);
499 bf->bf_mpdu = NULL; 448 bf->bf_mpdu = NULL;
500 } 449 }
@@ -505,30 +454,6 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
505} 454}
506 455
507/* 456/*
508 * Reclaim beacon resources and return buffer to the pool.
509 *
510 * This function will free any wbuf frames that are still attached to the
511 * beacon buffers in the ATH object. Note that this does not de-allocate
512 * any wbuf objects that are in the transmit queue and have not yet returned
513 * to the ATH object.
514*/
515
516void ath_beacon_free(struct ath_softc *sc)
517{
518 struct ath_buf *bf;
519
520 list_for_each_entry(bf, &sc->sc_bbuf, list) {
521 if (bf->bf_mpdu != NULL) {
522 struct sk_buff *skb = (struct sk_buff *) bf->bf_mpdu;
523 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
524 get_dma_mem_context(bf, bf_dmacontext));
525 dev_kfree_skb_any(skb);
526 bf->bf_mpdu = NULL;
527 }
528 }
529}
530
531/*
532 * Tasklet for Sending Beacons 457 * Tasklet for Sending Beacons
533 * 458 *
534 * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame 459 * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame
@@ -540,9 +465,6 @@ void ath_beacon_free(struct ath_softc *sc)
540 465
541void ath9k_beacon_tasklet(unsigned long data) 466void ath9k_beacon_tasklet(unsigned long data)
542{ 467{
543#define TSF_TO_TU(_h,_l) \
544 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
545
546 struct ath_softc *sc = (struct ath_softc *)data; 468 struct ath_softc *sc = (struct ath_softc *)data;
547 struct ath_hal *ah = sc->sc_ah; 469 struct ath_hal *ah = sc->sc_ah;
548 struct ath_buf *bf = NULL; 470 struct ath_buf *bf = NULL;
@@ -555,7 +477,7 @@ void ath9k_beacon_tasklet(unsigned long data)
555 u32 tsftu; 477 u32 tsftu;
556 u16 intval; 478 u16 intval;
557 479
558 if (sc->sc_noreset) { 480 if (sc->sc_flags & SC_OP_NO_RESET) {
559 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah, 481 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah,
560 &rx_clear, 482 &rx_clear,
561 &rx_frame, 483 &rx_frame,
@@ -577,7 +499,7 @@ void ath9k_beacon_tasklet(unsigned long data)
577 * (in that layer). 499 * (in that layer).
578 */ 500 */
579 if (sc->sc_bmisscount < BSTUCK_THRESH) { 501 if (sc->sc_bmisscount < BSTUCK_THRESH) {
580 if (sc->sc_noreset) { 502 if (sc->sc_flags & SC_OP_NO_RESET) {
581 DPRINTF(sc, ATH_DBG_BEACON, 503 DPRINTF(sc, ATH_DBG_BEACON,
582 "%s: missed %u consecutive beacons\n", 504 "%s: missed %u consecutive beacons\n",
583 __func__, sc->sc_bmisscount); 505 __func__, sc->sc_bmisscount);
@@ -605,7 +527,7 @@ void ath9k_beacon_tasklet(unsigned long data)
605 __func__, sc->sc_bmisscount); 527 __func__, sc->sc_bmisscount);
606 } 528 }
607 } else if (sc->sc_bmisscount >= BSTUCK_THRESH) { 529 } else if (sc->sc_bmisscount >= BSTUCK_THRESH) {
608 if (sc->sc_noreset) { 530 if (sc->sc_flags & SC_OP_NO_RESET) {
609 if (sc->sc_bmisscount == BSTUCK_THRESH) { 531 if (sc->sc_bmisscount == BSTUCK_THRESH) {
610 DPRINTF(sc, 532 DPRINTF(sc,
611 ATH_DBG_BEACON, 533 ATH_DBG_BEACON,
@@ -624,7 +546,7 @@ void ath9k_beacon_tasklet(unsigned long data)
624 return; 546 return;
625 } 547 }
626 if (sc->sc_bmisscount != 0) { 548 if (sc->sc_bmisscount != 0) {
627 if (sc->sc_noreset) { 549 if (sc->sc_flags & SC_OP_NO_RESET) {
628 DPRINTF(sc, 550 DPRINTF(sc,
629 ATH_DBG_BEACON, 551 ATH_DBG_BEACON,
630 "%s: resume beacon xmit after %u misses\n", 552 "%s: resume beacon xmit after %u misses\n",
@@ -643,8 +565,8 @@ void ath9k_beacon_tasklet(unsigned long data)
643 * on the tsf to safeguard against missing an swba. 565 * on the tsf to safeguard against missing an swba.
644 */ 566 */
645 567
646 /* FIXME: Use default value for now - Sujith */ 568 intval = sc->hw->conf.beacon_int ?
647 intval = ATH_DEFAULT_BINTVAL; 569 sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
648 570
649 tsf = ath9k_hw_gettsf64(ah); 571 tsf = ath9k_hw_gettsf64(ah);
650 tsftu = TSF_TO_TU(tsf>>32, tsf); 572 tsftu = TSF_TO_TU(tsf>>32, tsf);
@@ -704,7 +626,6 @@ void ath9k_beacon_tasklet(unsigned long data)
704 626
705 sc->ast_be_xmit += bc; /* XXX per-vap? */ 627 sc->ast_be_xmit += bc; /* XXX per-vap? */
706 } 628 }
707#undef TSF_TO_TU
708} 629}
709 630
710/* 631/*
@@ -719,7 +640,7 @@ void ath_bstuck_process(struct ath_softc *sc)
719 DPRINTF(sc, ATH_DBG_BEACON, 640 DPRINTF(sc, ATH_DBG_BEACON,
720 "%s: stuck beacon; resetting (bmiss count %u)\n", 641 "%s: stuck beacon; resetting (bmiss count %u)\n",
721 __func__, sc->sc_bmisscount); 642 __func__, sc->sc_bmisscount);
722 ath_internal_reset(sc); 643 ath_reset(sc, false);
723} 644}
724 645
725/* 646/*
@@ -740,8 +661,6 @@ void ath_bstuck_process(struct ath_softc *sc)
740 661
741void ath_beacon_config(struct ath_softc *sc, int if_id) 662void ath_beacon_config(struct ath_softc *sc, int if_id)
742{ 663{
743#define TSF_TO_TU(_h,_l) \
744 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
745 struct ath_hal *ah = sc->sc_ah; 664 struct ath_hal *ah = sc->sc_ah;
746 u32 nexttbtt, intval; 665 u32 nexttbtt, intval;
747 struct ath_beacon_config conf; 666 struct ath_beacon_config conf;
@@ -750,7 +669,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
750 if (if_id != ATH_IF_ID_ANY) 669 if (if_id != ATH_IF_ID_ANY)
751 av_opmode = sc->sc_vaps[if_id]->av_opmode; 670 av_opmode = sc->sc_vaps[if_id]->av_opmode;
752 else 671 else
753 av_opmode = sc->sc_opmode; 672 av_opmode = sc->sc_ah->ah_opmode;
754 673
755 memzero(&conf, sizeof(struct ath_beacon_config)); 674 memzero(&conf, sizeof(struct ath_beacon_config));
756 675
@@ -760,7 +679,8 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
760 * Protocol stack doesn't support dynamic beacon configuration, 679 * Protocol stack doesn't support dynamic beacon configuration,
761 * use default configurations. 680 * use default configurations.
762 */ 681 */
763 conf.beacon_interval = ATH_DEFAULT_BINTVAL; 682 conf.beacon_interval = sc->hw->conf.beacon_int ?
683 sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
764 conf.listen_interval = 1; 684 conf.listen_interval = 1;
765 conf.dtim_period = conf.beacon_interval; 685 conf.dtim_period = conf.beacon_interval;
766 conf.dtim_count = 1; 686 conf.dtim_count = 1;
@@ -770,7 +690,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
770 nexttbtt = TSF_TO_TU(get_unaligned_le32(conf.u.last_tstamp + 4), 690 nexttbtt = TSF_TO_TU(get_unaligned_le32(conf.u.last_tstamp + 4),
771 get_unaligned_le32(conf.u.last_tstamp)); 691 get_unaligned_le32(conf.u.last_tstamp));
772 /* XXX conditionalize multi-bss support? */ 692 /* XXX conditionalize multi-bss support? */
773 if (sc->sc_opmode == ATH9K_M_HOSTAP) { 693 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) {
774 /* 694 /*
775 * For multi-bss ap support beacons are either staggered 695 * For multi-bss ap support beacons are either staggered
776 * evenly over N slots or burst together. For the former 696 * evenly over N slots or burst together. For the former
@@ -791,7 +711,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
791 DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", 711 DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
792 __func__, nexttbtt, intval, conf.beacon_interval); 712 __func__, nexttbtt, intval, conf.beacon_interval);
793 /* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */ 713 /* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */
794 if (sc->sc_opmode == ATH9K_M_STA) { 714 if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
795 struct ath9k_beacon_state bs; 715 struct ath9k_beacon_state bs;
796 u64 tsf; 716 u64 tsf;
797 u32 tsftu; 717 u32 tsftu;
@@ -886,19 +806,19 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
886 "cfp:period %u " 806 "cfp:period %u "
887 "maxdur %u " 807 "maxdur %u "
888 "next %u " 808 "next %u "
889 "timoffset %u\n" 809 "timoffset %u\n",
890 , __func__ 810 __func__,
891 , (unsigned long long)tsf, tsftu 811 (unsigned long long)tsf, tsftu,
892 , bs.bs_intval 812 bs.bs_intval,
893 , bs.bs_nexttbtt 813 bs.bs_nexttbtt,
894 , bs.bs_dtimperiod 814 bs.bs_dtimperiod,
895 , bs.bs_nextdtim 815 bs.bs_nextdtim,
896 , bs.bs_bmissthreshold 816 bs.bs_bmissthreshold,
897 , bs.bs_sleepduration 817 bs.bs_sleepduration,
898 , bs.bs_cfpperiod 818 bs.bs_cfpperiod,
899 , bs.bs_cfpmaxduration 819 bs.bs_cfpmaxduration,
900 , bs.bs_cfpnext 820 bs.bs_cfpnext,
901 , bs.bs_timoffset 821 bs.bs_timoffset
902 ); 822 );
903 823
904 ath9k_hw_set_interrupts(ah, 0); 824 ath9k_hw_set_interrupts(ah, 0);
@@ -911,7 +831,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
911 ath9k_hw_set_interrupts(ah, 0); 831 ath9k_hw_set_interrupts(ah, 0);
912 if (nexttbtt == intval) 832 if (nexttbtt == intval)
913 intval |= ATH9K_BEACON_RESET_TSF; 833 intval |= ATH9K_BEACON_RESET_TSF;
914 if (sc->sc_opmode == ATH9K_M_IBSS) { 834 if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS) {
915 /* 835 /*
916 * Pull nexttbtt forward to reflect the current 836 * Pull nexttbtt forward to reflect the current
917 * TSF . 837 * TSF .
@@ -943,7 +863,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
943 if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) 863 if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
944 sc->sc_imask |= ATH9K_INT_SWBA; 864 sc->sc_imask |= ATH9K_INT_SWBA;
945 ath_beaconq_config(sc); 865 ath_beaconq_config(sc);
946 } else if (sc->sc_opmode == ATH9K_M_HOSTAP) { 866 } else if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) {
947 /* 867 /*
948 * In AP mode we enable the beacon timers and 868 * In AP mode we enable the beacon timers and
949 * SWBA interrupts to prepare beacon frames. 869 * SWBA interrupts to prepare beacon frames.
@@ -959,11 +879,10 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
959 * When using a self-linked beacon descriptor in 879 * When using a self-linked beacon descriptor in
960 * ibss mode load it once here. 880 * ibss mode load it once here.
961 */ 881 */
962 if (sc->sc_opmode == ATH9K_M_IBSS && 882 if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS &&
963 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) 883 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
964 ath_beacon_start_adhoc(sc, 0); 884 ath_beacon_start_adhoc(sc, 0);
965 } 885 }
966#undef TSF_TO_TU
967} 886}
968 887
969/* Function to collect beacon rssi data and resync beacon if necessary */ 888/* Function to collect beacon rssi data and resync beacon if necessary */
@@ -975,5 +894,5 @@ void ath_beacon_sync(struct ath_softc *sc, int if_id)
975 * beacon frame we just received. 894 * beacon frame we just received.
976 */ 895 */
977 ath_beacon_config(sc, if_id); 896 ath_beacon_config(sc, if_id);
978 sc->sc_beacons = 1; 897 sc->sc_flags |= SC_OP_BEACONS;
979} 898}
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
index f6c45288d0e7..c262ef279ff3 100644
--- a/drivers/net/wireless/ath9k/core.c
+++ b/drivers/net/wireless/ath9k/core.c
@@ -21,9 +21,6 @@
21 21
22static int ath_outdoor; /* enable outdoor use */ 22static int ath_outdoor; /* enable outdoor use */
23 23
24static const u8 ath_bcast_mac[ETH_ALEN] =
25 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
26
27static u32 ath_chainmask_sel_up_rssi_thres = 24static u32 ath_chainmask_sel_up_rssi_thres =
28 ATH_CHAINMASK_SEL_UP_RSSI_THRES; 25 ATH_CHAINMASK_SEL_UP_RSSI_THRES;
29static u32 ath_chainmask_sel_down_rssi_thres = 26static u32 ath_chainmask_sel_down_rssi_thres =
@@ -54,10 +51,8 @@ static void bus_read_cachesize(struct ath_softc *sc, int *csz)
54 * Set current operating mode 51 * Set current operating mode
55 * 52 *
56 * This function initializes and fills the rate table in the ATH object based 53 * This function initializes and fills the rate table in the ATH object based
57 * on the operating mode. The blink rates are also set up here, although 54 * on the operating mode.
58 * they have been superceeded by the ath_led module.
59*/ 55*/
60
61static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode) 56static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
62{ 57{
63 const struct ath9k_rate_table *rt; 58 const struct ath9k_rate_table *rt;
@@ -235,7 +230,7 @@ static int ath_setup_channels(struct ath_softc *sc)
235 * Determine mode from channel flags 230 * Determine mode from channel flags
236 * 231 *
237 * This routine will provide the enumerated WIRELESSS_MODE value based 232 * This routine will provide the enumerated WIRELESSS_MODE value based
238 * on the settings of the channel flags. If ho valid set of flags 233 * on the settings of the channel flags. If no valid set of flags
239 * exist, the lowest mode (11b) is selected. 234 * exist, the lowest mode (11b) is selected.
240*/ 235*/
241 236
@@ -260,7 +255,8 @@ static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
260 else if (chan->chanmode == CHANNEL_G_HT40MINUS) 255 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
261 return ATH9K_MODE_11NG_HT40MINUS; 256 return ATH9K_MODE_11NG_HT40MINUS;
262 257
263 /* NB: should not get here */ 258 WARN_ON(1); /* should not get here */
259
264 return ATH9K_MODE_11B; 260 return ATH9K_MODE_11B;
265} 261}
266 262
@@ -275,14 +271,12 @@ static int ath_stop(struct ath_softc *sc)
275{ 271{
276 struct ath_hal *ah = sc->sc_ah; 272 struct ath_hal *ah = sc->sc_ah;
277 273
278 DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n", 274 DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
279 __func__, sc->sc_invalid); 275 __func__, sc->sc_flags & SC_OP_INVALID);
280 276
281 /* 277 /*
282 * Shutdown the hardware and driver: 278 * Shutdown the hardware and driver:
283 * stop output from above 279 * stop output from above
284 * reset 802.11 state machine
285 * (sends station deassoc/deauth frames)
286 * turn off timers 280 * turn off timers
287 * disable interrupts 281 * disable interrupts
288 * clear transmit machinery 282 * clear transmit machinery
@@ -294,10 +288,10 @@ static int ath_stop(struct ath_softc *sc)
294 * hardware is gone (invalid). 288 * hardware is gone (invalid).
295 */ 289 */
296 290
297 if (!sc->sc_invalid) 291 if (!(sc->sc_flags & SC_OP_INVALID))
298 ath9k_hw_set_interrupts(ah, 0); 292 ath9k_hw_set_interrupts(ah, 0);
299 ath_draintxq(sc, false); 293 ath_draintxq(sc, false);
300 if (!sc->sc_invalid) { 294 if (!(sc->sc_flags & SC_OP_INVALID)) {
301 ath_stoprecv(sc); 295 ath_stoprecv(sc);
302 ath9k_hw_phy_disable(ah); 296 ath9k_hw_phy_disable(ah);
303 } else 297 } else
@@ -307,56 +301,6 @@ static int ath_stop(struct ath_softc *sc)
307} 301}
308 302
309/* 303/*
310 * Start Scan
311 *
312 * This function is called when starting a channel scan. It will perform
313 * power save wakeup processing, set the filter for the scan, and get the
314 * chip ready to send broadcast packets out during the scan.
315*/
316
317void ath_scan_start(struct ath_softc *sc)
318{
319 struct ath_hal *ah = sc->sc_ah;
320 u32 rfilt;
321 u32 now = (u32) jiffies_to_msecs(get_timestamp());
322
323 sc->sc_scanning = 1;
324 rfilt = ath_calcrxfilter(sc);
325 ath9k_hw_setrxfilter(ah, rfilt);
326 ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
327
328 /* Restore previous power management state. */
329
330 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
331 now / 1000, now % 1000, __func__, rfilt);
332}
333
334/*
335 * Scan End
336 *
337 * This routine is called by the upper layer when the scan is completed. This
338 * will set the filters back to normal operating mode, set the BSSID to the
339 * correct value, and restore the power save state.
340*/
341
342void ath_scan_end(struct ath_softc *sc)
343{
344 struct ath_hal *ah = sc->sc_ah;
345 u32 rfilt;
346 u32 now = (u32) jiffies_to_msecs(get_timestamp());
347
348 sc->sc_scanning = 0;
349 /* Request for a full reset due to rx packet filter changes */
350 sc->sc_full_reset = 1;
351 rfilt = ath_calcrxfilter(sc);
352 ath9k_hw_setrxfilter(ah, rfilt);
353 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
354
355 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
356 now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
357}
358
359/*
360 * Set the current channel 304 * Set the current channel
361 * 305 *
362 * Set/change channels. If the channel is really being changed, it's done 306 * Set/change channels. If the channel is really being changed, it's done
@@ -367,25 +311,23 @@ int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
367{ 311{
368 struct ath_hal *ah = sc->sc_ah; 312 struct ath_hal *ah = sc->sc_ah;
369 bool fastcc = true, stopped; 313 bool fastcc = true, stopped;
370 enum ath9k_ht_macmode ht_macmode;
371 314
372 if (sc->sc_invalid) /* if the device is invalid or removed */ 315 if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
373 return -EIO; 316 return -EIO;
374 317
375 DPRINTF(sc, ATH_DBG_CONFIG, 318 DPRINTF(sc, ATH_DBG_CONFIG,
376 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n", 319 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
377 __func__, 320 __func__,
378 ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel, 321 ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
379 sc->sc_curchan.channelFlags), 322 sc->sc_ah->ah_curchan->channelFlags),
380 sc->sc_curchan.channel, 323 sc->sc_ah->ah_curchan->channel,
381 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags), 324 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
382 hchan->channel, hchan->channelFlags); 325 hchan->channel, hchan->channelFlags);
383 326
384 ht_macmode = ath_cwm_macmode(sc); 327 if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
385 328 hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
386 if (hchan->channel != sc->sc_curchan.channel || 329 (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
387 hchan->channelFlags != sc->sc_curchan.channelFlags || 330 (sc->sc_flags & SC_OP_FULL_RESET)) {
388 sc->sc_update_chainmask || sc->sc_full_reset) {
389 int status; 331 int status;
390 /* 332 /*
391 * This is only performed if the channel settings have 333 * This is only performed if the channel settings have
@@ -404,15 +346,16 @@ int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
404 * to flush data frames already in queue because of 346 * to flush data frames already in queue because of
405 * changing channel. */ 347 * changing channel. */
406 348
407 if (!stopped || sc->sc_full_reset) 349 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
408 fastcc = false; 350 fastcc = false;
409 351
410 spin_lock_bh(&sc->sc_resetlock); 352 spin_lock_bh(&sc->sc_resetlock);
411 if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan, 353 if (!ath9k_hw_reset(ah, hchan,
412 ht_macmode, sc->sc_tx_chainmask, 354 sc->sc_ht_info.tx_chan_width,
413 sc->sc_rx_chainmask, 355 sc->sc_tx_chainmask,
414 sc->sc_ht_extprotspacing, 356 sc->sc_rx_chainmask,
415 fastcc, &status)) { 357 sc->sc_ht_extprotspacing,
358 fastcc, &status)) {
416 DPRINTF(sc, ATH_DBG_FATAL, 359 DPRINTF(sc, ATH_DBG_FATAL,
417 "%s: unable to reset channel %u (%uMhz) " 360 "%s: unable to reset channel %u (%uMhz) "
418 "flags 0x%x hal status %u\n", __func__, 361 "flags 0x%x hal status %u\n", __func__,
@@ -424,9 +367,8 @@ int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
424 } 367 }
425 spin_unlock_bh(&sc->sc_resetlock); 368 spin_unlock_bh(&sc->sc_resetlock);
426 369
427 sc->sc_curchan = *hchan; 370 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
428 sc->sc_update_chainmask = 0; 371 sc->sc_flags &= ~SC_OP_FULL_RESET;
429 sc->sc_full_reset = 0;
430 372
431 /* Re-enable rx framework */ 373 /* Re-enable rx framework */
432 if (ath_startrecv(sc) != 0) { 374 if (ath_startrecv(sc) != 0) {
@@ -537,7 +479,7 @@ int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
537 479
538void ath_update_chainmask(struct ath_softc *sc, int is_ht) 480void ath_update_chainmask(struct ath_softc *sc, int is_ht)
539{ 481{
540 sc->sc_update_chainmask = 1; 482 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
541 if (is_ht) { 483 if (is_ht) {
542 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask; 484 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
543 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask; 485 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
@@ -554,62 +496,6 @@ void ath_update_chainmask(struct ath_softc *sc, int is_ht)
554/* VAP management */ 496/* VAP management */
555/******************/ 497/******************/
556 498
557/*
558 * VAP in Listen mode
559 *
560 * This routine brings the VAP out of the down state into a "listen" state
561 * where it waits for association requests. This is used in AP and AdHoc
562 * modes.
563*/
564
565int ath_vap_listen(struct ath_softc *sc, int if_id)
566{
567 struct ath_hal *ah = sc->sc_ah;
568 struct ath_vap *avp;
569 u32 rfilt = 0;
570 DECLARE_MAC_BUF(mac);
571
572 avp = sc->sc_vaps[if_id];
573 if (avp == NULL) {
574 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
575 __func__, if_id);
576 return -EINVAL;
577 }
578
579#ifdef CONFIG_SLOW_ANT_DIV
580 ath_slow_ant_div_stop(&sc->sc_antdiv);
581#endif
582
583 /* update ratectrl about the new state */
584 ath_rate_newstate(sc, avp);
585
586 rfilt = ath_calcrxfilter(sc);
587 ath9k_hw_setrxfilter(ah, rfilt);
588
589 if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) {
590 memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
591 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
592 } else
593 sc->sc_curaid = 0;
594
595 DPRINTF(sc, ATH_DBG_CONFIG,
596 "%s: RX filter 0x%x bssid %s aid 0x%x\n",
597 __func__, rfilt, print_mac(mac,
598 sc->sc_curbssid), sc->sc_curaid);
599
600 /*
601 * XXXX
602 * Disable BMISS interrupt when we're not associated
603 */
604 ath9k_hw_set_interrupts(ah,
605 sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
606 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
607 /* need to reconfigure the beacons when it moves to RUN */
608 sc->sc_beacons = 0;
609
610 return 0;
611}
612
613int ath_vap_attach(struct ath_softc *sc, 499int ath_vap_attach(struct ath_softc *sc,
614 int if_id, 500 int if_id,
615 struct ieee80211_vif *if_data, 501 struct ieee80211_vif *if_data,
@@ -647,16 +533,13 @@ int ath_vap_attach(struct ath_softc *sc,
647 /* Set the VAP opmode */ 533 /* Set the VAP opmode */
648 avp->av_opmode = opmode; 534 avp->av_opmode = opmode;
649 avp->av_bslot = -1; 535 avp->av_bslot = -1;
650 INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
651 INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
652 spin_lock_init(&avp->av_mcastq.axq_lock);
653 536
654 ath9k_hw_set_tsfadjust(sc->sc_ah, 1); 537 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
655 538
656 sc->sc_vaps[if_id] = avp; 539 sc->sc_vaps[if_id] = avp;
657 sc->sc_nvaps++; 540 sc->sc_nvaps++;
658 /* Set the device opmode */ 541 /* Set the device opmode */
659 sc->sc_opmode = opmode; 542 sc->sc_ah->ah_opmode = opmode;
660 543
661 /* default VAP configuration */ 544 /* default VAP configuration */
662 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE; 545 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
@@ -689,9 +572,6 @@ int ath_vap_detach(struct ath_softc *sc, int if_id)
689 ath_stoprecv(sc); /* stop recv side */ 572 ath_stoprecv(sc); /* stop recv side */
690 ath_flushrecv(sc); /* flush recv queue */ 573 ath_flushrecv(sc); /* flush recv queue */
691 574
692 /* Reclaim any pending mcast bufs on the vap. */
693 ath_tx_draintxq(sc, &avp->av_mcastq, false);
694
695 kfree(avp); 575 kfree(avp);
696 sc->sc_vaps[if_id] = NULL; 576 sc->sc_vaps[if_id] = NULL;
697 sc->sc_nvaps--; 577 sc->sc_nvaps--;
@@ -728,9 +608,9 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
728 struct ath_hal *ah = sc->sc_ah; 608 struct ath_hal *ah = sc->sc_ah;
729 int status; 609 int status;
730 int error = 0; 610 int error = 0;
731 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
732 611
733 DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode); 612 DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
613 __func__, sc->sc_ah->ah_opmode);
734 614
735 /* 615 /*
736 * Stop anything previously setup. This is safe 616 * Stop anything previously setup. This is safe
@@ -752,16 +632,16 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
752 * be followed by initialization of the appropriate bits 632 * be followed by initialization of the appropriate bits
753 * and then setup of the interrupt mask. 633 * and then setup of the interrupt mask.
754 */ 634 */
755 sc->sc_curchan = *initial_chan;
756 635
757 spin_lock_bh(&sc->sc_resetlock); 636 spin_lock_bh(&sc->sc_resetlock);
758 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode, 637 if (!ath9k_hw_reset(ah, initial_chan,
759 sc->sc_tx_chainmask, sc->sc_rx_chainmask, 638 sc->sc_ht_info.tx_chan_width,
760 sc->sc_ht_extprotspacing, false, &status)) { 639 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
640 sc->sc_ht_extprotspacing, false, &status)) {
761 DPRINTF(sc, ATH_DBG_FATAL, 641 DPRINTF(sc, ATH_DBG_FATAL,
762 "%s: unable to reset hardware; hal status %u " 642 "%s: unable to reset hardware; hal status %u "
763 "(freq %u flags 0x%x)\n", __func__, status, 643 "(freq %u flags 0x%x)\n", __func__, status,
764 sc->sc_curchan.channel, sc->sc_curchan.channelFlags); 644 initial_chan->channel, initial_chan->channelFlags);
765 error = -EIO; 645 error = -EIO;
766 spin_unlock_bh(&sc->sc_resetlock); 646 spin_unlock_bh(&sc->sc_resetlock);
767 goto done; 647 goto done;
@@ -802,7 +682,8 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
802 * Note we only do this (at the moment) for station mode. 682 * Note we only do this (at the moment) for station mode.
803 */ 683 */
804 if (ath9k_hw_phycounters(ah) && 684 if (ath9k_hw_phycounters(ah) &&
805 ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS))) 685 ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
686 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
806 sc->sc_imask |= ATH9K_INT_MIB; 687 sc->sc_imask |= ATH9K_INT_MIB;
807 /* 688 /*
808 * Some hardware processes the TIM IE and fires an 689 * Some hardware processes the TIM IE and fires an
@@ -811,7 +692,7 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
811 * enable the TIM interrupt when operating as station. 692 * enable the TIM interrupt when operating as station.
812 */ 693 */
813 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) && 694 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
814 (sc->sc_opmode == ATH9K_M_STA) && 695 (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
815 !sc->sc_config.swBeaconProcess) 696 !sc->sc_config.swBeaconProcess)
816 sc->sc_imask |= ATH9K_INT_TIM; 697 sc->sc_imask |= ATH9K_INT_TIM;
817 /* 698 /*
@@ -823,34 +704,34 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
823 704
824 /* XXX: we must make sure h/w is ready and clear invalid flag 705 /* XXX: we must make sure h/w is ready and clear invalid flag
825 * before turning on interrupt. */ 706 * before turning on interrupt. */
826 sc->sc_invalid = 0; 707 sc->sc_flags &= ~SC_OP_INVALID;
827done: 708done:
828 return error; 709 return error;
829} 710}
830 711
831/* 712int ath_reset(struct ath_softc *sc, bool retry_tx)
832 * Reset the hardware w/o losing operational state. This is
833 * basically a more efficient way of doing ath_stop, ath_init,
834 * followed by state transitions to the current 802.11
835 * operational state. Used to recover from errors rx overrun
836 * and to reset the hardware when rf gain settings must be reset.
837 */
838
839static int ath_reset_start(struct ath_softc *sc, u32 flag)
840{ 713{
841 struct ath_hal *ah = sc->sc_ah; 714 struct ath_hal *ah = sc->sc_ah;
715 int status;
716 int error = 0;
842 717
843 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */ 718 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
844 ath_draintxq(sc, flag & RESET_RETRY_TXQ); /* stop xmit side */ 719 ath_draintxq(sc, retry_tx); /* stop xmit */
845 ath_stoprecv(sc); /* stop recv side */ 720 ath_stoprecv(sc); /* stop recv */
846 ath_flushrecv(sc); /* flush recv queue */ 721 ath_flushrecv(sc); /* flush recv queue */
847 722
848 return 0; 723 /* Reset chip */
849} 724 spin_lock_bh(&sc->sc_resetlock);
850 725 if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
851static int ath_reset_end(struct ath_softc *sc, u32 flag) 726 sc->sc_ht_info.tx_chan_width,
852{ 727 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
853 struct ath_hal *ah = sc->sc_ah; 728 sc->sc_ht_extprotspacing, false, &status)) {
729 DPRINTF(sc, ATH_DBG_FATAL,
730 "%s: unable to reset hardware; hal status %u\n",
731 __func__, status);
732 error = -EIO;
733 }
734 spin_unlock_bh(&sc->sc_resetlock);
854 735
855 if (ath_startrecv(sc) != 0) /* restart recv */ 736 if (ath_startrecv(sc) != 0) /* restart recv */
856 DPRINTF(sc, ATH_DBG_FATAL, 737 DPRINTF(sc, ATH_DBG_FATAL,
@@ -861,16 +742,17 @@ static int ath_reset_end(struct ath_softc *sc, u32 flag)
861 * that changes the channel so update any state that 742 * that changes the channel so update any state that
862 * might change as a result. 743 * might change as a result.
863 */ 744 */
864 ath_setcurmode(sc, ath_chan2mode(&sc->sc_curchan)); 745 ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));
865 746
866 ath_update_txpow(sc); /* update tx power state */ 747 ath_update_txpow(sc);
867 748
868 if (sc->sc_beacons) 749 if (sc->sc_flags & SC_OP_BEACONS)
869 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */ 750 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
751
870 ath9k_hw_set_interrupts(ah, sc->sc_imask); 752 ath9k_hw_set_interrupts(ah, sc->sc_imask);
871 753
872 /* Restart the txq */ 754 /* Restart the txq */
873 if (flag & RESET_RETRY_TXQ) { 755 if (retry_tx) {
874 int i; 756 int i;
875 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 757 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
876 if (ATH_TXQ_SETUP(sc, i)) { 758 if (ATH_TXQ_SETUP(sc, i)) {
@@ -880,28 +762,6 @@ static int ath_reset_end(struct ath_softc *sc, u32 flag)
880 } 762 }
881 } 763 }
882 } 764 }
883 return 0;
884}
885
886int ath_reset(struct ath_softc *sc)
887{
888 struct ath_hal *ah = sc->sc_ah;
889 int status;
890 int error = 0;
891 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
892
893 /* NB: indicate channel change so we do a full reset */
894 spin_lock_bh(&sc->sc_resetlock);
895 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
896 ht_macmode,
897 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
898 sc->sc_ht_extprotspacing, false, &status)) {
899 DPRINTF(sc, ATH_DBG_FATAL,
900 "%s: unable to reset hardware; hal status %u\n",
901 __func__, status);
902 error = -EIO;
903 }
904 spin_unlock_bh(&sc->sc_resetlock);
905 765
906 return error; 766 return error;
907} 767}
@@ -911,7 +771,7 @@ int ath_suspend(struct ath_softc *sc)
911 struct ath_hal *ah = sc->sc_ah; 771 struct ath_hal *ah = sc->sc_ah;
912 772
913 /* No I/O if device has been surprise removed */ 773 /* No I/O if device has been surprise removed */
914 if (sc->sc_invalid) 774 if (sc->sc_flags & SC_OP_INVALID)
915 return -EIO; 775 return -EIO;
916 776
917 /* Shut off the interrupt before setting sc->sc_invalid to '1' */ 777 /* Shut off the interrupt before setting sc->sc_invalid to '1' */
@@ -919,7 +779,7 @@ int ath_suspend(struct ath_softc *sc)
919 779
920 /* XXX: we must make sure h/w will not generate any interrupt 780 /* XXX: we must make sure h/w will not generate any interrupt
921 * before setting the invalid flag. */ 781 * before setting the invalid flag. */
922 sc->sc_invalid = 1; 782 sc->sc_flags |= SC_OP_INVALID;
923 783
924 /* disable HAL and put h/w to sleep */ 784 /* disable HAL and put h/w to sleep */
925 ath9k_hw_disable(sc->sc_ah); 785 ath9k_hw_disable(sc->sc_ah);
@@ -940,7 +800,7 @@ irqreturn_t ath_isr(int irq, void *dev)
940 bool sched = false; 800 bool sched = false;
941 801
942 do { 802 do {
943 if (sc->sc_invalid) { 803 if (sc->sc_flags & SC_OP_INVALID) {
944 /* 804 /*
945 * The hardware is not ready/present, don't 805 * The hardware is not ready/present, don't
946 * touch anything. Note this can happen early 806 * touch anything. Note this can happen early
@@ -1050,7 +910,7 @@ static void ath9k_tasklet(unsigned long data)
1050 910
1051 if (status & ATH9K_INT_FATAL) { 911 if (status & ATH9K_INT_FATAL) {
1052 /* need a chip reset */ 912 /* need a chip reset */
1053 ath_internal_reset(sc); 913 ath_reset(sc, false);
1054 return; 914 return;
1055 } else { 915 } else {
1056 916
@@ -1093,10 +953,9 @@ int ath_init(u16 devid, struct ath_softc *sc)
1093 int status; 953 int status;
1094 int error = 0, i; 954 int error = 0, i;
1095 int csz = 0; 955 int csz = 0;
1096 u32 rd;
1097 956
1098 /* XXX: hardware will not be ready until ath_open() being called */ 957 /* XXX: hardware will not be ready until ath_open() being called */
1099 sc->sc_invalid = 1; 958 sc->sc_flags |= SC_OP_INVALID;
1100 959
1101 sc->sc_debug = DBG_DEFAULT; 960 sc->sc_debug = DBG_DEFAULT;
1102 DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid); 961 DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
@@ -1126,9 +985,6 @@ int ath_init(u16 devid, struct ath_softc *sc)
1126 } 985 }
1127 sc->sc_ah = ah; 986 sc->sc_ah = ah;
1128 987
1129 /* Get the chipset-specific aggr limit. */
1130 sc->sc_rtsaggrlimit = ah->ah_caps.rts_aggr_limit;
1131
1132 /* Get the hardware key cache size. */ 988 /* Get the hardware key cache size. */
1133 sc->sc_keymax = ah->ah_caps.keycache_size; 989 sc->sc_keymax = ah->ah_caps.keycache_size;
1134 if (sc->sc_keymax > ATH_KEYMAX) { 990 if (sc->sc_keymax > ATH_KEYMAX) {
@@ -1162,14 +1018,12 @@ int ath_init(u16 devid, struct ath_softc *sc)
1162 * is resposible for filtering this list based on settings 1018 * is resposible for filtering this list based on settings
1163 * like the phy mode. 1019 * like the phy mode.
1164 */ 1020 */
1165 rd = ah->ah_currentRD;
1166
1167 error = ath_setup_channels(sc); 1021 error = ath_setup_channels(sc);
1168 if (error) 1022 if (error)
1169 goto bad; 1023 goto bad;
1170 1024
1171 /* default to STA mode */ 1025 /* default to STA mode */
1172 sc->sc_opmode = ATH9K_M_MONITOR; 1026 sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;
1173 1027
1174 /* Setup rate tables */ 1028 /* Setup rate tables */
1175 1029
@@ -1240,7 +1094,7 @@ int ath_init(u16 devid, struct ath_softc *sc)
1240 1094
1241 sc->sc_rc = ath_rate_attach(ah); 1095 sc->sc_rc = ath_rate_attach(ah);
1242 if (sc->sc_rc == NULL) { 1096 if (sc->sc_rc == NULL) {
1243 error = EIO; 1097 error = -EIO;
1244 goto bad2; 1098 goto bad2;
1245 } 1099 }
1246 1100
@@ -1280,20 +1134,13 @@ int ath_init(u16 devid, struct ath_softc *sc)
1280 1134
1281 /* 11n Capabilities */ 1135 /* 11n Capabilities */
1282 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { 1136 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
1283 sc->sc_txaggr = 1; 1137 sc->sc_flags |= SC_OP_TXAGGR;
1284 sc->sc_rxaggr = 1; 1138 sc->sc_flags |= SC_OP_RXAGGR;
1285 } 1139 }
1286 1140
1287 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask; 1141 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
1288 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask; 1142 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
1289 1143
1290 /* Configuration for rx chain detection */
1291 sc->sc_rxchaindetect_ref = 0;
1292 sc->sc_rxchaindetect_thresh5GHz = 35;
1293 sc->sc_rxchaindetect_thresh2GHz = 35;
1294 sc->sc_rxchaindetect_delta5GHz = 30;
1295 sc->sc_rxchaindetect_delta2GHz = 30;
1296
1297 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL); 1144 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1298 sc->sc_defant = ath9k_hw_getdefantenna(ah); 1145 sc->sc_defant = ath9k_hw_getdefantenna(ah);
1299 1146
@@ -1337,7 +1184,7 @@ void ath_deinit(struct ath_softc *sc)
1337 DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__); 1184 DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);
1338 1185
1339 ath_stop(sc); 1186 ath_stop(sc);
1340 if (!sc->sc_invalid) 1187 if (!(sc->sc_flags & SC_OP_INVALID))
1341 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 1188 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1342 ath_rate_detach(sc->sc_rc); 1189 ath_rate_detach(sc->sc_rc);
1343 /* cleanup tx queues */ 1190 /* cleanup tx queues */
@@ -1464,9 +1311,9 @@ void ath_newassoc(struct ath_softc *sc,
1464 /* if station reassociates, tear down the aggregation state. */ 1311 /* if station reassociates, tear down the aggregation state. */
1465 if (!isnew) { 1312 if (!isnew) {
1466 for (tidno = 0; tidno < WME_NUM_TID; tidno++) { 1313 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
1467 if (sc->sc_txaggr) 1314 if (sc->sc_flags & SC_OP_TXAGGR)
1468 ath_tx_aggr_teardown(sc, an, tidno); 1315 ath_tx_aggr_teardown(sc, an, tidno);
1469 if (sc->sc_rxaggr) 1316 if (sc->sc_flags & SC_OP_RXAGGR)
1470 ath_rx_aggr_teardown(sc, an, tidno); 1317 ath_rx_aggr_teardown(sc, an, tidno);
1471 } 1318 }
1472 } 1319 }
@@ -1815,13 +1662,6 @@ void ath_descdma_cleanup(struct ath_softc *sc,
1815/* Utilities */ 1662/* Utilities */
1816/*************/ 1663/*************/
1817 1664
1818void ath_internal_reset(struct ath_softc *sc)
1819{
1820 ath_reset_start(sc, 0);
1821 ath_reset(sc);
1822 ath_reset_end(sc, 0);
1823}
1824
1825int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) 1665int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1826{ 1666{
1827 int qnum; 1667 int qnum;
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index 673b3d81133a..b66de29cf662 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -39,6 +39,8 @@
39#include <linux/scatterlist.h> 39#include <linux/scatterlist.h>
40#include <asm/page.h> 40#include <asm/page.h>
41#include <net/mac80211.h> 41#include <net/mac80211.h>
42#include <linux/leds.h>
43#include <linux/rfkill.h>
42 44
43#include "ath9k.h" 45#include "ath9k.h"
44#include "rc.h" 46#include "rc.h"
@@ -79,12 +81,12 @@ struct ath_node;
79 } \ 81 } \
80 } while (0) 82 } while (0)
81 83
84#define TSF_TO_TU(_h,_l) \
85 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
86
82/* XXX: remove */ 87/* XXX: remove */
83#define memzero(_buf, _len) memset(_buf, 0, _len) 88#define memzero(_buf, _len) memset(_buf, 0, _len)
84 89
85#define get_dma_mem_context(var, field) (&((var)->field))
86#define copy_dma_mem_context(dst, src) (*dst = *src)
87
88#define ATH9K_BH_STATUS_INTACT 0 90#define ATH9K_BH_STATUS_INTACT 0
89#define ATH9K_BH_STATUS_CHANGE 1 91#define ATH9K_BH_STATUS_CHANGE 1
90 92
@@ -95,6 +97,8 @@ static inline unsigned long get_timestamp(void)
95 return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ); 97 return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ);
96} 98}
97 99
100static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
101
98/*************/ 102/*************/
99/* Debugging */ 103/* Debugging */
100/*************/ 104/*************/
@@ -175,11 +179,6 @@ void ath_update_chainmask(struct ath_softc *sc, int is_ht);
175/* Descriptor Management */ 179/* Descriptor Management */
176/*************************/ 180/*************************/
177 181
178/* Number of descriptors per buffer. The only case where we see skbuff
179chains is due to FF aggregation in the driver. */
180#define ATH_TXDESC 1
181/* if there's more fragment for this MSDU */
182#define ATH_BF_MORE_MPDU 1
183#define ATH_TXBUF_RESET(_bf) do { \ 182#define ATH_TXBUF_RESET(_bf) do { \
184 (_bf)->bf_status = 0; \ 183 (_bf)->bf_status = 0; \
185 (_bf)->bf_lastbf = NULL; \ 184 (_bf)->bf_lastbf = NULL; \
@@ -189,28 +188,29 @@ chains is due to FF aggregation in the driver. */
189 sizeof(struct ath_buf_state)); \ 188 sizeof(struct ath_buf_state)); \
190 } while (0) 189 } while (0)
191 190
191enum buffer_type {
192 BUF_DATA = BIT(0),
193 BUF_AGGR = BIT(1),
194 BUF_AMPDU = BIT(2),
195 BUF_HT = BIT(3),
196 BUF_RETRY = BIT(4),
197 BUF_XRETRY = BIT(5),
198 BUF_SHORT_PREAMBLE = BIT(6),
199 BUF_BAR = BIT(7),
200 BUF_PSPOLL = BIT(8),
201 BUF_AGGR_BURST = BIT(9),
202 BUF_CALC_AIRTIME = BIT(10),
203};
204
192struct ath_buf_state { 205struct ath_buf_state {
193 int bfs_nframes; /* # frames in aggregate */ 206 int bfs_nframes; /* # frames in aggregate */
194 u16 bfs_al; /* length of aggregate */ 207 u16 bfs_al; /* length of aggregate */
195 u16 bfs_frmlen; /* length of frame */ 208 u16 bfs_frmlen; /* length of frame */
196 int bfs_seqno; /* sequence number */ 209 int bfs_seqno; /* sequence number */
197 int bfs_tidno; /* tid of this frame */ 210 int bfs_tidno; /* tid of this frame */
198 int bfs_retries; /* current retries */ 211 int bfs_retries; /* current retries */
199 struct ath_rc_series bfs_rcs[4]; /* rate series */ 212 struct ath_rc_series bfs_rcs[4]; /* rate series */
200 u8 bfs_isdata:1; /* is a data frame/aggregate */ 213 u32 bf_type; /* BUF_* (enum buffer_type) */
201 u8 bfs_isaggr:1; /* is an aggregate */
202 u8 bfs_isampdu:1; /* is an a-mpdu, aggregate or not */
203 u8 bfs_ht:1; /* is an HT frame */
204 u8 bfs_isretried:1; /* is retried */
205 u8 bfs_isxretried:1; /* is excessive retried */
206 u8 bfs_shpreamble:1; /* is short preamble */
207 u8 bfs_isbar:1; /* is a BAR */
208 u8 bfs_ispspoll:1; /* is a PS-Poll */
209 u8 bfs_aggrburst:1; /* is a aggr burst */
210 u8 bfs_calcairtime:1; /* requests airtime be calculated
211 when set for tx frame */
212 int bfs_rifsburst_elem; /* RIFS burst/bar */
213 int bfs_nrifsubframes; /* # of elements in burst */
214 /* key type use to encrypt this frame */ 214 /* key type use to encrypt this frame */
215 enum ath9k_key_type bfs_keytype; 215 enum ath9k_key_type bfs_keytype;
216}; 216};
@@ -222,26 +222,22 @@ struct ath_buf_state {
222#define bf_seqno bf_state.bfs_seqno 222#define bf_seqno bf_state.bfs_seqno
223#define bf_tidno bf_state.bfs_tidno 223#define bf_tidno bf_state.bfs_tidno
224#define bf_rcs bf_state.bfs_rcs 224#define bf_rcs bf_state.bfs_rcs
225#define bf_isdata bf_state.bfs_isdata
226#define bf_isaggr bf_state.bfs_isaggr
227#define bf_isampdu bf_state.bfs_isampdu
228#define bf_ht bf_state.bfs_ht
229#define bf_isretried bf_state.bfs_isretried
230#define bf_isxretried bf_state.bfs_isxretried
231#define bf_shpreamble bf_state.bfs_shpreamble
232#define bf_rifsburst_elem bf_state.bfs_rifsburst_elem
233#define bf_nrifsubframes bf_state.bfs_nrifsubframes
234#define bf_keytype bf_state.bfs_keytype 225#define bf_keytype bf_state.bfs_keytype
235#define bf_isbar bf_state.bfs_isbar 226#define bf_isdata(bf) (bf->bf_state.bf_type & BUF_DATA)
236#define bf_ispspoll bf_state.bfs_ispspoll 227#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
237#define bf_aggrburst bf_state.bfs_aggrburst 228#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
238#define bf_calcairtime bf_state.bfs_calcairtime 229#define bf_isht(bf) (bf->bf_state.bf_type & BUF_HT)
230#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
231#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
232#define bf_isshpreamble(bf) (bf->bf_state.bf_type & BUF_SHORT_PREAMBLE)
233#define bf_isbar(bf) (bf->bf_state.bf_type & BUF_BAR)
234#define bf_ispspoll(bf) (bf->bf_state.bf_type & BUF_PSPOLL)
235#define bf_isaggrburst(bf) (bf->bf_state.bf_type & BUF_AGGR_BURST)
239 236
240/* 237/*
241 * Abstraction of a contiguous buffer to transmit/receive. There is only 238 * Abstraction of a contiguous buffer to transmit/receive. There is only
242 * a single hw descriptor encapsulated here. 239 * a single hw descriptor encapsulated here.
243 */ 240 */
244
245struct ath_buf { 241struct ath_buf {
246 struct list_head list; 242 struct list_head list;
247 struct list_head *last; 243 struct list_head *last;
@@ -391,10 +387,10 @@ int ath_rx_input(struct ath_softc *sc,
391 struct sk_buff *skb, 387 struct sk_buff *skb,
392 struct ath_recv_status *rx_status, 388 struct ath_recv_status *rx_status,
393 enum ATH_RX_TYPE *status); 389 enum ATH_RX_TYPE *status);
394int ath__rx_indicate(struct ath_softc *sc, 390int _ath_rx_indicate(struct ath_softc *sc,
395 struct sk_buff *skb, 391 struct sk_buff *skb,
396 struct ath_recv_status *status, 392 struct ath_recv_status *status,
397 u16 keyix); 393 u16 keyix);
398int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb, 394int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
399 struct ath_recv_status *status); 395 struct ath_recv_status *status);
400 396
@@ -402,8 +398,7 @@ int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
402/* TX */ 398/* TX */
403/******/ 399/******/
404 400
405#define ATH_FRAG_PER_MSDU 1 401#define ATH_TXBUF 512
406#define ATH_TXBUF (512/ATH_FRAG_PER_MSDU)
407/* max number of transmit attempts (tries) */ 402/* max number of transmit attempts (tries) */
408#define ATH_TXMAXTRY 13 403#define ATH_TXMAXTRY 13
409/* max number of 11n transmit attempts (tries) */ 404/* max number of 11n transmit attempts (tries) */
@@ -522,7 +517,6 @@ struct ath_tx_control {
522 u32 keyix; 517 u32 keyix;
523 int min_rate; 518 int min_rate;
524 int mcast_rate; 519 int mcast_rate;
525 u16 nextfraglen;
526 struct ath_softc *dev; 520 struct ath_softc *dev;
527 dma_addr_t dmacontext; 521 dma_addr_t dmacontext;
528}; 522};
@@ -557,10 +551,10 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
557int ath_tx_setup(struct ath_softc *sc, int haltype); 551int ath_tx_setup(struct ath_softc *sc, int haltype);
558void ath_draintxq(struct ath_softc *sc, bool retry_tx); 552void ath_draintxq(struct ath_softc *sc, bool retry_tx);
559void ath_tx_draintxq(struct ath_softc *sc, 553void ath_tx_draintxq(struct ath_softc *sc,
560 struct ath_txq *txq, bool retry_tx); 554 struct ath_txq *txq, bool retry_tx);
561void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an); 555void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
562void ath_tx_node_cleanup(struct ath_softc *sc, 556void ath_tx_node_cleanup(struct ath_softc *sc,
563 struct ath_node *an, bool bh_flag); 557 struct ath_node *an, bool bh_flag);
564void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an); 558void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an);
565void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq); 559void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
566int ath_tx_init(struct ath_softc *sc, int nbufs); 560int ath_tx_init(struct ath_softc *sc, int nbufs);
@@ -575,6 +569,7 @@ u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
575void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth); 569void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
576void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 570void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
577 struct ath_xmit_status *tx_status, struct ath_node *an); 571 struct ath_xmit_status *tx_status, struct ath_node *an);
572void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb);
578 573
579/**********************/ 574/**********************/
580/* Node / Aggregation */ 575/* Node / Aggregation */
@@ -585,7 +580,6 @@ void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
585/* indicates the node is 80211 power save */ 580/* indicates the node is 80211 power save */
586#define ATH_NODE_PWRSAVE 0x2 581#define ATH_NODE_PWRSAVE 0x2
587 582
588#define ADDBA_TIMEOUT 200 /* 200 milliseconds */
589#define ADDBA_EXCHANGE_ATTEMPTS 10 583#define ADDBA_EXCHANGE_ATTEMPTS 10
590#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */ 584#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */
591#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */ 585#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
@@ -705,9 +699,6 @@ struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr);
705#define ATH_BCBUF 4 /* number of beacon buffers */ 699#define ATH_BCBUF 4 /* number of beacon buffers */
706#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */ 700#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */
707#define ATH_DEFAULT_BMISS_LIMIT 10 701#define ATH_DEFAULT_BMISS_LIMIT 10
708#define ATH_BEACON_AIFS_DEFAULT 0 /* Default aifs for ap beacon q */
709#define ATH_BEACON_CWMIN_DEFAULT 0 /* Default cwmin for ap beacon q */
710#define ATH_BEACON_CWMAX_DEFAULT 0 /* Default cwmax for ap beacon q */
711#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) 702#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
712 703
713/* beacon configuration */ 704/* beacon configuration */
@@ -724,30 +715,16 @@ struct ath_beacon_config {
724 } u; /* last received beacon/probe response timestamp of this BSS. */ 715 } u; /* last received beacon/probe response timestamp of this BSS. */
725}; 716};
726 717
727/* offsets in a beacon frame for
728 * quick acess of beacon content by low-level driver */
729struct ath_beacon_offset {
730 u8 *bo_tim; /* start of atim/dtim */
731};
732
733void ath9k_beacon_tasklet(unsigned long data); 718void ath9k_beacon_tasklet(unsigned long data);
734void ath_beacon_config(struct ath_softc *sc, int if_id); 719void ath_beacon_config(struct ath_softc *sc, int if_id);
735int ath_beaconq_setup(struct ath_hal *ah); 720int ath_beaconq_setup(struct ath_hal *ah);
736int ath_beacon_alloc(struct ath_softc *sc, int if_id); 721int ath_beacon_alloc(struct ath_softc *sc, int if_id);
737void ath_bstuck_process(struct ath_softc *sc); 722void ath_bstuck_process(struct ath_softc *sc);
738void ath_beacon_tasklet(struct ath_softc *sc, int *needmark);
739void ath_beacon_free(struct ath_softc *sc);
740void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp); 723void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
741void ath_beacon_sync(struct ath_softc *sc, int if_id); 724void ath_beacon_sync(struct ath_softc *sc, int if_id);
742void ath_update_beacon_info(struct ath_softc *sc, int avgbrssi);
743void ath_get_beaconconfig(struct ath_softc *sc, 725void ath_get_beaconconfig(struct ath_softc *sc,
744 int if_id, 726 int if_id,
745 struct ath_beacon_config *conf); 727 struct ath_beacon_config *conf);
746int ath_update_beacon(struct ath_softc *sc,
747 int if_id,
748 struct ath_beacon_offset *bo,
749 struct sk_buff *skb,
750 int mcast);
751/********/ 728/********/
752/* VAPs */ 729/* VAPs */
753/********/ 730/********/
@@ -774,10 +751,8 @@ struct ath_vap {
774 struct ieee80211_vif *av_if_data; 751 struct ieee80211_vif *av_if_data;
775 enum ath9k_opmode av_opmode; /* VAP operational mode */ 752 enum ath9k_opmode av_opmode; /* VAP operational mode */
776 struct ath_buf *av_bcbuf; /* beacon buffer */ 753 struct ath_buf *av_bcbuf; /* beacon buffer */
777 struct ath_beacon_offset av_boff; /* dynamic update state */
778 struct ath_tx_control av_btxctl; /* txctl information for beacon */ 754 struct ath_tx_control av_btxctl; /* txctl information for beacon */
779 int av_bslot; /* beacon slot index */ 755 int av_bslot; /* beacon slot index */
780 struct ath_txq av_mcastq; /* multicast transmit queue */
781 struct ath_vap_config av_config;/* vap configuration parameters*/ 756 struct ath_vap_config av_config;/* vap configuration parameters*/
782 struct ath_rate_node *rc_node; 757 struct ath_rate_node *rc_node;
783}; 758};
@@ -788,8 +763,7 @@ int ath_vap_attach(struct ath_softc *sc,
788 enum ath9k_opmode opmode); 763 enum ath9k_opmode opmode);
789int ath_vap_detach(struct ath_softc *sc, int if_id); 764int ath_vap_detach(struct ath_softc *sc, int if_id);
790int ath_vap_config(struct ath_softc *sc, 765int ath_vap_config(struct ath_softc *sc,
791 int if_id, struct ath_vap_config *if_config); 766 int if_id, struct ath_vap_config *if_config);
792int ath_vap_listen(struct ath_softc *sc, int if_id);
793 767
794/*********************/ 768/*********************/
795/* Antenna diversity */ 769/* Antenna diversity */
@@ -830,6 +804,36 @@ void ath_slow_ant_div(struct ath_antdiv *antdiv,
830void ath_setdefantenna(void *sc, u32 antenna); 804void ath_setdefantenna(void *sc, u32 antenna);
831 805
832/********************/ 806/********************/
807/* LED Control */
808/********************/
809
810#define ATH_LED_PIN 1
811
812enum ath_led_type {
813 ATH_LED_RADIO,
814 ATH_LED_ASSOC,
815 ATH_LED_TX,
816 ATH_LED_RX
817};
818
819struct ath_led {
820 struct ath_softc *sc;
821 struct led_classdev led_cdev;
822 enum ath_led_type led_type;
823 char name[32];
824 bool registered;
825};
826
827/* Rfkill */
828#define ATH_RFKILL_POLL_INTERVAL 2000 /* msecs */
829
830struct ath_rfkill {
831 struct rfkill *rfkill;
832 struct delayed_work rfkill_poll;
833 char rfkill_name[32];
834};
835
836/********************/
833/* Main driver core */ 837/* Main driver core */
834/********************/ 838/********************/
835 839
@@ -841,11 +845,7 @@ void ath_setdefantenna(void *sc, u32 antenna);
841#define ATH_DEFAULT_NOISE_FLOOR -95 845#define ATH_DEFAULT_NOISE_FLOOR -95
842#define ATH_REGCLASSIDS_MAX 10 846#define ATH_REGCLASSIDS_MAX 10
843#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ 847#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
844#define ATH_PREAMBLE_SHORT (1<<0)
845#define ATH_PROTECT_ENABLE (1<<1)
846#define ATH_MAX_SW_RETRIES 10 848#define ATH_MAX_SW_RETRIES 10
847/* Num farmes difference in tx to flip default recv */
848#define ATH_ANTENNA_DIFF 2
849#define ATH_CHAN_MAX 255 849#define ATH_CHAN_MAX 255
850#define IEEE80211_WEP_NKID 4 /* number of key ids */ 850#define IEEE80211_WEP_NKID 4 /* number of key ids */
851#define IEEE80211_RATE_VAL 0x7f 851#define IEEE80211_RATE_VAL 0x7f
@@ -859,9 +859,7 @@ void ath_setdefantenna(void *sc, u32 antenna);
859 */ 859 */
860#define ATH_KEYMAX 128 /* max key cache size we handle */ 860#define ATH_KEYMAX 128 /* max key cache size we handle */
861 861
862#define RESET_RETRY_TXQ 0x00000001
863#define ATH_IF_ID_ANY 0xff 862#define ATH_IF_ID_ANY 0xff
864
865#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 863#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
866 864
867#define RSSI_LPF_THRESHOLD -20 865#define RSSI_LPF_THRESHOLD -20
@@ -907,60 +905,64 @@ struct ath_ht_info {
907 u8 ext_chan_offset; 905 u8 ext_chan_offset;
908}; 906};
909 907
908#define SC_OP_INVALID BIT(0)
909#define SC_OP_BEACONS BIT(1)
910#define SC_OP_RXAGGR BIT(2)
911#define SC_OP_TXAGGR BIT(3)
912#define SC_OP_CHAINMASK_UPDATE BIT(4)
913#define SC_OP_FULL_RESET BIT(5)
914#define SC_OP_NO_RESET BIT(6)
915#define SC_OP_PREAMBLE_SHORT BIT(7)
916#define SC_OP_PROTECT_ENABLE BIT(8)
917#define SC_OP_RXFLUSH BIT(9)
918#define SC_OP_LED_ASSOCIATED BIT(10)
919#define SC_OP_RFKILL_REGISTERED BIT(11)
920#define SC_OP_RFKILL_SW_BLOCKED BIT(12)
921#define SC_OP_RFKILL_HW_BLOCKED BIT(13)
922
910struct ath_softc { 923struct ath_softc {
911 struct ieee80211_hw *hw; 924 struct ieee80211_hw *hw;
912 struct pci_dev *pdev; 925 struct pci_dev *pdev;
913 void __iomem *mem;
914 struct tasklet_struct intr_tq; 926 struct tasklet_struct intr_tq;
915 struct tasklet_struct bcon_tasklet; 927 struct tasklet_struct bcon_tasklet;
916 struct ath_config sc_config; /* load-time parameters */ 928 struct ath_config sc_config;
917 int sc_debug;
918 struct ath_hal *sc_ah; 929 struct ath_hal *sc_ah;
919 struct ath_rate_softc *sc_rc; /* tx rate control support */ 930 struct ath_rate_softc *sc_rc;
931 void __iomem *mem;
932
933 u8 sc_curbssid[ETH_ALEN];
934 u8 sc_myaddr[ETH_ALEN];
935 u8 sc_bssidmask[ETH_ALEN];
936
937 int sc_debug;
920 u32 sc_intrstatus; 938 u32 sc_intrstatus;
921 enum ath9k_opmode sc_opmode; /* current operating mode */ 939 u32 sc_flags; /* SC_OP_* */
922 940 unsigned int rx_filter;
923 u8 sc_invalid; /* being detached */
924 u8 sc_beacons; /* beacons running */
925 u8 sc_scanning; /* scanning active */
926 u8 sc_txaggr; /* enable 11n tx aggregation */
927 u8 sc_rxaggr; /* enable 11n rx aggregation */
928 u8 sc_update_chainmask; /* change chain mask */
929 u8 sc_full_reset; /* force full reset */
930 enum wireless_mode sc_curmode; /* current phy mode */
931 u16 sc_curtxpow; 941 u16 sc_curtxpow;
932 u16 sc_curaid; 942 u16 sc_curaid;
933 u8 sc_curbssid[ETH_ALEN]; 943 u16 sc_cachelsz;
934 u8 sc_myaddr[ETH_ALEN]; 944 int sc_slotupdate; /* slot to next advance fsm */
945 int sc_slottime;
946 int sc_bslot[ATH_BCBUF];
947 u8 sc_tx_chainmask;
948 u8 sc_rx_chainmask;
949 enum ath9k_int sc_imask;
950 enum wireless_mode sc_curmode; /* current phy mode */
935 enum PROT_MODE sc_protmode; 951 enum PROT_MODE sc_protmode;
936 u8 sc_mcastantenna; 952
937 u8 sc_txantenna; /* data tx antenna (fixed or auto) */
938 u8 sc_nbcnvaps; /* # of vaps sending beacons */ 953 u8 sc_nbcnvaps; /* # of vaps sending beacons */
939 u16 sc_nvaps; /* # of active virtual ap's */ 954 u16 sc_nvaps; /* # of active virtual ap's */
940 struct ath_vap *sc_vaps[ATH_BCBUF]; 955 struct ath_vap *sc_vaps[ATH_BCBUF];
941 enum ath9k_int sc_imask; 956
942 u8 sc_bssidmask[ETH_ALEN]; 957 u8 sc_mcastantenna;
943 u8 sc_defant; /* current default antenna */ 958 u8 sc_defant; /* current default antenna */
944 u8 sc_rxotherant; /* rx's on non-default antenna */ 959 u8 sc_rxotherant; /* rx's on non-default antenna */
945 u16 sc_cachelsz; 960
946 int sc_slotupdate; /* slot to next advance fsm */
947 int sc_slottime;
948 u8 sc_noreset;
949 int sc_bslot[ATH_BCBUF];
950 struct ath9k_node_stats sc_halstats; /* station-mode rssi stats */ 961 struct ath9k_node_stats sc_halstats; /* station-mode rssi stats */
951 struct list_head node_list; 962 struct list_head node_list;
952 struct ath_ht_info sc_ht_info; 963 struct ath_ht_info sc_ht_info;
953 int16_t sc_noise_floor; /* signal noise floor in dBm */
954 enum ath9k_ht_extprotspacing sc_ht_extprotspacing; 964 enum ath9k_ht_extprotspacing sc_ht_extprotspacing;
955 u8 sc_tx_chainmask; 965
956 u8 sc_rx_chainmask;
957 u8 sc_rxchaindetect_ref;
958 u8 sc_rxchaindetect_thresh5GHz;
959 u8 sc_rxchaindetect_thresh2GHz;
960 u8 sc_rxchaindetect_delta5GHz;
961 u8 sc_rxchaindetect_delta2GHz;
962 u32 sc_rtsaggrlimit; /* Chipset specific aggr limit */
963 u32 sc_flags;
964#ifdef CONFIG_SLOW_ANT_DIV 966#ifdef CONFIG_SLOW_ANT_DIV
965 struct ath_antdiv sc_antdiv; 967 struct ath_antdiv sc_antdiv;
966#endif 968#endif
@@ -981,8 +983,6 @@ struct ath_softc {
981 struct ath_descdma sc_rxdma; 983 struct ath_descdma sc_rxdma;
982 int sc_rxbufsize; /* rx size based on mtu */ 984 int sc_rxbufsize; /* rx size based on mtu */
983 u32 *sc_rxlink; /* link ptr in last RX desc */ 985 u32 *sc_rxlink; /* link ptr in last RX desc */
984 u32 sc_rxflush; /* rx flush in progress */
985 u64 sc_lastrx; /* tsf of last rx'd frame */
986 986
987 /* TX */ 987 /* TX */
988 struct list_head sc_txbuf; 988 struct list_head sc_txbuf;
@@ -991,7 +991,7 @@ struct ath_softc {
991 u32 sc_txqsetup; 991 u32 sc_txqsetup;
992 u32 sc_txintrperiod; /* tx interrupt batching */ 992 u32 sc_txintrperiod; /* tx interrupt batching */
993 int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME AC -> h/w qnum */ 993 int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME AC -> h/w qnum */
994 u32 sc_ant_tx[8]; /* recent tx frames/antenna */ 994 u16 seq_no; /* TX sequence number */
995 995
996 /* Beacon */ 996 /* Beacon */
997 struct ath9k_tx_queue_info sc_beacon_qi; 997 struct ath9k_tx_queue_info sc_beacon_qi;
@@ -1015,7 +1015,6 @@ struct ath_softc {
1015 /* Channel, Band */ 1015 /* Channel, Band */
1016 struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX]; 1016 struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
1017 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 1017 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
1018 struct ath9k_channel sc_curchan;
1019 1018
1020 /* Locks */ 1019 /* Locks */
1021 spinlock_t sc_rxflushlock; 1020 spinlock_t sc_rxflushlock;
@@ -1023,6 +1022,15 @@ struct ath_softc {
1023 spinlock_t sc_txbuflock; 1022 spinlock_t sc_txbuflock;
1024 spinlock_t sc_resetlock; 1023 spinlock_t sc_resetlock;
1025 spinlock_t node_lock; 1024 spinlock_t node_lock;
1025
1026 /* LEDs */
1027 struct ath_led radio_led;
1028 struct ath_led assoc_led;
1029 struct ath_led tx_led;
1030 struct ath_led rx_led;
1031
1032 /* Rfkill */
1033 struct ath_rfkill rf_kill;
1026}; 1034};
1027 1035
1028int ath_init(u16 devid, struct ath_softc *sc); 1036int ath_init(u16 devid, struct ath_softc *sc);
@@ -1030,14 +1038,8 @@ void ath_deinit(struct ath_softc *sc);
1030int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan); 1038int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan);
1031int ath_suspend(struct ath_softc *sc); 1039int ath_suspend(struct ath_softc *sc);
1032irqreturn_t ath_isr(int irq, void *dev); 1040irqreturn_t ath_isr(int irq, void *dev);
1033int ath_reset(struct ath_softc *sc); 1041int ath_reset(struct ath_softc *sc, bool retry_tx);
1034void ath_scan_start(struct ath_softc *sc);
1035void ath_scan_end(struct ath_softc *sc);
1036int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan); 1042int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan);
1037void ath_setup_rate(struct ath_softc *sc,
1038 enum wireless_mode wMode,
1039 enum RATE_TYPE type,
1040 const struct ath9k_rate_table *rt);
1041 1043
1042/*********************/ 1044/*********************/
1043/* Utility Functions */ 1045/* Utility Functions */
@@ -1056,17 +1058,5 @@ int ath_cabq_update(struct ath_softc *);
1056void ath_get_currentCountry(struct ath_softc *sc, 1058void ath_get_currentCountry(struct ath_softc *sc,
1057 struct ath9k_country_entry *ctry); 1059 struct ath9k_country_entry *ctry);
1058u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp); 1060u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp);
1059void ath_internal_reset(struct ath_softc *sc);
1060u32 ath_chan2flags(struct ieee80211_channel *chan, struct ath_softc *sc);
1061dma_addr_t ath_skb_map_single(struct ath_softc *sc,
1062 struct sk_buff *skb,
1063 int direction,
1064 dma_addr_t *pa);
1065void ath_skb_unmap_single(struct ath_softc *sc,
1066 struct sk_buff *skb,
1067 int direction,
1068 dma_addr_t *pa);
1069void ath_mcast_merge(struct ath_softc *sc, u32 mfilt[2]);
1070enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc);
1071 1061
1072#endif /* CORE_H */ 1062#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index a17eb130f574..0251e59f2f84 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -85,29 +85,6 @@ static const struct hal_percal_data adc_init_dc_cal = {
85 ath9k_hw_adc_dccal_calibrate 85 ath9k_hw_adc_dccal_calibrate
86}; 86};
87 87
88static const struct ath_hal ar5416hal = {
89 AR5416_MAGIC,
90 0,
91 0,
92 NULL,
93 NULL,
94 CTRY_DEFAULT,
95 0,
96 0,
97 0,
98 0,
99 0,
100 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 },
109};
110
111static struct ath9k_rate_table ar5416_11a_table = { 88static struct ath9k_rate_table ar5416_11a_table = {
112 8, 89 8,
113 {0}, 90 {0},
@@ -371,7 +348,7 @@ static void ath9k_hw_set_defaults(struct ath_hal *ah)
371 ah->ah_config.intr_mitigation = 0; 348 ah->ah_config.intr_mitigation = 0;
372} 349}
373 350
374static inline void ath9k_hw_override_ini(struct ath_hal *ah, 351static void ath9k_hw_override_ini(struct ath_hal *ah,
375 struct ath9k_channel *chan) 352 struct ath9k_channel *chan)
376{ 353{
377 if (!AR_SREV_5416_V20_OR_LATER(ah) 354 if (!AR_SREV_5416_V20_OR_LATER(ah)
@@ -381,8 +358,8 @@ static inline void ath9k_hw_override_ini(struct ath_hal *ah,
381 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11); 358 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
382} 359}
383 360
384static inline void ath9k_hw_init_bb(struct ath_hal *ah, 361static void ath9k_hw_init_bb(struct ath_hal *ah,
385 struct ath9k_channel *chan) 362 struct ath9k_channel *chan)
386{ 363{
387 u32 synthDelay; 364 u32 synthDelay;
388 365
@@ -397,8 +374,8 @@ static inline void ath9k_hw_init_bb(struct ath_hal *ah,
397 udelay(synthDelay + BASE_ACTIVATE_DELAY); 374 udelay(synthDelay + BASE_ACTIVATE_DELAY);
398} 375}
399 376
400static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah, 377static void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
401 enum ath9k_opmode opmode) 378 enum ath9k_opmode opmode)
402{ 379{
403 struct ath_hal_5416 *ahp = AH5416(ah); 380 struct ath_hal_5416 *ahp = AH5416(ah);
404 381
@@ -428,7 +405,7 @@ static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
428 } 405 }
429} 406}
430 407
431static inline void ath9k_hw_init_qos(struct ath_hal *ah) 408static void ath9k_hw_init_qos(struct ath_hal *ah)
432{ 409{
433 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); 410 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
434 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); 411 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
@@ -523,7 +500,7 @@ static inline bool ath9k_hw_nvram_read(struct ath_hal *ah,
523 return ath9k_hw_eeprom_read(ah, off, data); 500 return ath9k_hw_eeprom_read(ah, off, data);
524} 501}
525 502
526static inline bool ath9k_hw_fill_eeprom(struct ath_hal *ah) 503static bool ath9k_hw_fill_eeprom(struct ath_hal *ah)
527{ 504{
528 struct ath_hal_5416 *ahp = AH5416(ah); 505 struct ath_hal_5416 *ahp = AH5416(ah);
529 struct ar5416_eeprom *eep = &ahp->ah_eeprom; 506 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
@@ -790,7 +767,7 @@ ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
790 return true; 767 return true;
791} 768}
792 769
793static inline int ath9k_hw_check_eeprom(struct ath_hal *ah) 770static int ath9k_hw_check_eeprom(struct ath_hal *ah)
794{ 771{
795 u32 sum = 0, el; 772 u32 sum = 0, el;
796 u16 *eepdata; 773 u16 *eepdata;
@@ -1196,11 +1173,12 @@ static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
1196 1173
1197 ah = &ahp->ah; 1174 ah = &ahp->ah;
1198 1175
1199 memcpy(&ahp->ah, &ar5416hal, sizeof(struct ath_hal));
1200
1201 ah->ah_sc = sc; 1176 ah->ah_sc = sc;
1202 ah->ah_sh = mem; 1177 ah->ah_sh = mem;
1203 1178
1179 ah->ah_magic = AR5416_MAGIC;
1180 ah->ah_countryCode = CTRY_DEFAULT;
1181
1204 ah->ah_devid = devid; 1182 ah->ah_devid = devid;
1205 ah->ah_subvendorid = 0; 1183 ah->ah_subvendorid = 0;
1206 1184
@@ -1294,7 +1272,7 @@ u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
1294 } 1272 }
1295} 1273}
1296 1274
1297static inline int ath9k_hw_get_radiorev(struct ath_hal *ah) 1275static int ath9k_hw_get_radiorev(struct ath_hal *ah)
1298{ 1276{
1299 u32 val; 1277 u32 val;
1300 int i; 1278 int i;
@@ -1307,7 +1285,7 @@ static inline int ath9k_hw_get_radiorev(struct ath_hal *ah)
1307 return ath9k_hw_reverse_bits(val, 8); 1285 return ath9k_hw_reverse_bits(val, 8);
1308} 1286}
1309 1287
1310static inline int ath9k_hw_init_macaddr(struct ath_hal *ah) 1288static int ath9k_hw_init_macaddr(struct ath_hal *ah)
1311{ 1289{
1312 u32 sum; 1290 u32 sum;
1313 int i; 1291 int i;
@@ -1389,7 +1367,7 @@ static u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah,
1389 return spur_val; 1367 return spur_val;
1390} 1368}
1391 1369
1392static inline int ath9k_hw_rfattach(struct ath_hal *ah) 1370static int ath9k_hw_rfattach(struct ath_hal *ah)
1393{ 1371{
1394 bool rfStatus = false; 1372 bool rfStatus = false;
1395 int ecode = 0; 1373 int ecode = 0;
@@ -1434,8 +1412,8 @@ static int ath9k_hw_rf_claim(struct ath_hal *ah)
1434 return 0; 1412 return 0;
1435} 1413}
1436 1414
1437static inline void ath9k_hw_init_pll(struct ath_hal *ah, 1415static void ath9k_hw_init_pll(struct ath_hal *ah,
1438 struct ath9k_channel *chan) 1416 struct ath9k_channel *chan)
1439{ 1417{
1440 u32 pll; 1418 u32 pll;
1441 1419
@@ -1553,7 +1531,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode)
1553 } 1531 }
1554} 1532}
1555 1533
1556static inline void 1534static void
1557ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan) 1535ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan)
1558{ 1536{
1559 u32 rfMode = 0; 1537 u32 rfMode = 0;
@@ -1623,7 +1601,7 @@ static bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
1623 return true; 1601 return true;
1624} 1602}
1625 1603
1626static inline bool ath9k_hw_set_reset_power_on(struct ath_hal *ah) 1604static bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
1627{ 1605{
1628 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 1606 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1629 AR_RTC_FORCE_WAKE_ON_INT); 1607 AR_RTC_FORCE_WAKE_ON_INT);
@@ -1664,7 +1642,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hal *ah,
1664 } 1642 }
1665} 1643}
1666 1644
1667static inline 1645static
1668struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah, 1646struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah,
1669 struct ath9k_channel *chan) 1647 struct ath9k_channel *chan)
1670{ 1648{
@@ -2098,7 +2076,7 @@ static void ath9k_hw_ani_attach(struct ath_hal *ah)
2098 ahp->ah_procPhyErr |= HAL_PROCESS_ANI; 2076 ahp->ah_procPhyErr |= HAL_PROCESS_ANI;
2099} 2077}
2100 2078
2101static inline void ath9k_hw_ani_setup(struct ath_hal *ah) 2079static void ath9k_hw_ani_setup(struct ath_hal *ah)
2102{ 2080{
2103 struct ath_hal_5416 *ahp = AH5416(ah); 2081 struct ath_hal_5416 *ahp = AH5416(ah);
2104 int i; 2082 int i;
@@ -2822,32 +2800,11 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah,
2822 } 2800 }
2823} 2801}
2824 2802
2825static bool ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio, 2803void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
2826 enum ath9k_gpio_output_mux_type 2804 u32 ah_signal_type)
2827 halSignalType)
2828{ 2805{
2829 u32 ah_signal_type;
2830 u32 gpio_shift; 2806 u32 gpio_shift;
2831 2807
2832 static u32 MuxSignalConversionTable[] = {
2833
2834 AR_GPIO_OUTPUT_MUX_AS_OUTPUT,
2835
2836 AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
2837
2838 AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
2839
2840 AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
2841
2842 AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
2843 };
2844
2845 if ((halSignalType >= 0)
2846 && (halSignalType < ARRAY_SIZE(MuxSignalConversionTable)))
2847 ah_signal_type = MuxSignalConversionTable[halSignalType];
2848 else
2849 return false;
2850
2851 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); 2808 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
2852 2809
2853 gpio_shift = 2 * gpio; 2810 gpio_shift = 2 * gpio;
@@ -2856,19 +2813,46 @@ static bool ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
2856 AR_GPIO_OE_OUT, 2813 AR_GPIO_OE_OUT,
2857 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), 2814 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
2858 (AR_GPIO_OE_OUT_DRV << gpio_shift)); 2815 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2859
2860 return true;
2861} 2816}
2862 2817
2863static bool ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, 2818void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 val)
2864 u32 val)
2865{ 2819{
2866 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), 2820 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
2867 AR_GPIO_BIT(gpio)); 2821 AR_GPIO_BIT(gpio));
2868 return true;
2869} 2822}
2870 2823
2871static u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio) 2824/*
2825 * Configure GPIO Input lines
2826 */
2827void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio)
2828{
2829 u32 gpio_shift;
2830
2831 ASSERT(gpio < ah->ah_caps.num_gpio_pins);
2832
2833 gpio_shift = gpio << 1;
2834
2835 REG_RMW(ah,
2836 AR_GPIO_OE_OUT,
2837 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
2838 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2839}
2840
2841#ifdef CONFIG_RFKILL
2842static void ath9k_enable_rfkill(struct ath_hal *ah)
2843{
2844 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
2845 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
2846
2847 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
2848 AR_GPIO_INPUT_MUX2_RFSILENT);
2849
2850 ath9k_hw_cfg_gpio_input(ah, ah->ah_rfkill_gpio);
2851 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
2852}
2853#endif
2854
2855u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
2872{ 2856{
2873 if (gpio >= ah->ah_caps.num_gpio_pins) 2857 if (gpio >= ah->ah_caps.num_gpio_pins)
2874 return 0xffffffff; 2858 return 0xffffffff;
@@ -2883,7 +2867,7 @@ static u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
2883 } 2867 }
2884} 2868}
2885 2869
2886static inline int ath9k_hw_post_attach(struct ath_hal *ah) 2870static int ath9k_hw_post_attach(struct ath_hal *ah)
2887{ 2871{
2888 int ecode; 2872 int ecode;
2889 2873
@@ -3081,17 +3065,17 @@ static bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
3081 3065
3082 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM; 3066 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
3083 3067
3068#ifdef CONFIG_RFKILL
3084 ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT); 3069 ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT);
3085 if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) { 3070 if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) {
3086 ahp->ah_gpioSelect = 3071 ah->ah_rfkill_gpio =
3087 MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL); 3072 MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL);
3088 ahp->ah_polarity = 3073 ah->ah_rfkill_polarity =
3089 MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY); 3074 MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY);
3090 3075
3091 ath9k_hw_setcapability(ah, ATH9K_CAP_RFSILENT, 1, true,
3092 NULL);
3093 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; 3076 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
3094 } 3077 }
3078#endif
3095 3079
3096 if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) || 3080 if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) ||
3097 (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) || 3081 (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) ||
@@ -3595,7 +3579,7 @@ static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin,
3595 return true; 3579 return true;
3596} 3580}
3597 3581
3598static inline void 3582static void
3599ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah, 3583ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah,
3600 struct ath9k_channel *chan, 3584 struct ath9k_channel *chan,
3601 struct cal_data_per_freq *pRawDataSet, 3585 struct cal_data_per_freq *pRawDataSet,
@@ -3777,7 +3761,7 @@ ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah,
3777 return; 3761 return;
3778} 3762}
3779 3763
3780static inline bool 3764static bool
3781ath9k_hw_set_power_cal_table(struct ath_hal *ah, 3765ath9k_hw_set_power_cal_table(struct ath_hal *ah,
3782 struct ar5416_eeprom *pEepData, 3766 struct ar5416_eeprom *pEepData,
3783 struct ath9k_channel *chan, 3767 struct ath9k_channel *chan,
@@ -3980,7 +3964,7 @@ void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore)
3980 } 3964 }
3981} 3965}
3982 3966
3983static inline void 3967static void
3984ath9k_hw_get_legacy_target_powers(struct ath_hal *ah, 3968ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
3985 struct ath9k_channel *chan, 3969 struct ath9k_channel *chan,
3986 struct cal_target_power_leg *powInfo, 3970 struct cal_target_power_leg *powInfo,
@@ -4046,7 +4030,7 @@ ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
4046 } 4030 }
4047} 4031}
4048 4032
4049static inline void 4033static void
4050ath9k_hw_get_target_powers(struct ath_hal *ah, 4034ath9k_hw_get_target_powers(struct ath_hal *ah,
4051 struct ath9k_channel *chan, 4035 struct ath9k_channel *chan,
4052 struct cal_target_power_ht *powInfo, 4036 struct cal_target_power_ht *powInfo,
@@ -4113,7 +4097,7 @@ ath9k_hw_get_target_powers(struct ath_hal *ah,
4113 } 4097 }
4114} 4098}
4115 4099
4116static inline u16 4100static u16
4117ath9k_hw_get_max_edge_power(u16 freq, 4101ath9k_hw_get_max_edge_power(u16 freq,
4118 struct cal_ctl_edges *pRdEdgesPower, 4102 struct cal_ctl_edges *pRdEdgesPower,
4119 bool is2GHz) 4103 bool is2GHz)
@@ -4143,7 +4127,7 @@ ath9k_hw_get_max_edge_power(u16 freq,
4143 return twiceMaxEdgePower; 4127 return twiceMaxEdgePower;
4144} 4128}
4145 4129
4146static inline bool 4130static bool
4147ath9k_hw_set_power_per_rate_table(struct ath_hal *ah, 4131ath9k_hw_set_power_per_rate_table(struct ath_hal *ah,
4148 struct ar5416_eeprom *pEepData, 4132 struct ar5416_eeprom *pEepData,
4149 struct ath9k_channel *chan, 4133 struct ath9k_channel *chan,
@@ -5122,7 +5106,7 @@ static void ath9k_hw_spur_mitigate(struct ath_hal *ah,
5122 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); 5106 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
5123} 5107}
5124 5108
5125static inline void ath9k_hw_init_chain_masks(struct ath_hal *ah) 5109static void ath9k_hw_init_chain_masks(struct ath_hal *ah)
5126{ 5110{
5127 struct ath_hal_5416 *ahp = AH5416(ah); 5111 struct ath_hal_5416 *ahp = AH5416(ah);
5128 int rx_chainmask, tx_chainmask; 5112 int rx_chainmask, tx_chainmask;
@@ -5326,7 +5310,7 @@ bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us)
5326 } 5310 }
5327} 5311}
5328 5312
5329static inline void ath9k_hw_init_user_settings(struct ath_hal *ah) 5313static void ath9k_hw_init_user_settings(struct ath_hal *ah)
5330{ 5314{
5331 struct ath_hal_5416 *ahp = AH5416(ah); 5315 struct ath_hal_5416 *ahp = AH5416(ah);
5332 5316
@@ -5345,7 +5329,7 @@ static inline void ath9k_hw_init_user_settings(struct ath_hal *ah)
5345 ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout); 5329 ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout);
5346} 5330}
5347 5331
5348static inline int 5332static int
5349ath9k_hw_process_ini(struct ath_hal *ah, 5333ath9k_hw_process_ini(struct ath_hal *ah,
5350 struct ath9k_channel *chan, 5334 struct ath9k_channel *chan,
5351 enum ath9k_ht_macmode macmode) 5335 enum ath9k_ht_macmode macmode)
@@ -5476,7 +5460,7 @@ ath9k_hw_process_ini(struct ath_hal *ah,
5476 return 0; 5460 return 0;
5477} 5461}
5478 5462
5479static inline void ath9k_hw_setup_calibration(struct ath_hal *ah, 5463static void ath9k_hw_setup_calibration(struct ath_hal *ah,
5480 struct hal_cal_list *currCal) 5464 struct hal_cal_list *currCal)
5481{ 5465{
5482 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0), 5466 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
@@ -5512,8 +5496,8 @@ static inline void ath9k_hw_setup_calibration(struct ath_hal *ah,
5512 AR_PHY_TIMING_CTRL4_DO_CAL); 5496 AR_PHY_TIMING_CTRL4_DO_CAL);
5513} 5497}
5514 5498
5515static inline void ath9k_hw_reset_calibration(struct ath_hal *ah, 5499static void ath9k_hw_reset_calibration(struct ath_hal *ah,
5516 struct hal_cal_list *currCal) 5500 struct hal_cal_list *currCal)
5517{ 5501{
5518 struct ath_hal_5416 *ahp = AH5416(ah); 5502 struct ath_hal_5416 *ahp = AH5416(ah);
5519 int i; 5503 int i;
@@ -5532,7 +5516,7 @@ static inline void ath9k_hw_reset_calibration(struct ath_hal *ah,
5532 ahp->ah_CalSamples = 0; 5516 ahp->ah_CalSamples = 0;
5533} 5517}
5534 5518
5535static inline void 5519static void
5536ath9k_hw_per_calibration(struct ath_hal *ah, 5520ath9k_hw_per_calibration(struct ath_hal *ah,
5537 struct ath9k_channel *ichan, 5521 struct ath9k_channel *ichan,
5538 u8 rxchainmask, 5522 u8 rxchainmask,
@@ -5622,7 +5606,7 @@ static inline bool ath9k_hw_run_init_cals(struct ath_hal *ah,
5622 return true; 5606 return true;
5623} 5607}
5624 5608
5625static inline bool 5609static bool
5626ath9k_hw_channel_change(struct ath_hal *ah, 5610ath9k_hw_channel_change(struct ath_hal *ah,
5627 struct ath9k_channel *chan, 5611 struct ath9k_channel *chan,
5628 enum ath9k_ht_macmode macmode) 5612 enum ath9k_ht_macmode macmode)
@@ -5799,8 +5783,8 @@ static bool ath9k_hw_iscal_supported(struct ath_hal *ah,
5799 return retval; 5783 return retval;
5800} 5784}
5801 5785
5802static inline bool ath9k_hw_init_cal(struct ath_hal *ah, 5786static bool ath9k_hw_init_cal(struct ath_hal *ah,
5803 struct ath9k_channel *chan) 5787 struct ath9k_channel *chan)
5804{ 5788{
5805 struct ath_hal_5416 *ahp = AH5416(ah); 5789 struct ath_hal_5416 *ahp = AH5416(ah);
5806 struct ath9k_channel *ichan = 5790 struct ath9k_channel *ichan =
@@ -5861,7 +5845,7 @@ static inline bool ath9k_hw_init_cal(struct ath_hal *ah,
5861} 5845}
5862 5846
5863 5847
5864bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode, 5848bool ath9k_hw_reset(struct ath_hal *ah,
5865 struct ath9k_channel *chan, 5849 struct ath9k_channel *chan,
5866 enum ath9k_ht_macmode macmode, 5850 enum ath9k_ht_macmode macmode,
5867 u8 txchainmask, u8 rxchainmask, 5851 u8 txchainmask, u8 rxchainmask,
@@ -5945,7 +5929,7 @@ bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
5945 else 5929 else
5946 ath9k_hw_set_gpio(ah, 9, 1); 5930 ath9k_hw_set_gpio(ah, 9, 1);
5947 } 5931 }
5948 ath9k_hw_cfg_output(ah, 9, ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT); 5932 ath9k_hw_cfg_output(ah, 9, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
5949 } 5933 }
5950 5934
5951 ecode = ath9k_hw_process_ini(ah, chan, macmode); 5935 ecode = ath9k_hw_process_ini(ah, chan, macmode);
@@ -5975,7 +5959,7 @@ bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
5975 | (ah->ah_config. 5959 | (ah->ah_config.
5976 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0) 5960 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
5977 | ahp->ah_staId1Defaults); 5961 | ahp->ah_staId1Defaults);
5978 ath9k_hw_set_operating_mode(ah, opmode); 5962 ath9k_hw_set_operating_mode(ah, ah->ah_opmode);
5979 5963
5980 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask)); 5964 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
5981 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4)); 5965 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
@@ -6005,13 +5989,15 @@ bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
6005 for (i = 0; i < ah->ah_caps.total_queues; i++) 5989 for (i = 0; i < ah->ah_caps.total_queues; i++)
6006 ath9k_hw_resettxqueue(ah, i); 5990 ath9k_hw_resettxqueue(ah, i);
6007 5991
6008 ath9k_hw_init_interrupt_masks(ah, opmode); 5992 ath9k_hw_init_interrupt_masks(ah, ah->ah_opmode);
6009 ath9k_hw_init_qos(ah); 5993 ath9k_hw_init_qos(ah);
6010 5994
5995#ifdef CONFIG_RFKILL
5996 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
5997 ath9k_enable_rfkill(ah);
5998#endif
6011 ath9k_hw_init_user_settings(ah); 5999 ath9k_hw_init_user_settings(ah);
6012 6000
6013 ah->ah_opmode = opmode;
6014
6015 REG_WRITE(ah, AR_STA_ID1, 6001 REG_WRITE(ah, AR_STA_ID1,
6016 REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM); 6002 REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
6017 6003
@@ -6539,31 +6525,6 @@ ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask)
6539 return true; 6525 return true;
6540} 6526}
6541 6527
6542#ifdef CONFIG_ATH9K_RFKILL
6543static void ath9k_enable_rfkill(struct ath_hal *ah)
6544{
6545 struct ath_hal_5416 *ahp = AH5416(ah);
6546
6547 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
6548 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
6549
6550 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
6551 AR_GPIO_INPUT_MUX2_RFSILENT);
6552
6553 ath9k_hw_cfg_gpio_input(ah, ahp->ah_gpioSelect);
6554 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
6555
6556 if (ahp->ah_gpioBit == ath9k_hw_gpio_get(ah, ahp->ah_gpioSelect)) {
6557
6558 ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
6559 !ahp->ah_gpioBit);
6560 } else {
6561 ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
6562 ahp->ah_gpioBit);
6563 }
6564}
6565#endif
6566
6567void 6528void
6568ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid, 6529ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
6569 u16 assocId) 6530 u16 assocId)
@@ -7285,15 +7246,15 @@ ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
7285 } 7246 }
7286 break; 7247 break;
7287 case ATH9K_CIPHER_WEP: 7248 case ATH9K_CIPHER_WEP:
7288 if (k->kv_len < 40 / NBBY) { 7249 if (k->kv_len < LEN_WEP40) {
7289 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, 7250 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7290 "%s: WEP key length %u too small\n", 7251 "%s: WEP key length %u too small\n",
7291 __func__, k->kv_len); 7252 __func__, k->kv_len);
7292 return false; 7253 return false;
7293 } 7254 }
7294 if (k->kv_len <= 40 / NBBY) 7255 if (k->kv_len <= LEN_WEP40)
7295 keyType = AR_KEYTABLE_TYPE_40; 7256 keyType = AR_KEYTABLE_TYPE_40;
7296 else if (k->kv_len <= 104 / NBBY) 7257 else if (k->kv_len <= LEN_WEP104)
7297 keyType = AR_KEYTABLE_TYPE_104; 7258 keyType = AR_KEYTABLE_TYPE_104;
7298 else 7259 else
7299 keyType = AR_KEYTABLE_TYPE_128; 7260 keyType = AR_KEYTABLE_TYPE_128;
@@ -7313,7 +7274,7 @@ ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
7313 key2 = get_unaligned_le32(k->kv_val + 6) ^ xorMask; 7274 key2 = get_unaligned_le32(k->kv_val + 6) ^ xorMask;
7314 key3 = (get_unaligned_le16(k->kv_val + 10) ^ xorMask) & 0xffff; 7275 key3 = (get_unaligned_le16(k->kv_val + 10) ^ xorMask) & 0xffff;
7315 key4 = get_unaligned_le32(k->kv_val + 12) ^ xorMask; 7276 key4 = get_unaligned_le32(k->kv_val + 12) ^ xorMask;
7316 if (k->kv_len <= 104 / NBBY) 7277 if (k->kv_len <= LEN_WEP104)
7317 key4 &= 0xff; 7278 key4 &= 0xff;
7318 7279
7319 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) { 7280 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
@@ -7678,8 +7639,7 @@ bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
7678 REG_WRITE(ah, AR_DRETRY_LIMIT(q), 7639 REG_WRITE(ah, AR_DRETRY_LIMIT(q),
7679 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) 7640 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
7680 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) 7641 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
7681 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH) 7642 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));
7682 );
7683 7643
7684 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); 7644 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
7685 REG_WRITE(ah, AR_DMISC(q), 7645 REG_WRITE(ah, AR_DMISC(q),
@@ -8324,15 +8284,7 @@ struct ath_hal *ath9k_hw_attach(u16 devid,
8324 *error = -ENXIO; 8284 *error = -ENXIO;
8325 break; 8285 break;
8326 } 8286 }
8327 if (ah != NULL) { 8287
8328 ah->ah_devid = ah->ah_devid;
8329 ah->ah_subvendorid = ah->ah_subvendorid;
8330 ah->ah_macVersion = ah->ah_macVersion;
8331 ah->ah_macRev = ah->ah_macRev;
8332 ah->ah_phyRev = ah->ah_phyRev;
8333 ah->ah_analog5GhzRev = ah->ah_analog5GhzRev;
8334 ah->ah_analog2GhzRev = ah->ah_analog2GhzRev;
8335 }
8336 return ah; 8288 return ah;
8337} 8289}
8338 8290
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
index ae680f21ba7e..2113818ee934 100644
--- a/drivers/net/wireless/ath9k/hw.h
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -314,14 +314,11 @@ struct ar5416_desc {
314#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \ 314#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
315 MS(ads->ds_rxstatus0, AR_RxRate) : \ 315 MS(ads->ds_rxstatus0, AR_RxRate) : \
316 (ads->ds_rxstatus3 >> 2) & 0xFF) 316 (ads->ds_rxstatus3 >> 2) & 0xFF)
317#define RXSTATUS_DUPLICATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
318 MS(ads->ds_rxstatus3, AR_Parallel40) : \
319 (ads->ds_rxstatus3 >> 10) & 0x1)
320 317
321#define set11nTries(_series, _index) \ 318#define set11nTries(_series, _index) \
322 (SM((_series)[_index].Tries, AR_XmitDataTries##_index)) 319 (SM((_series)[_index].Tries, AR_XmitDataTries##_index))
323 320
324#define set11nRate(_series, _index) \ 321#define set11nRate(_series, _index) \
325 (SM((_series)[_index].Rate, AR_XmitRate##_index)) 322 (SM((_series)[_index].Rate, AR_XmitRate##_index))
326 323
327#define set11nPktDurRTSCTS(_series, _index) \ 324#define set11nPktDurRTSCTS(_series, _index) \
@@ -330,11 +327,11 @@ struct ar5416_desc {
330 AR_RTSCTSQual##_index : 0)) 327 AR_RTSCTSQual##_index : 0))
331 328
332#define set11nRateFlags(_series, _index) \ 329#define set11nRateFlags(_series, _index) \
333 (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \ 330 (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \
334 AR_2040_##_index : 0) \ 331 AR_2040_##_index : 0) \
335 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \ 332 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
336 AR_GI##_index : 0) \ 333 AR_GI##_index : 0) \
337 |SM((_series)[_index].ChSel, AR_ChainSel##_index)) 334 |SM((_series)[_index].ChSel, AR_ChainSel##_index))
338 335
339#define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100) 336#define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100)
340 337
@@ -346,9 +343,6 @@ struct ar5416_desc {
346#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1) 343#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1)
347#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD 344#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD
348 345
349#define NUM_CORNER_FIX_BITS_2133 7
350#define CCK_OFDM_GAIN_DELTA 15
351
352struct ar5416AniState { 346struct ar5416AniState {
353 struct ath9k_channel c; 347 struct ath9k_channel c;
354 u8 noiseImmunityLevel; 348 u8 noiseImmunityLevel;
@@ -377,11 +371,8 @@ struct ar5416AniState {
377}; 371};
378 372
379#define HAL_PROCESS_ANI 0x00000001 373#define HAL_PROCESS_ANI 0x00000001
380#define HAL_RADAR_EN 0x80000000
381#define HAL_AR_EN 0x40000000
382
383#define DO_ANI(ah) \ 374#define DO_ANI(ah) \
384 ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI)) 375 ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI))
385 376
386struct ar5416Stats { 377struct ar5416Stats {
387 u32 ast_ani_niup; 378 u32 ast_ani_niup;
@@ -425,7 +416,6 @@ struct ar5416Stats {
425#define AR5416_EEP_MINOR_VER_7 0x7 416#define AR5416_EEP_MINOR_VER_7 0x7
426#define AR5416_EEP_MINOR_VER_9 0x9 417#define AR5416_EEP_MINOR_VER_9 0x9
427 418
428#define AR5416_EEP_START_LOC 256
429#define AR5416_NUM_5G_CAL_PIERS 8 419#define AR5416_NUM_5G_CAL_PIERS 8
430#define AR5416_NUM_2G_CAL_PIERS 4 420#define AR5416_NUM_2G_CAL_PIERS 4
431#define AR5416_NUM_5G_20_TARGET_POWERS 8 421#define AR5416_NUM_5G_20_TARGET_POWERS 8
@@ -441,25 +431,10 @@ struct ar5416Stats {
441#define AR5416_EEPROM_MODAL_SPURS 5 431#define AR5416_EEPROM_MODAL_SPURS 5
442#define AR5416_MAX_RATE_POWER 63 432#define AR5416_MAX_RATE_POWER 63
443#define AR5416_NUM_PDADC_VALUES 128 433#define AR5416_NUM_PDADC_VALUES 128
444#define AR5416_NUM_RATES 16
445#define AR5416_BCHAN_UNUSED 0xFF 434#define AR5416_BCHAN_UNUSED 0xFF
446#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64 435#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
447#define AR5416_EEPMISC_BIG_ENDIAN 0x01
448#define AR5416_MAX_CHAINS 3 436#define AR5416_MAX_CHAINS 3
449#define AR5416_ANT_16S 25
450
451#define AR5416_NUM_ANT_CHAIN_FIELDS 7
452#define AR5416_NUM_ANT_COMMON_FIELDS 4
453#define AR5416_SIZE_ANT_CHAIN_FIELD 3
454#define AR5416_SIZE_ANT_COMMON_FIELD 4
455#define AR5416_ANT_CHAIN_MASK 0x7
456#define AR5416_ANT_COMMON_MASK 0xf
457#define AR5416_CHAIN_0_IDX 0
458#define AR5416_CHAIN_1_IDX 1
459#define AR5416_CHAIN_2_IDX 2
460
461#define AR5416_PWR_TABLE_OFFSET -5 437#define AR5416_PWR_TABLE_OFFSET -5
462#define AR5416_LEGACY_CHAINMASK 1
463 438
464enum eeprom_param { 439enum eeprom_param {
465 EEP_NFTHRESH_5, 440 EEP_NFTHRESH_5,
@@ -633,7 +608,7 @@ struct ar5416IniArray {
633}; 608};
634 609
635#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \ 610#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \
636 (iniarray)->ia_array = (u32 *)(array); \ 611 (iniarray)->ia_array = (u32 *)(array); \
637 (iniarray)->ia_rows = (rows); \ 612 (iniarray)->ia_rows = (rows); \
638 (iniarray)->ia_columns = (columns); \ 613 (iniarray)->ia_columns = (columns); \
639 } while (0) 614 } while (0)
@@ -641,16 +616,16 @@ struct ar5416IniArray {
641#define INI_RA(iniarray, row, column) \ 616#define INI_RA(iniarray, row, column) \
642 (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)]) 617 (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)])
643 618
644#define INIT_CAL(_perCal) do { \ 619#define INIT_CAL(_perCal) do { \
645 (_perCal)->calState = CAL_WAITING; \ 620 (_perCal)->calState = CAL_WAITING; \
646 (_perCal)->calNext = NULL; \ 621 (_perCal)->calNext = NULL; \
647 } while (0) 622 } while (0)
648 623
649#define INSERT_CAL(_ahp, _perCal) \ 624#define INSERT_CAL(_ahp, _perCal) \
650 do { \ 625 do { \
651 if ((_ahp)->ah_cal_list_last == NULL) { \ 626 if ((_ahp)->ah_cal_list_last == NULL) { \
652 (_ahp)->ah_cal_list = \ 627 (_ahp)->ah_cal_list = \
653 (_ahp)->ah_cal_list_last = (_perCal); \ 628 (_ahp)->ah_cal_list_last = (_perCal); \
654 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \ 629 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
655 } else { \ 630 } else { \
656 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \ 631 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
@@ -696,25 +671,29 @@ struct hal_cal_list {
696struct ath_hal_5416 { 671struct ath_hal_5416 {
697 struct ath_hal ah; 672 struct ath_hal ah;
698 struct ar5416_eeprom ah_eeprom; 673 struct ar5416_eeprom ah_eeprom;
674 struct ar5416Stats ah_stats;
675 struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
676 void __iomem *ah_cal_mem;
677
699 u8 ah_macaddr[ETH_ALEN]; 678 u8 ah_macaddr[ETH_ALEN];
700 u8 ah_bssid[ETH_ALEN]; 679 u8 ah_bssid[ETH_ALEN];
701 u8 ah_bssidmask[ETH_ALEN]; 680 u8 ah_bssidmask[ETH_ALEN];
702 u16 ah_assocId; 681 u16 ah_assocId;
682
703 int16_t ah_curchanRadIndex; 683 int16_t ah_curchanRadIndex;
704 u32 ah_maskReg; 684 u32 ah_maskReg;
705 struct ar5416Stats ah_stats;
706 u32 ah_txDescMask;
707 u32 ah_txOkInterruptMask; 685 u32 ah_txOkInterruptMask;
708 u32 ah_txErrInterruptMask; 686 u32 ah_txErrInterruptMask;
709 u32 ah_txDescInterruptMask; 687 u32 ah_txDescInterruptMask;
710 u32 ah_txEolInterruptMask; 688 u32 ah_txEolInterruptMask;
711 u32 ah_txUrnInterruptMask; 689 u32 ah_txUrnInterruptMask;
712 struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
713 enum ath9k_power_mode ah_powerMode;
714 bool ah_chipFullSleep; 690 bool ah_chipFullSleep;
715 u32 ah_atimWindow; 691 u32 ah_atimWindow;
716 enum ath9k_ant_setting ah_diversityControl;
717 u16 ah_antennaSwitchSwap; 692 u16 ah_antennaSwitchSwap;
693 enum ath9k_power_mode ah_powerMode;
694 enum ath9k_ant_setting ah_diversityControl;
695
696 /* Calibration */
718 enum hal_cal_types ah_suppCals; 697 enum hal_cal_types ah_suppCals;
719 struct hal_cal_list ah_iqCalData; 698 struct hal_cal_list ah_iqCalData;
720 struct hal_cal_list ah_adcGainCalData; 699 struct hal_cal_list ah_adcGainCalData;
@@ -751,16 +730,16 @@ struct ath_hal_5416 {
751 int32_t sign[AR5416_MAX_CHAINS]; 730 int32_t sign[AR5416_MAX_CHAINS];
752 } ah_Meas3; 731 } ah_Meas3;
753 u16 ah_CalSamples; 732 u16 ah_CalSamples;
754 u32 ah_tx6PowerInHalfDbm; 733
755 u32 ah_staId1Defaults; 734 u32 ah_staId1Defaults;
756 u32 ah_miscMode; 735 u32 ah_miscMode;
757 bool ah_tpcEnabled;
758 u32 ah_beaconInterval;
759 enum { 736 enum {
760 AUTO_32KHZ, 737 AUTO_32KHZ,
761 USE_32KHZ, 738 USE_32KHZ,
762 DONT_USE_32KHZ, 739 DONT_USE_32KHZ,
763 } ah_enable32kHzClock; 740 } ah_enable32kHzClock;
741
742 /* RF */
764 u32 *ah_analogBank0Data; 743 u32 *ah_analogBank0Data;
765 u32 *ah_analogBank1Data; 744 u32 *ah_analogBank1Data;
766 u32 *ah_analogBank2Data; 745 u32 *ah_analogBank2Data;
@@ -770,8 +749,9 @@ struct ath_hal_5416 {
770 u32 *ah_analogBank7Data; 749 u32 *ah_analogBank7Data;
771 u32 *ah_addac5416_21; 750 u32 *ah_addac5416_21;
772 u32 *ah_bank6Temp; 751 u32 *ah_bank6Temp;
773 u32 ah_ofdmTxPower; 752
774 int16_t ah_txPowerIndexOffset; 753 int16_t ah_txPowerIndexOffset;
754 u32 ah_beaconInterval;
775 u32 ah_slottime; 755 u32 ah_slottime;
776 u32 ah_acktimeout; 756 u32 ah_acktimeout;
777 u32 ah_ctstimeout; 757 u32 ah_ctstimeout;
@@ -780,7 +760,8 @@ struct ath_hal_5416 {
780 u32 ah_gpioSelect; 760 u32 ah_gpioSelect;
781 u32 ah_polarity; 761 u32 ah_polarity;
782 u32 ah_gpioBit; 762 u32 ah_gpioBit;
783 bool ah_eepEnabled; 763
764 /* ANI */
784 u32 ah_procPhyErr; 765 u32 ah_procPhyErr;
785 bool ah_hasHwPhyCounters; 766 bool ah_hasHwPhyCounters;
786 u32 ah_aniPeriod; 767 u32 ah_aniPeriod;
@@ -790,18 +771,14 @@ struct ath_hal_5416 {
790 int ah_coarseHigh[5]; 771 int ah_coarseHigh[5];
791 int ah_coarseLow[5]; 772 int ah_coarseLow[5];
792 int ah_firpwr[5]; 773 int ah_firpwr[5];
793 u16 ah_ratesArray[16]; 774 enum ath9k_ani_cmd ah_ani_function;
775
794 u32 ah_intrTxqs; 776 u32 ah_intrTxqs;
795 bool ah_intrMitigation; 777 bool ah_intrMitigation;
796 u32 ah_cycleCount;
797 u32 ah_ctlBusy;
798 u32 ah_extBusy;
799 enum ath9k_ht_extprotspacing ah_extprotspacing; 778 enum ath9k_ht_extprotspacing ah_extprotspacing;
800 u8 ah_txchainmask; 779 u8 ah_txchainmask;
801 u8 ah_rxchainmask; 780 u8 ah_rxchainmask;
802 int ah_hwp; 781
803 void __iomem *ah_cal_mem;
804 enum ath9k_ani_cmd ah_ani_function;
805 struct ar5416IniArray ah_iniModes; 782 struct ar5416IniArray ah_iniModes;
806 struct ar5416IniArray ah_iniCommon; 783 struct ar5416IniArray ah_iniCommon;
807 struct ar5416IniArray ah_iniBank0; 784 struct ar5416IniArray ah_iniBank0;
@@ -820,10 +797,6 @@ struct ath_hal_5416 {
820 797
821#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) 798#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
822 799
823#define IS_5416_EMU(ah) \
824 ((ah->ah_devid == AR5416_DEVID_EMU) || \
825 (ah->ah_devid == AR5416_DEVID_EMU_PCIE))
826
827#define ar5416RfDetach(ah) do { \ 800#define ar5416RfDetach(ah) do { \
828 if (AH5416(ah)->ah_rfHal.rfDetach != NULL) \ 801 if (AH5416(ah)->ah_rfHal.rfDetach != NULL) \
829 AH5416(ah)->ah_rfHal.rfDetach(ah); \ 802 AH5416(ah)->ah_rfHal.rfDetach(ah); \
@@ -841,8 +814,8 @@ struct ath_hal_5416 {
841#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \ 814#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \
842 int r; \ 815 int r; \
843 for (r = 0; r < ((iniarray)->ia_rows); r++) { \ 816 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
844 REG_WRITE(ah, INI_RA((iniarray), (r), 0), \ 817 REG_WRITE(ah, INI_RA((iniarray), (r), 0), \
845 INI_RA((iniarray), r, (column))); \ 818 INI_RA((iniarray), r, (column))); \
846 DO_DELAY(regWr); \ 819 DO_DELAY(regWr); \
847 } \ 820 } \
848 } while (0) 821 } while (0)
@@ -852,30 +825,21 @@ struct ath_hal_5416 {
852#define COEF_SCALE_S 24 825#define COEF_SCALE_S 24
853#define HT40_CHANNEL_CENTER_SHIFT 10 826#define HT40_CHANNEL_CENTER_SHIFT 10
854 827
855#define ar5416CheckOpMode(_opmode) \
856 ((_opmode == ATH9K_M_STA) || (_opmode == ATH9K_M_IBSS) || \
857 (_opmode == ATH9K_M_HOSTAP) || (_opmode == ATH9K_M_MONITOR))
858
859#define AR5416_EEPROM_MAGIC_OFFSET 0x0 828#define AR5416_EEPROM_MAGIC_OFFSET 0x0
860 829
861#define AR5416_EEPROM_S 2 830#define AR5416_EEPROM_S 2
862#define AR5416_EEPROM_OFFSET 0x2000 831#define AR5416_EEPROM_OFFSET 0x2000
863#define AR5416_EEPROM_START_ADDR \ 832#define AR5416_EEPROM_START_ADDR \
864 (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200 833 (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
865#define AR5416_EEPROM_MAX 0xae0 834#define AR5416_EEPROM_MAX 0xae0
866#define ar5416_get_eep_ver(_ahp) \ 835#define ar5416_get_eep_ver(_ahp) \
867 (((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF) 836 (((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF)
868#define ar5416_get_eep_rev(_ahp) \ 837#define ar5416_get_eep_rev(_ahp) \
869 (((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF) 838 (((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF)
870#define ar5416_get_ntxchains(_txchainmask) \ 839#define ar5416_get_ntxchains(_txchainmask) \
871 (((_txchainmask >> 2) & 1) + \ 840 (((_txchainmask >> 2) & 1) + \
872 ((_txchainmask >> 1) & 1) + (_txchainmask & 1)) 841 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
873 842
874#define IS_EEP_MINOR_V3(_ahp) \
875 (ath9k_hw_get_eeprom((_ahp), EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_3)
876
877#define FIXED_CCA_THRESHOLD 15
878
879#ifdef __BIG_ENDIAN 843#ifdef __BIG_ENDIAN
880#define AR5416_EEPROM_MAGIC 0x5aa5 844#define AR5416_EEPROM_MAGIC 0x5aa5
881#else 845#else
@@ -910,8 +874,6 @@ struct ath_hal_5416 {
910#define AR_GPIOD_MASK 0x00001FFF 874#define AR_GPIOD_MASK 0x00001FFF
911#define AR_GPIO_BIT(_gpio) (1 << (_gpio)) 875#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
912 876
913#define MAX_ANALOG_START 319
914
915#define HAL_EP_RND(x, mul) \ 877#define HAL_EP_RND(x, mul) \
916 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 878 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
917#define BEACON_RSSI(ahp) \ 879#define BEACON_RSSI(ahp) \
@@ -923,8 +885,6 @@ struct ath_hal_5416 {
923#define AH_TIMEOUT 100000 885#define AH_TIMEOUT 100000
924#define AH_TIME_QUANTUM 10 886#define AH_TIME_QUANTUM 10
925 887
926#define IS(_c, _f) (((_c)->channelFlags & _f) || 0)
927
928#define AR_KEYTABLE_SIZE 128 888#define AR_KEYTABLE_SIZE 128
929#define POWER_UP_TIME 200000 889#define POWER_UP_TIME 200000
930 890
@@ -964,6 +924,6 @@ struct ath_hal_5416 {
964#define OFDM_SYMBOL_TIME_QUARTER 16 924#define OFDM_SYMBOL_TIME_QUARTER 16
965 925
966u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp, 926u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
967 enum eeprom_param param); 927 enum eeprom_param param);
968 928
969#endif 929#endif
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 2888778040e4..1ba18006f475 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -22,8 +22,6 @@
22#define ATH_PCI_VERSION "0.1" 22#define ATH_PCI_VERSION "0.1"
23 23
24#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13 24#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13
25#define IEEE80211_ACTION_CAT_HT 7
26#define IEEE80211_ACTION_HT_TXCHWIDTH 0
27 25
28static char *dev_info = "ath9k"; 26static char *dev_info = "ath9k";
29 27
@@ -142,7 +140,7 @@ static int ath_key_config(struct ath_softc *sc,
142 struct ath9k_keyval hk; 140 struct ath9k_keyval hk;
143 const u8 *mac = NULL; 141 const u8 *mac = NULL;
144 int ret = 0; 142 int ret = 0;
145 enum ieee80211_if_types opmode; 143 enum nl80211_iftype opmode;
146 144
147 memset(&hk, 0, sizeof(hk)); 145 memset(&hk, 0, sizeof(hk));
148 146
@@ -181,14 +179,14 @@ static int ath_key_config(struct ath_softc *sc,
181 */ 179 */
182 if (is_broadcast_ether_addr(addr)) { 180 if (is_broadcast_ether_addr(addr)) {
183 switch (opmode) { 181 switch (opmode) {
184 case IEEE80211_IF_TYPE_STA: 182 case NL80211_IFTYPE_STATION:
185 /* default key: could be group WPA key 183 /* default key: could be group WPA key
186 * or could be static WEP key */ 184 * or could be static WEP key */
187 mac = NULL; 185 mac = NULL;
188 break; 186 break;
189 case IEEE80211_IF_TYPE_IBSS: 187 case NL80211_IFTYPE_ADHOC:
190 break; 188 break;
191 case IEEE80211_IF_TYPE_AP: 189 case NL80211_IFTYPE_AP:
192 break; 190 break;
193 default: 191 default:
194 ASSERT(0); 192 ASSERT(0);
@@ -206,36 +204,32 @@ static int ath_key_config(struct ath_softc *sc,
206 if (!ret) 204 if (!ret)
207 return -EIO; 205 return -EIO;
208 206
209 sc->sc_keytype = hk.kv_type; 207 if (mac)
208 sc->sc_keytype = hk.kv_type;
210 return 0; 209 return 0;
211} 210}
212 211
213static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key) 212static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
214{ 213{
215#define ATH_MAX_NUM_KEYS 4
216 int freeslot; 214 int freeslot;
217 215
218 freeslot = (key->keyidx >= ATH_MAX_NUM_KEYS) ? 1 : 0; 216 freeslot = (key->keyidx >= 4) ? 1 : 0;
219 ath_key_reset(sc, key->keyidx, freeslot); 217 ath_key_reset(sc, key->keyidx, freeslot);
220#undef ATH_MAX_NUM_KEYS
221} 218}
222 219
223static void setup_ht_cap(struct ieee80211_ht_info *ht_info) 220static void setup_ht_cap(struct ieee80211_ht_info *ht_info)
224{ 221{
225/* Until mac80211 includes these fields */ 222#define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
226 223#define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
227#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
228#define IEEE80211_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
229#define IEEE80211_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
230 224
231 ht_info->ht_supported = 1; 225 ht_info->ht_supported = 1;
232 ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH 226 ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH
233 |(u16)IEEE80211_HT_CAP_MIMO_PS 227 |(u16)IEEE80211_HT_CAP_SM_PS
234 |(u16)IEEE80211_HT_CAP_SGI_40 228 |(u16)IEEE80211_HT_CAP_SGI_40
235 |(u16)IEEE80211_HT_CAP_DSSSCCK40; 229 |(u16)IEEE80211_HT_CAP_DSSSCCK40;
236 230
237 ht_info->ampdu_factor = IEEE80211_HT_CAP_MAXRXAMPDU_65536; 231 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
238 ht_info->ampdu_density = IEEE80211_HT_CAP_MPDUDENSITY_8; 232 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
239 /* setup supported mcs set */ 233 /* setup supported mcs set */
240 memset(ht_info->supp_mcs_set, 0, 16); 234 memset(ht_info->supp_mcs_set, 0, 16);
241 ht_info->supp_mcs_set[0] = 0xff; 235 ht_info->supp_mcs_set[0] = 0xff;
@@ -331,6 +325,693 @@ static u8 parse_mpdudensity(u8 mpdudensity)
331 } 325 }
332} 326}
333 327
328static void ath9k_ht_conf(struct ath_softc *sc,
329 struct ieee80211_bss_conf *bss_conf)
330{
331#define IEEE80211_HT_CAP_40MHZ_INTOLERANT BIT(14)
332 struct ath_ht_info *ht_info = &sc->sc_ht_info;
333
334 if (bss_conf->assoc_ht) {
335 ht_info->ext_chan_offset =
336 bss_conf->ht_bss_conf->bss_cap &
337 IEEE80211_HT_IE_CHA_SEC_OFFSET;
338
339 if (!(bss_conf->ht_conf->cap &
340 IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
341 (bss_conf->ht_bss_conf->bss_cap &
342 IEEE80211_HT_IE_CHA_WIDTH))
343 ht_info->tx_chan_width = ATH9K_HT_MACMODE_2040;
344 else
345 ht_info->tx_chan_width = ATH9K_HT_MACMODE_20;
346
347 ath9k_hw_set11nmac2040(sc->sc_ah, ht_info->tx_chan_width);
348 ht_info->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
349 bss_conf->ht_conf->ampdu_factor);
350 ht_info->mpdudensity =
351 parse_mpdudensity(bss_conf->ht_conf->ampdu_density);
352
353 }
354
355#undef IEEE80211_HT_CAP_40MHZ_INTOLERANT
356}
357
358static void ath9k_bss_assoc_info(struct ath_softc *sc,
359 struct ieee80211_bss_conf *bss_conf)
360{
361 struct ieee80211_hw *hw = sc->hw;
362 struct ieee80211_channel *curchan = hw->conf.channel;
363 struct ath_vap *avp;
364 int pos;
365 DECLARE_MAC_BUF(mac);
366
367 if (bss_conf->assoc) {
368 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Bss Info ASSOC %d\n",
369 __func__,
370 bss_conf->aid);
371
372 avp = sc->sc_vaps[0];
373 if (avp == NULL) {
374 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
375 __func__);
376 return;
377 }
378
379 /* New association, store aid */
380 if (avp->av_opmode == ATH9K_M_STA) {
381 sc->sc_curaid = bss_conf->aid;
382 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
383 sc->sc_curaid);
384 }
385
386 /* Configure the beacon */
387 ath_beacon_config(sc, 0);
388 sc->sc_flags |= SC_OP_BEACONS;
389
390 /* Reset rssi stats */
391 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
392 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
393 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
394 sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
395
396 /* Update chainmask */
397 ath_update_chainmask(sc, bss_conf->assoc_ht);
398
399 DPRINTF(sc, ATH_DBG_CONFIG,
400 "%s: bssid %s aid 0x%x\n",
401 __func__,
402 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
403
404 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
405 __func__,
406 curchan->center_freq);
407
408 pos = ath_get_channel(sc, curchan);
409 if (pos == -1) {
410 DPRINTF(sc, ATH_DBG_FATAL,
411 "%s: Invalid channel\n", __func__);
412 return;
413 }
414
415 if (hw->conf.ht_conf.ht_supported)
416 sc->sc_ah->ah_channels[pos].chanmode =
417 ath_get_extchanmode(sc, curchan);
418 else
419 sc->sc_ah->ah_channels[pos].chanmode =
420 (curchan->band == IEEE80211_BAND_2GHZ) ?
421 CHANNEL_G : CHANNEL_A;
422
423 /* set h/w channel */
424 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
425 DPRINTF(sc, ATH_DBG_FATAL,
426 "%s: Unable to set channel\n",
427 __func__);
428
429 ath_rate_newstate(sc, avp);
430 /* Update ratectrl about the new state */
431 ath_rc_node_update(hw, avp->rc_node);
432 } else {
433 DPRINTF(sc, ATH_DBG_CONFIG,
434 "%s: Bss Info DISSOC\n", __func__);
435 sc->sc_curaid = 0;
436 }
437}
438
439void ath_get_beaconconfig(struct ath_softc *sc,
440 int if_id,
441 struct ath_beacon_config *conf)
442{
443 struct ieee80211_hw *hw = sc->hw;
444
445 /* fill in beacon config data */
446
447 conf->beacon_interval = hw->conf.beacon_int;
448 conf->listen_interval = 100;
449 conf->dtim_count = 1;
450 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
451}
452
453void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
454 struct ath_xmit_status *tx_status, struct ath_node *an)
455{
456 struct ieee80211_hw *hw = sc->hw;
457 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
458
459 DPRINTF(sc, ATH_DBG_XMIT,
460 "%s: TX complete: skb: %p\n", __func__, skb);
461
462 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
463 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
464 /* free driver's private data area of tx_info */
465 if (tx_info->driver_data[0] != NULL)
466 kfree(tx_info->driver_data[0]);
467 tx_info->driver_data[0] = NULL;
468 }
469
470 if (tx_status->flags & ATH_TX_BAR) {
471 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
472 tx_status->flags &= ~ATH_TX_BAR;
473 }
474
475 if (tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY)) {
476 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
477 /* Frame was not ACKed, but an ACK was expected */
478 tx_info->status.excessive_retries = 1;
479 }
480 } else {
481 /* Frame was ACKed */
482 tx_info->flags |= IEEE80211_TX_STAT_ACK;
483 }
484
485 tx_info->status.retry_count = tx_status->retries;
486
487 ieee80211_tx_status(hw, skb);
488 if (an)
489 ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE);
490}
491
/*
 * Hand a received frame up to mac80211.
 *
 * Removes any hw-inserted header padding, builds the mac80211 rx status,
 * derives the decryption state from the hw key index and status flags,
 * and gives the aggregation layer first chance to consume the frame when
 * the sender is a known node.  Always returns 0.
 */
int _ath_rx_indicate(struct ath_softc *sc,
		     struct sk_buff *skb,
		     struct ath_recv_status *status,
		     u16 keyix)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_node *an = NULL;
	struct ieee80211_rx_status rx_status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	int padsize;
	enum ATH_RX_TYPE st;

	/* see if any padding is done by the hw and remove it */
	if (hdrlen & 3) {
		/* hw pads the 802.11 header out to a 4-byte boundary;
		 * shift the header over the pad and trim it from the skb */
		padsize = hdrlen % 4;
		memmove(skb->data + padsize, skb->data, hdrlen);
		skb_pull(skb, padsize);
	}

	/* Prepare rx status */
	ath9k_rx_prepare(sc, skb, status, &rx_status);

	if (!(keyix == ATH9K_RXKEYIX_INVALID) &&
	    !(status->flags & ATH_RX_DECRYPT_ERROR)) {
		/* hw reported a valid key index and no decrypt error */
		rx_status.flag |= RX_FLAG_DECRYPTED;
	} else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
		   && !(status->flags & ATH_RX_DECRYPT_ERROR)
		   && skb->len >= hdrlen + 4) {
		/* protected frame without a hw key index: recover the key id
		 * from the IV (top two bits of the fourth IV byte) and check
		 * it against the keys we have programmed */
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, sc->sc_keymap))
			rx_status.flag |= RX_FLAG_DECRYPTED;
	}

	spin_lock_bh(&sc->node_lock);
	an = ath_node_find(sc, hdr->addr2);
	spin_unlock_bh(&sc->node_lock);

	if (an) {
		/* known peer: let the aggregation code buffer/reorder first */
		ath_rx_input(sc, an,
			     hw->conf.ht_conf.ht_supported,
			     skb, status, &st);
	}
	/* st is only assigned when an != NULL; the short-circuit on !an
	 * keeps the read of st safe */
	if (!an || (st != ATH_RX_CONSUMED))
		__ieee80211_rx(hw, skb, &rx_status);

	return 0;
}
541
542int ath_rx_subframe(struct ath_node *an,
543 struct sk_buff *skb,
544 struct ath_recv_status *status)
545{
546 struct ath_softc *sc = an->an_sc;
547 struct ieee80211_hw *hw = sc->hw;
548 struct ieee80211_rx_status rx_status;
549
550 /* Prepare rx status */
551 ath9k_rx_prepare(sc, skb, status, &rx_status);
552 if (!(status->flags & ATH_RX_DECRYPT_ERROR))
553 rx_status.flag |= RX_FLAG_DECRYPTED;
554
555 __ieee80211_rx(hw, skb, &rx_status);
556
557 return 0;
558}
559
560/********************************/
561/* LED functions */
562/********************************/
563
564static void ath_led_brightness(struct led_classdev *led_cdev,
565 enum led_brightness brightness)
566{
567 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
568 struct ath_softc *sc = led->sc;
569
570 switch (brightness) {
571 case LED_OFF:
572 if (led->led_type == ATH_LED_ASSOC ||
573 led->led_type == ATH_LED_RADIO)
574 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
575 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
576 (led->led_type == ATH_LED_RADIO) ? 1 :
577 !!(sc->sc_flags & SC_OP_LED_ASSOCIATED));
578 break;
579 case LED_FULL:
580 if (led->led_type == ATH_LED_ASSOC)
581 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
582 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
583 break;
584 default:
585 break;
586 }
587}
588
589static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
590 char *trigger)
591{
592 int ret;
593
594 led->sc = sc;
595 led->led_cdev.name = led->name;
596 led->led_cdev.default_trigger = trigger;
597 led->led_cdev.brightness_set = ath_led_brightness;
598
599 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
600 if (ret)
601 DPRINTF(sc, ATH_DBG_FATAL,
602 "Failed to register led:%s", led->name);
603 else
604 led->registered = 1;
605 return ret;
606}
607
608static void ath_unregister_led(struct ath_led *led)
609{
610 if (led->registered) {
611 led_classdev_unregister(&led->led_cdev);
612 led->registered = 0;
613 }
614}
615
/*
 * Tear down all four LED class devices and park the LED GPIO.
 * Also used as the error-unwind path of ath_init_leds(); unregistering
 * a never-registered LED is a no-op.
 */
static void ath_deinit_leds(struct ath_softc *sc)
{
	ath_unregister_led(&sc->assoc_led);
	/* clear assoc state so later callbacks won't re-light the LED */
	sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
	ath_unregister_led(&sc->tx_led);
	ath_unregister_led(&sc->rx_led);
	ath_unregister_led(&sc->radio_led);
	/* drive the pin high: LED off (active low) */
	ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
}
625
626static void ath_init_leds(struct ath_softc *sc)
627{
628 char *trigger;
629 int ret;
630
631 /* Configure gpio 1 for output */
632 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
633 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
634 /* LED off, active low */
635 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
636
637 trigger = ieee80211_get_radio_led_name(sc->hw);
638 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
639 "ath9k-%s:radio", wiphy_name(sc->hw->wiphy));
640 ret = ath_register_led(sc, &sc->radio_led, trigger);
641 sc->radio_led.led_type = ATH_LED_RADIO;
642 if (ret)
643 goto fail;
644
645 trigger = ieee80211_get_assoc_led_name(sc->hw);
646 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
647 "ath9k-%s:assoc", wiphy_name(sc->hw->wiphy));
648 ret = ath_register_led(sc, &sc->assoc_led, trigger);
649 sc->assoc_led.led_type = ATH_LED_ASSOC;
650 if (ret)
651 goto fail;
652
653 trigger = ieee80211_get_tx_led_name(sc->hw);
654 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
655 "ath9k-%s:tx", wiphy_name(sc->hw->wiphy));
656 ret = ath_register_led(sc, &sc->tx_led, trigger);
657 sc->tx_led.led_type = ATH_LED_TX;
658 if (ret)
659 goto fail;
660
661 trigger = ieee80211_get_rx_led_name(sc->hw);
662 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
663 "ath9k-%s:rx", wiphy_name(sc->hw->wiphy));
664 ret = ath_register_led(sc, &sc->rx_led, trigger);
665 sc->rx_led.led_type = ATH_LED_RX;
666 if (ret)
667 goto fail;
668
669 return;
670
671fail:
672 ath_deinit_leds(sc);
673}
674
675#ifdef CONFIG_RFKILL
676/*******************/
677/* Rfkill */
678/*******************/
679
/*
 * Bring the radio back up after an rfkill unblock: full chip reset on
 * the current channel, restart rx, restore beacons and interrupts,
 * light the LED and unblock the tx queues.  Called from the rfkill
 * paths; the ordering of these steps mirrors the normal start path.
 */
static void ath_radio_enable(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;

	/* serialize against other chip resets */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, ah->ah_curchan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask,
			    sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing,
			    false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset channel %u (%uMhz) "
			"flags 0x%x hal status %u\n", __func__,
			ath9k_hw_mhz2ieee(ah,
					  ah->ah_curchan->channel,
					  ah->ah_curchan->channelFlags),
			ah->ah_curchan->channel,
			ah->ah_curchan->channelFlags, status);
	}
	spin_unlock_bh(&sc->sc_resetlock);

	ath_update_txpow(sc);
	if (ath_startrecv(sc) != 0) {
		/* without rx the radio is unusable; leave queues stopped */
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to restart recv logic\n", __func__);
		return;
	}

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	/* Re-Enable interrupts */
	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Enable LED (pin is active low, so drive it to 0) */
	ath9k_hw_cfg_output(ah, ATH_LED_PIN,
			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
	ath9k_hw_set_gpio(ah, ATH_LED_PIN, 0);

	ieee80211_wake_queues(sc->hw);
}
723
/*
 * Quiesce the radio for an rfkill block: stop the tx queues and LED,
 * mask interrupts, drain tx/rx, reset the chip, then disable the PHY
 * and put the hw into full sleep.  Inverse of ath_radio_enable().
 */
static void ath_radio_disable(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;


	ieee80211_stop_queues(sc->hw);

	/* Disable LED (pin high = off, then release the pin to input) */
	ath9k_hw_set_gpio(ah, ATH_LED_PIN, 1);
	ath9k_hw_cfg_gpio_input(ah, ATH_LED_PIN);

	/* Disable interrupts */
	ath9k_hw_set_interrupts(ah, 0);

	ath_draintxq(sc, false);	/* clear pending tx frames */
	ath_stoprecv(sc);		/* turn off frame recv */
	ath_flushrecv(sc);		/* flush recv queue */

	/* serialize against other chip resets */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, ah->ah_curchan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask,
			    sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing,
			    false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset channel %u (%uMhz) "
			"flags 0x%x hal status %u\n", __func__,
			ath9k_hw_mhz2ieee(ah,
					  ah->ah_curchan->channel,
					  ah->ah_curchan->channelFlags),
			ah->ah_curchan->channel,
			ah->ah_curchan->channelFlags, status);
	}
	spin_unlock_bh(&sc->sc_resetlock);

	ath9k_hw_phy_disable(ah);
	ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
}
764
765static bool ath_is_rfkill_set(struct ath_softc *sc)
766{
767 struct ath_hal *ah = sc->sc_ah;
768
769 return ath9k_hw_gpio_get(ah, ah->ah_rfkill_gpio) ==
770 ah->ah_rfkill_polarity;
771}
772
/*
 * h/w rfkill poll worker.
 *
 * Periodically samples the hardware rfkill switch and, when its state
 * has changed, enables/disables the radio (unless a software block is
 * in effect), updates SC_OP_RFKILL_HW_BLOCKED, and reports the new
 * state to the rfkill core.  Re-queues itself every
 * ATH_RFKILL_POLL_INTERVAL ms.
 */
static void ath_rfkill_poll(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    rf_kill.rfkill_poll.work);
	bool radio_on;

	/* device is going away; don't touch hw or re-arm */
	if (sc->sc_flags & SC_OP_INVALID)
		return;

	radio_on = !ath_is_rfkill_set(sc);

	/*
	 * enable/disable radio only when there is a
	 * state change in RF switch
	 */
	if (radio_on == !!(sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED)) {
		enum rfkill_state state;

		if (sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED) {
			/* s/w block takes precedence: leave the radio
			 * alone, only report the composite state */
			state = radio_on ? RFKILL_STATE_SOFT_BLOCKED
				: RFKILL_STATE_HARD_BLOCKED;
		} else if (radio_on) {
			ath_radio_enable(sc);
			state = RFKILL_STATE_UNBLOCKED;
		} else {
			ath_radio_disable(sc);
			state = RFKILL_STATE_HARD_BLOCKED;
		}

		/* mirror the hard-block state into sc_flags */
		if (state == RFKILL_STATE_HARD_BLOCKED)
			sc->sc_flags |= SC_OP_RFKILL_HW_BLOCKED;
		else
			sc->sc_flags &= ~SC_OP_RFKILL_HW_BLOCKED;

		rfkill_force_state(sc->rf_kill.rfkill, state);
	}

	/* re-arm the poll */
	queue_delayed_work(sc->hw->workqueue, &sc->rf_kill.rfkill_poll,
			   msecs_to_jiffies(ATH_RFKILL_POLL_INTERVAL));
}
814
815/* s/w rfkill handler */
816static int ath_sw_toggle_radio(void *data, enum rfkill_state state)
817{
818 struct ath_softc *sc = data;
819
820 switch (state) {
821 case RFKILL_STATE_SOFT_BLOCKED:
822 if (!(sc->sc_flags & (SC_OP_RFKILL_HW_BLOCKED |
823 SC_OP_RFKILL_SW_BLOCKED)))
824 ath_radio_disable(sc);
825 sc->sc_flags |= SC_OP_RFKILL_SW_BLOCKED;
826 return 0;
827 case RFKILL_STATE_UNBLOCKED:
828 if ((sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED)) {
829 sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED;
830 if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) {
831 DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the"
832 "radio as it is disabled by h/w \n");
833 return -EPERM;
834 }
835 ath_radio_enable(sc);
836 }
837 return 0;
838 default:
839 return -EINVAL;
840 }
841}
842
843/* Init s/w rfkill */
844static int ath_init_sw_rfkill(struct ath_softc *sc)
845{
846 sc->rf_kill.rfkill = rfkill_allocate(wiphy_dev(sc->hw->wiphy),
847 RFKILL_TYPE_WLAN);
848 if (!sc->rf_kill.rfkill) {
849 DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
850 return -ENOMEM;
851 }
852
853 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
854 "ath9k-%s:rfkill", wiphy_name(sc->hw->wiphy));
855 sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name;
856 sc->rf_kill.rfkill->data = sc;
857 sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio;
858 sc->rf_kill.rfkill->state = RFKILL_STATE_UNBLOCKED;
859 sc->rf_kill.rfkill->user_claim_unsupported = 1;
860
861 return 0;
862}
863
/* Deinitialize rfkill: stop the h/w poll worker and unregister the
 * s/w rfkill device if it was registered. */
static void ath_deinit_rfkill(struct ath_softc *sc)
{
	/* cancel the poll first so it cannot re-queue itself or touch
	 * the rfkill device after it is gone */
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
		cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);

	if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
		/* NOTE(review): no rfkill_free() here — presumably
		 * rfkill_unregister() drops the reference; confirm
		 * against the rfkill API in use */
		rfkill_unregister(sc->rf_kill.rfkill);
		sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
		sc->rf_kill.rfkill = NULL;
	}
}
876#endif /* CONFIG_RFKILL */
877
/*
 * Detach the driver from the device: tear down LEDs and rfkill,
 * unregister from mac80211 and rate control, clean up tx/rx state,
 * then free the hw layer.  Always returns 0.
 */
static int ath_detach(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__);

	/* Deinit LED control */
	ath_deinit_leds(sc);

#ifdef CONFIG_RFKILL
	/* deinit rfkill */
	ath_deinit_rfkill(sc);
#endif

	/* Unregister hw */

	ieee80211_unregister_hw(hw);

	/* unregister Rate control */
	ath_rate_control_unregister();

	/* tx/rx cleanup */

	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);

	/* Deinit */

	ath_deinit(sc);

	return 0;
}
910
/*
 * One-time device attach: initialize the hw layer for @devid, set up
 * node tracking, bands/rates, register rate control and the mac80211
 * hw, then bring up LEDs, rfkill and the tx/rx engines.
 *
 * Returns 0 on success or a negative errno.  Failures after mac80211
 * registration unwind through ath_detach().
 */
static int ath_attach(u16 devid,
		      struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int error = 0;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach ATH hw\n", __func__);

	error = ath_init(devid, sc);
	if (error != 0)
		return error;

	/* Init nodes */

	INIT_LIST_HEAD(&sc->node_list);
	spin_lock_init(&sc->node_lock);

	/* get mac address from hardware and set in mac80211 */

	SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr);

	/* setup channels and rates */

	/* 2.4GHz band is always advertised */
	sc->sbands[IEEE80211_BAND_2GHZ].channels =
		sc->channels[IEEE80211_BAND_2GHZ];
	sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
		sc->rates[IEEE80211_BAND_2GHZ];
	sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;

	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		/* Setup HT capabilities for 2.4Ghz*/
		setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_info);

	hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&sc->sbands[IEEE80211_BAND_2GHZ];

	/* 5GHz band only when the hw supports 11a */
	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_5GHZ].channels =
			sc->channels[IEEE80211_BAND_5GHZ];
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			sc->rates[IEEE80211_BAND_5GHZ];
		sc->sbands[IEEE80211_BAND_5GHZ].band =
			IEEE80211_BAND_5GHZ;

		if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
			/* Setup HT capabilities for 5Ghz*/
			setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_info);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];
	}

	/* FIXME: Have to figure out proper hw init values later */

	hw->queues = 4;
	hw->ampdu_queues = 1;

	/* Register rate control */
	hw->rate_control_algorithm = "ath9k_rate_control";
	error = ath_rate_control_register();
	if (error != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Unable to register rate control "
			"algorithm:%d\n", __func__, error);
		ath_rate_control_unregister();
		goto bad;
	}

	error = ieee80211_register_hw(hw);
	if (error != 0) {
		ath_rate_control_unregister();
		goto bad;
	}

	/* Initialize LED control */
	ath_init_leds(sc);

#ifdef CONFIG_RFKILL
	/* Initialize h/w Rfkill polling (registration happens at start) */
	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
		INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);

	/* Initialize s/w rfkill */
	if (ath_init_sw_rfkill(sc))
		goto detach;
#endif

	/* initialize tx/rx engine */

	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto detach;

	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto detach;

	return 0;
detach:
	/* full unwind: unregisters hw/rate control and frees hw state */
	ath_detach(sc);
bad:
	/* NOTE(review): this path runs after ath_init() succeeded but does
	 * not call ath_deinit() — possible leak of hw-layer state; confirm */
	return error;
}
1014
334static int ath9k_start(struct ieee80211_hw *hw) 1015static int ath9k_start(struct ieee80211_hw *hw)
335{ 1016{
336 struct ath_softc *sc = hw->priv; 1017 struct ath_softc *sc = hw->priv;
@@ -359,6 +1040,33 @@ static int ath9k_start(struct ieee80211_hw *hw)
359 return error; 1040 return error;
360 } 1041 }
361 1042
1043#ifdef CONFIG_RFKILL
1044 /* Start rfkill polling */
1045 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1046 queue_delayed_work(sc->hw->workqueue,
1047 &sc->rf_kill.rfkill_poll, 0);
1048
1049 if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
1050 if (rfkill_register(sc->rf_kill.rfkill)) {
1051 DPRINTF(sc, ATH_DBG_FATAL,
1052 "Unable to register rfkill\n");
1053 rfkill_free(sc->rf_kill.rfkill);
1054
1055 /* Deinitialize the device */
1056 if (sc->pdev->irq)
1057 free_irq(sc->pdev->irq, sc);
1058 ath_detach(sc);
1059 pci_iounmap(sc->pdev, sc->mem);
1060 pci_release_region(sc->pdev, 0);
1061 pci_disable_device(sc->pdev);
1062 ieee80211_free_hw(hw);
1063 return -EIO;
1064 } else {
1065 sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
1066 }
1067 }
1068#endif
1069
362 ieee80211_wake_queues(hw); 1070 ieee80211_wake_queues(hw);
363 return 0; 1071 return 0;
364} 1072}
@@ -368,6 +1076,20 @@ static int ath9k_tx(struct ieee80211_hw *hw,
368{ 1076{
369 struct ath_softc *sc = hw->priv; 1077 struct ath_softc *sc = hw->priv;
370 int hdrlen, padsize; 1078 int hdrlen, padsize;
1079 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1080
1081 /*
1082 * As a temporary workaround, assign seq# here; this will likely need
1083 * to be cleaned up to work better with Beacon transmission and virtual
1084 * BSSes.
1085 */
1086 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1087 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1088 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1089 sc->seq_no += 0x10;
1090 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1091 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
1092 }
371 1093
372 /* Add the padding after the header if this is not already done */ 1094 /* Add the padding after the header if this is not already done */
373 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1095 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -406,6 +1128,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
406 "%s: Device is no longer present\n", __func__); 1128 "%s: Device is no longer present\n", __func__);
407 1129
408 ieee80211_stop_queues(hw); 1130 ieee80211_stop_queues(hw);
1131
1132#ifdef CONFIG_RFKILL
1133 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1134 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1135#endif
409} 1136}
410 1137
411static int ath9k_add_interface(struct ieee80211_hw *hw, 1138static int ath9k_add_interface(struct ieee80211_hw *hw,
@@ -420,16 +1147,19 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
420 return -ENOBUFS; 1147 return -ENOBUFS;
421 1148
422 switch (conf->type) { 1149 switch (conf->type) {
423 case IEEE80211_IF_TYPE_STA: 1150 case NL80211_IFTYPE_STATION:
424 ic_opmode = ATH9K_M_STA; 1151 ic_opmode = ATH9K_M_STA;
425 break; 1152 break;
426 case IEEE80211_IF_TYPE_IBSS: 1153 case NL80211_IFTYPE_ADHOC:
427 ic_opmode = ATH9K_M_IBSS; 1154 ic_opmode = ATH9K_M_IBSS;
428 break; 1155 break;
1156 case NL80211_IFTYPE_AP:
1157 ic_opmode = ATH9K_M_HOSTAP;
1158 break;
429 default: 1159 default:
430 DPRINTF(sc, ATH_DBG_FATAL, 1160 DPRINTF(sc, ATH_DBG_FATAL,
431 "%s: Only STA and IBSS are supported currently\n", 1161 "%s: Interface type %d not yet supported\n",
432 __func__); 1162 __func__, conf->type);
433 return -EOPNOTSUPP; 1163 return -EOPNOTSUPP;
434 } 1164 }
435 1165
@@ -472,7 +1202,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
472 ath_rate_newstate(sc, avp); 1202 ath_rate_newstate(sc, avp);
473 1203
474 /* Reclaim beacon resources */ 1204 /* Reclaim beacon resources */
475 if (sc->sc_opmode == ATH9K_M_HOSTAP || sc->sc_opmode == ATH9K_M_IBSS) { 1205 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP ||
1206 sc->sc_ah->ah_opmode == ATH9K_M_IBSS) {
476 ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); 1207 ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
477 ath_beacon_return(sc, avp); 1208 ath_beacon_return(sc, avp);
478 } 1209 }
@@ -480,7 +1211,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
480 /* Set interrupt mask */ 1211 /* Set interrupt mask */
481 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 1212 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
482 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL); 1213 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL);
483 sc->sc_beacons = 0; 1214 sc->sc_flags &= ~SC_OP_BEACONS;
484 1215
485 error = ath_vap_detach(sc, 0); 1216 error = ath_vap_detach(sc, 0);
486 if (error) 1217 if (error)
@@ -529,6 +1260,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
529 struct ieee80211_if_conf *conf) 1260 struct ieee80211_if_conf *conf)
530{ 1261{
531 struct ath_softc *sc = hw->priv; 1262 struct ath_softc *sc = hw->priv;
1263 struct ath_hal *ah = sc->sc_ah;
532 struct ath_vap *avp; 1264 struct ath_vap *avp;
533 u32 rfilt = 0; 1265 u32 rfilt = 0;
534 int error, i; 1266 int error, i;
@@ -541,18 +1273,25 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
541 return -EINVAL; 1273 return -EINVAL;
542 } 1274 }
543 1275
1276 /* TODO: Need to decide which hw opmode to use for multi-interface
1277 * cases */
1278 if (vif->type == NL80211_IFTYPE_AP &&
1279 ah->ah_opmode != ATH9K_M_HOSTAP) {
1280 ah->ah_opmode = ATH9K_M_HOSTAP;
1281 ath9k_hw_setopmode(ah);
1282 ath9k_hw_write_associd(ah, sc->sc_myaddr, 0);
1283 /* Request full reset to get hw opmode changed properly */
1284 sc->sc_flags |= SC_OP_FULL_RESET;
1285 }
1286
544 if ((conf->changed & IEEE80211_IFCC_BSSID) && 1287 if ((conf->changed & IEEE80211_IFCC_BSSID) &&
545 !is_zero_ether_addr(conf->bssid)) { 1288 !is_zero_ether_addr(conf->bssid)) {
546 switch (vif->type) { 1289 switch (vif->type) {
547 case IEEE80211_IF_TYPE_STA: 1290 case NL80211_IFTYPE_STATION:
548 case IEEE80211_IF_TYPE_IBSS: 1291 case NL80211_IFTYPE_ADHOC:
549 /* Update ratectrl about the new state */ 1292 /* Update ratectrl about the new state */
550 ath_rate_newstate(sc, avp); 1293 ath_rate_newstate(sc, avp);
551 1294
552 /* Set rx filter */
553 rfilt = ath_calcrxfilter(sc);
554 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
555
556 /* Set BSSID */ 1295 /* Set BSSID */
557 memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN); 1296 memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN);
558 sc->sc_curaid = 0; 1297 sc->sc_curaid = 0;
@@ -585,7 +1324,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
585 print_mac(mac, sc->sc_curbssid), sc->sc_curaid); 1324 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
586 1325
587 /* need to reconfigure the beacon */ 1326 /* need to reconfigure the beacon */
588 sc->sc_beacons = 0; 1327 sc->sc_flags &= ~SC_OP_BEACONS ;
589 1328
590 break; 1329 break;
591 default: 1330 default:
@@ -594,7 +1333,8 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
594 } 1333 }
595 1334
596 if ((conf->changed & IEEE80211_IFCC_BEACON) && 1335 if ((conf->changed & IEEE80211_IFCC_BEACON) &&
597 (vif->type == IEEE80211_IF_TYPE_IBSS)) { 1336 ((vif->type == NL80211_IFTYPE_ADHOC) ||
1337 (vif->type == NL80211_IFTYPE_AP))) {
598 /* 1338 /*
599 * Allocate and setup the beacon frame. 1339 * Allocate and setup the beacon frame.
600 * 1340 *
@@ -613,7 +1353,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
613 } 1353 }
614 1354
615 /* Check for WLAN_CAPABILITY_PRIVACY ? */ 1355 /* Check for WLAN_CAPABILITY_PRIVACY ? */
616 if ((avp->av_opmode != IEEE80211_IF_TYPE_STA)) { 1356 if ((avp->av_opmode != NL80211_IFTYPE_STATION)) {
617 for (i = 0; i < IEEE80211_WEP_NKID; i++) 1357 for (i = 0; i < IEEE80211_WEP_NKID; i++)
618 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i)) 1358 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
619 ath9k_hw_keysetmac(sc->sc_ah, 1359 ath9k_hw_keysetmac(sc->sc_ah,
@@ -622,7 +1362,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
622 } 1362 }
623 1363
624 /* Only legacy IBSS for now */ 1364 /* Only legacy IBSS for now */
625 if (vif->type == IEEE80211_IF_TYPE_IBSS) 1365 if (vif->type == NL80211_IFTYPE_ADHOC)
626 ath_update_chainmask(sc, 0); 1366 ath_update_chainmask(sc, 0);
627 1367
628 return 0; 1368 return 0;
@@ -636,8 +1376,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
636 FIF_BCN_PRBRESP_PROMISC | \ 1376 FIF_BCN_PRBRESP_PROMISC | \
637 FIF_FCSFAIL) 1377 FIF_FCSFAIL)
638 1378
639/* Accept unicast, bcast and mcast frames */ 1379/* FIXME: sc->sc_full_reset ? */
640
641static void ath9k_configure_filter(struct ieee80211_hw *hw, 1380static void ath9k_configure_filter(struct ieee80211_hw *hw,
642 unsigned int changed_flags, 1381 unsigned int changed_flags,
643 unsigned int *total_flags, 1382 unsigned int *total_flags,
@@ -645,22 +1384,28 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
645 struct dev_mc_list *mclist) 1384 struct dev_mc_list *mclist)
646{ 1385{
647 struct ath_softc *sc = hw->priv; 1386 struct ath_softc *sc = hw->priv;
1387 u32 rfilt;
648 1388
649 changed_flags &= SUPPORTED_FILTERS; 1389 changed_flags &= SUPPORTED_FILTERS;
650 *total_flags &= SUPPORTED_FILTERS; 1390 *total_flags &= SUPPORTED_FILTERS;
651 1391
1392 sc->rx_filter = *total_flags;
1393 rfilt = ath_calcrxfilter(sc);
1394 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
1395
652 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 1396 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
653 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) 1397 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
654 ath_scan_start(sc); 1398 ath9k_hw_write_associd(sc->sc_ah, ath_bcast_mac, 0);
655 else
656 ath_scan_end(sc);
657 } 1399 }
1400
1401 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set HW RX filter: 0x%x\n",
1402 __func__, sc->rx_filter);
658} 1403}
659 1404
660static void ath9k_sta_notify(struct ieee80211_hw *hw, 1405static void ath9k_sta_notify(struct ieee80211_hw *hw,
661 struct ieee80211_vif *vif, 1406 struct ieee80211_vif *vif,
662 enum sta_notify_cmd cmd, 1407 enum sta_notify_cmd cmd,
663 const u8 *addr) 1408 struct ieee80211_sta *sta)
664{ 1409{
665 struct ath_softc *sc = hw->priv; 1410 struct ath_softc *sc = hw->priv;
666 struct ath_node *an; 1411 struct ath_node *an;
@@ -668,19 +1413,18 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
668 DECLARE_MAC_BUF(mac); 1413 DECLARE_MAC_BUF(mac);
669 1414
670 spin_lock_irqsave(&sc->node_lock, flags); 1415 spin_lock_irqsave(&sc->node_lock, flags);
671 an = ath_node_find(sc, (u8 *) addr); 1416 an = ath_node_find(sc, sta->addr);
672 spin_unlock_irqrestore(&sc->node_lock, flags); 1417 spin_unlock_irqrestore(&sc->node_lock, flags);
673 1418
674 switch (cmd) { 1419 switch (cmd) {
675 case STA_NOTIFY_ADD: 1420 case STA_NOTIFY_ADD:
676 spin_lock_irqsave(&sc->node_lock, flags); 1421 spin_lock_irqsave(&sc->node_lock, flags);
677 if (!an) { 1422 if (!an) {
678 ath_node_attach(sc, (u8 *)addr, 0); 1423 ath_node_attach(sc, sta->addr, 0);
679 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n", 1424 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n",
680 __func__, 1425 __func__, print_mac(mac, sta->addr));
681 print_mac(mac, addr));
682 } else { 1426 } else {
683 ath_node_get(sc, (u8 *)addr); 1427 ath_node_get(sc, sta->addr);
684 } 1428 }
685 spin_unlock_irqrestore(&sc->node_lock, flags); 1429 spin_unlock_irqrestore(&sc->node_lock, flags);
686 break; 1430 break;
@@ -693,7 +1437,7 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
693 ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT); 1437 ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT);
694 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n", 1438 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n",
695 __func__, 1439 __func__,
696 print_mac(mac, addr)); 1440 print_mac(mac, sta->addr));
697 } 1441 }
698 break; 1442 break;
699 default: 1443 default:
@@ -756,7 +1500,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
756 key->hw_key_idx = key->keyidx; 1500 key->hw_key_idx = key->keyidx;
757 /* push IV and Michael MIC generation to stack */ 1501 /* push IV and Michael MIC generation to stack */
758 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1502 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
759 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 1503 if (key->alg == ALG_TKIP)
1504 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
760 } 1505 }
761 break; 1506 break;
762 case DISABLE_KEY: 1507 case DISABLE_KEY:
@@ -771,117 +1516,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
771 return ret; 1516 return ret;
772} 1517}
773 1518
774static void ath9k_ht_conf(struct ath_softc *sc,
775 struct ieee80211_bss_conf *bss_conf)
776{
777#define IEEE80211_HT_CAP_40MHZ_INTOLERANT BIT(14)
778 struct ath_ht_info *ht_info = &sc->sc_ht_info;
779
780 if (bss_conf->assoc_ht) {
781 ht_info->ext_chan_offset =
782 bss_conf->ht_bss_conf->bss_cap &
783 IEEE80211_HT_IE_CHA_SEC_OFFSET;
784
785 if (!(bss_conf->ht_conf->cap &
786 IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
787 (bss_conf->ht_bss_conf->bss_cap &
788 IEEE80211_HT_IE_CHA_WIDTH))
789 ht_info->tx_chan_width = ATH9K_HT_MACMODE_2040;
790 else
791 ht_info->tx_chan_width = ATH9K_HT_MACMODE_20;
792
793 ath9k_hw_set11nmac2040(sc->sc_ah, ht_info->tx_chan_width);
794 ht_info->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
795 bss_conf->ht_conf->ampdu_factor);
796 ht_info->mpdudensity =
797 parse_mpdudensity(bss_conf->ht_conf->ampdu_density);
798
799 }
800
801#undef IEEE80211_HT_CAP_40MHZ_INTOLERANT
802}
803
804static void ath9k_bss_assoc_info(struct ath_softc *sc,
805 struct ieee80211_bss_conf *bss_conf)
806{
807 struct ieee80211_hw *hw = sc->hw;
808 struct ieee80211_channel *curchan = hw->conf.channel;
809 struct ath_vap *avp;
810 int pos;
811 DECLARE_MAC_BUF(mac);
812
813 if (bss_conf->assoc) {
814 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Bss Info ASSOC %d\n",
815 __func__,
816 bss_conf->aid);
817
818 avp = sc->sc_vaps[0];
819 if (avp == NULL) {
820 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
821 __func__);
822 return;
823 }
824
825 /* New association, store aid */
826 if (avp->av_opmode == ATH9K_M_STA) {
827 sc->sc_curaid = bss_conf->aid;
828 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
829 sc->sc_curaid);
830 }
831
832 /* Configure the beacon */
833 ath_beacon_config(sc, 0);
834 sc->sc_beacons = 1;
835
836 /* Reset rssi stats */
837 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
838 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
839 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
840 sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
841
842 /* Update chainmask */
843 ath_update_chainmask(sc, bss_conf->assoc_ht);
844
845 DPRINTF(sc, ATH_DBG_CONFIG,
846 "%s: bssid %s aid 0x%x\n",
847 __func__,
848 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
849
850 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
851 __func__,
852 curchan->center_freq);
853
854 pos = ath_get_channel(sc, curchan);
855 if (pos == -1) {
856 DPRINTF(sc, ATH_DBG_FATAL,
857 "%s: Invalid channel\n", __func__);
858 return;
859 }
860
861 if (hw->conf.ht_conf.ht_supported)
862 sc->sc_ah->ah_channels[pos].chanmode =
863 ath_get_extchanmode(sc, curchan);
864 else
865 sc->sc_ah->ah_channels[pos].chanmode =
866 (curchan->band == IEEE80211_BAND_2GHZ) ?
867 CHANNEL_G : CHANNEL_A;
868
869 /* set h/w channel */
870 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
871 DPRINTF(sc, ATH_DBG_FATAL,
872 "%s: Unable to set channel\n",
873 __func__);
874
875 ath_rate_newstate(sc, avp);
876 /* Update ratectrl about the new state */
877 ath_rc_node_update(hw, avp->rc_node);
878 } else {
879 DPRINTF(sc, ATH_DBG_CONFIG,
880 "%s: Bss Info DISSOC\n", __func__);
881 sc->sc_curaid = 0;
882 }
883}
884
885static void ath9k_bss_info_changed(struct ieee80211_hw *hw, 1519static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
886 struct ieee80211_vif *vif, 1520 struct ieee80211_vif *vif,
887 struct ieee80211_bss_conf *bss_conf, 1521 struct ieee80211_bss_conf *bss_conf,
@@ -894,9 +1528,9 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
894 __func__, 1528 __func__,
895 bss_conf->use_short_preamble); 1529 bss_conf->use_short_preamble);
896 if (bss_conf->use_short_preamble) 1530 if (bss_conf->use_short_preamble)
897 sc->sc_flags |= ATH_PREAMBLE_SHORT; 1531 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
898 else 1532 else
899 sc->sc_flags &= ~ATH_PREAMBLE_SHORT; 1533 sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT;
900 } 1534 }
901 1535
902 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 1536 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
@@ -905,9 +1539,9 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
905 bss_conf->use_cts_prot); 1539 bss_conf->use_cts_prot);
906 if (bss_conf->use_cts_prot && 1540 if (bss_conf->use_cts_prot &&
907 hw->conf.channel->band != IEEE80211_BAND_5GHZ) 1541 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
908 sc->sc_flags |= ATH_PROTECT_ENABLE; 1542 sc->sc_flags |= SC_OP_PROTECT_ENABLE;
909 else 1543 else
910 sc->sc_flags &= ~ATH_PROTECT_ENABLE; 1544 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
911 } 1545 }
912 1546
913 if (changed & BSS_CHANGED_HT) { 1547 if (changed & BSS_CHANGED_HT) {
@@ -946,45 +1580,44 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw)
946 1580
947static int ath9k_ampdu_action(struct ieee80211_hw *hw, 1581static int ath9k_ampdu_action(struct ieee80211_hw *hw,
948 enum ieee80211_ampdu_mlme_action action, 1582 enum ieee80211_ampdu_mlme_action action,
949 const u8 *addr, 1583 struct ieee80211_sta *sta,
950 u16 tid, 1584 u16 tid, u16 *ssn)
951 u16 *ssn)
952{ 1585{
953 struct ath_softc *sc = hw->priv; 1586 struct ath_softc *sc = hw->priv;
954 int ret = 0; 1587 int ret = 0;
955 1588
956 switch (action) { 1589 switch (action) {
957 case IEEE80211_AMPDU_RX_START: 1590 case IEEE80211_AMPDU_RX_START:
958 ret = ath_rx_aggr_start(sc, addr, tid, ssn); 1591 ret = ath_rx_aggr_start(sc, sta->addr, tid, ssn);
959 if (ret < 0) 1592 if (ret < 0)
960 DPRINTF(sc, ATH_DBG_FATAL, 1593 DPRINTF(sc, ATH_DBG_FATAL,
961 "%s: Unable to start RX aggregation\n", 1594 "%s: Unable to start RX aggregation\n",
962 __func__); 1595 __func__);
963 break; 1596 break;
964 case IEEE80211_AMPDU_RX_STOP: 1597 case IEEE80211_AMPDU_RX_STOP:
965 ret = ath_rx_aggr_stop(sc, addr, tid); 1598 ret = ath_rx_aggr_stop(sc, sta->addr, tid);
966 if (ret < 0) 1599 if (ret < 0)
967 DPRINTF(sc, ATH_DBG_FATAL, 1600 DPRINTF(sc, ATH_DBG_FATAL,
968 "%s: Unable to stop RX aggregation\n", 1601 "%s: Unable to stop RX aggregation\n",
969 __func__); 1602 __func__);
970 break; 1603 break;
971 case IEEE80211_AMPDU_TX_START: 1604 case IEEE80211_AMPDU_TX_START:
972 ret = ath_tx_aggr_start(sc, addr, tid, ssn); 1605 ret = ath_tx_aggr_start(sc, sta->addr, tid, ssn);
973 if (ret < 0) 1606 if (ret < 0)
974 DPRINTF(sc, ATH_DBG_FATAL, 1607 DPRINTF(sc, ATH_DBG_FATAL,
975 "%s: Unable to start TX aggregation\n", 1608 "%s: Unable to start TX aggregation\n",
976 __func__); 1609 __func__);
977 else 1610 else
978 ieee80211_start_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid); 1611 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
979 break; 1612 break;
980 case IEEE80211_AMPDU_TX_STOP: 1613 case IEEE80211_AMPDU_TX_STOP:
981 ret = ath_tx_aggr_stop(sc, addr, tid); 1614 ret = ath_tx_aggr_stop(sc, sta->addr, tid);
982 if (ret < 0) 1615 if (ret < 0)
983 DPRINTF(sc, ATH_DBG_FATAL, 1616 DPRINTF(sc, ATH_DBG_FATAL,
984 "%s: Unable to stop TX aggregation\n", 1617 "%s: Unable to stop TX aggregation\n",
985 __func__); 1618 __func__);
986 1619
987 ieee80211_stop_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid); 1620 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
988 break; 1621 break;
989 default: 1622 default:
990 DPRINTF(sc, ATH_DBG_FATAL, 1623 DPRINTF(sc, ATH_DBG_FATAL,
@@ -1021,252 +1654,6 @@ static struct ieee80211_ops ath9k_ops = {
1021 .ampdu_action = ath9k_ampdu_action 1654 .ampdu_action = ath9k_ampdu_action
1022}; 1655};
1023 1656
1024void ath_get_beaconconfig(struct ath_softc *sc,
1025 int if_id,
1026 struct ath_beacon_config *conf)
1027{
1028 struct ieee80211_hw *hw = sc->hw;
1029
1030 /* fill in beacon config data */
1031
1032 conf->beacon_interval = hw->conf.beacon_int;
1033 conf->listen_interval = 100;
1034 conf->dtim_count = 1;
1035 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
1036}
1037
1038int ath_update_beacon(struct ath_softc *sc,
1039 int if_id,
1040 struct ath_beacon_offset *bo,
1041 struct sk_buff *skb,
1042 int mcast)
1043{
1044 return 0;
1045}
1046
1047void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1048 struct ath_xmit_status *tx_status, struct ath_node *an)
1049{
1050 struct ieee80211_hw *hw = sc->hw;
1051 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1052
1053 DPRINTF(sc, ATH_DBG_XMIT,
1054 "%s: TX complete: skb: %p\n", __func__, skb);
1055
1056 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
1057 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1058 /* free driver's private data area of tx_info */
1059 if (tx_info->driver_data[0] != NULL)
1060 kfree(tx_info->driver_data[0]);
1061 tx_info->driver_data[0] = NULL;
1062 }
1063
1064 if (tx_status->flags & ATH_TX_BAR) {
1065 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1066 tx_status->flags &= ~ATH_TX_BAR;
1067 }
1068 if (tx_status->flags)
1069 tx_info->status.excessive_retries = 1;
1070
1071 tx_info->status.retry_count = tx_status->retries;
1072
1073 ieee80211_tx_status(hw, skb);
1074 if (an)
1075 ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE);
1076}
1077
1078int ath__rx_indicate(struct ath_softc *sc,
1079 struct sk_buff *skb,
1080 struct ath_recv_status *status,
1081 u16 keyix)
1082{
1083 struct ieee80211_hw *hw = sc->hw;
1084 struct ath_node *an = NULL;
1085 struct ieee80211_rx_status rx_status;
1086 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1087 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1088 int padsize;
1089 enum ATH_RX_TYPE st;
1090
1091 /* see if any padding is done by the hw and remove it */
1092 if (hdrlen & 3) {
1093 padsize = hdrlen % 4;
1094 memmove(skb->data + padsize, skb->data, hdrlen);
1095 skb_pull(skb, padsize);
1096 }
1097
1098 /* remove FCS before passing up to protocol stack */
1099 skb_trim(skb, (skb->len - FCS_LEN));
1100
1101 /* Prepare rx status */
1102 ath9k_rx_prepare(sc, skb, status, &rx_status);
1103
1104 if (!(keyix == ATH9K_RXKEYIX_INVALID) &&
1105 !(status->flags & ATH_RX_DECRYPT_ERROR)) {
1106 rx_status.flag |= RX_FLAG_DECRYPTED;
1107 } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
1108 && !(status->flags & ATH_RX_DECRYPT_ERROR)
1109 && skb->len >= hdrlen + 4) {
1110 keyix = skb->data[hdrlen + 3] >> 6;
1111
1112 if (test_bit(keyix, sc->sc_keymap))
1113 rx_status.flag |= RX_FLAG_DECRYPTED;
1114 }
1115
1116 spin_lock_bh(&sc->node_lock);
1117 an = ath_node_find(sc, hdr->addr2);
1118 spin_unlock_bh(&sc->node_lock);
1119
1120 if (an) {
1121 ath_rx_input(sc, an,
1122 hw->conf.ht_conf.ht_supported,
1123 skb, status, &st);
1124 }
1125 if (!an || (st != ATH_RX_CONSUMED))
1126 __ieee80211_rx(hw, skb, &rx_status);
1127
1128 return 0;
1129}
1130
1131int ath_rx_subframe(struct ath_node *an,
1132 struct sk_buff *skb,
1133 struct ath_recv_status *status)
1134{
1135 struct ath_softc *sc = an->an_sc;
1136 struct ieee80211_hw *hw = sc->hw;
1137 struct ieee80211_rx_status rx_status;
1138
1139 /* Prepare rx status */
1140 ath9k_rx_prepare(sc, skb, status, &rx_status);
1141 if (!(status->flags & ATH_RX_DECRYPT_ERROR))
1142 rx_status.flag |= RX_FLAG_DECRYPTED;
1143
1144 __ieee80211_rx(hw, skb, &rx_status);
1145
1146 return 0;
1147}
1148
1149enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc)
1150{
1151 return sc->sc_ht_info.tx_chan_width;
1152}
1153
1154static int ath_detach(struct ath_softc *sc)
1155{
1156 struct ieee80211_hw *hw = sc->hw;
1157
1158 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__);
1159
1160 /* Unregister hw */
1161
1162 ieee80211_unregister_hw(hw);
1163
1164 /* unregister Rate control */
1165 ath_rate_control_unregister();
1166
1167 /* tx/rx cleanup */
1168
1169 ath_rx_cleanup(sc);
1170 ath_tx_cleanup(sc);
1171
1172 /* Deinit */
1173
1174 ath_deinit(sc);
1175
1176 return 0;
1177}
1178
1179static int ath_attach(u16 devid,
1180 struct ath_softc *sc)
1181{
1182 struct ieee80211_hw *hw = sc->hw;
1183 int error = 0;
1184
1185 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach ATH hw\n", __func__);
1186
1187 error = ath_init(devid, sc);
1188 if (error != 0)
1189 return error;
1190
1191 /* Init nodes */
1192
1193 INIT_LIST_HEAD(&sc->node_list);
1194 spin_lock_init(&sc->node_lock);
1195
1196 /* get mac address from hardware and set in mac80211 */
1197
1198 SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr);
1199
1200 /* setup channels and rates */
1201
1202 sc->sbands[IEEE80211_BAND_2GHZ].channels =
1203 sc->channels[IEEE80211_BAND_2GHZ];
1204 sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
1205 sc->rates[IEEE80211_BAND_2GHZ];
1206 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1207
1208 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
1209 /* Setup HT capabilities for 2.4Ghz*/
1210 setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_info);
1211
1212 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1213 &sc->sbands[IEEE80211_BAND_2GHZ];
1214
1215 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
1216 sc->sbands[IEEE80211_BAND_5GHZ].channels =
1217 sc->channels[IEEE80211_BAND_5GHZ];
1218 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1219 sc->rates[IEEE80211_BAND_5GHZ];
1220 sc->sbands[IEEE80211_BAND_5GHZ].band =
1221 IEEE80211_BAND_5GHZ;
1222
1223 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
1224 /* Setup HT capabilities for 5Ghz*/
1225 setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_info);
1226
1227 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1228 &sc->sbands[IEEE80211_BAND_5GHZ];
1229 }
1230
1231 /* FIXME: Have to figure out proper hw init values later */
1232
1233 hw->queues = 4;
1234 hw->ampdu_queues = 1;
1235
1236 /* Register rate control */
1237 hw->rate_control_algorithm = "ath9k_rate_control";
1238 error = ath_rate_control_register();
1239 if (error != 0) {
1240 DPRINTF(sc, ATH_DBG_FATAL,
1241 "%s: Unable to register rate control "
1242 "algorithm:%d\n", __func__, error);
1243 ath_rate_control_unregister();
1244 goto bad;
1245 }
1246
1247 error = ieee80211_register_hw(hw);
1248 if (error != 0) {
1249 ath_rate_control_unregister();
1250 goto bad;
1251 }
1252
1253 /* initialize tx/rx engine */
1254
1255 error = ath_tx_init(sc, ATH_TXBUF);
1256 if (error != 0)
1257 goto bad1;
1258
1259 error = ath_rx_init(sc, ATH_RXBUF);
1260 if (error != 0)
1261 goto bad1;
1262
1263 return 0;
1264bad1:
1265 ath_detach(sc);
1266bad:
1267 return error;
1268}
1269
1270static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1657static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1271{ 1658{
1272 void __iomem *mem; 1659 void __iomem *mem;
@@ -1340,9 +1727,16 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1340 goto bad2; 1727 goto bad2;
1341 } 1728 }
1342 1729
1343 hw->flags = IEEE80211_HW_SIGNAL_DBM | 1730 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1731 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1732 IEEE80211_HW_SIGNAL_DBM |
1344 IEEE80211_HW_NOISE_DBM; 1733 IEEE80211_HW_NOISE_DBM;
1345 1734
1735 hw->wiphy->interface_modes =
1736 BIT(NL80211_IFTYPE_AP) |
1737 BIT(NL80211_IFTYPE_STATION) |
1738 BIT(NL80211_IFTYPE_ADHOC);
1739
1346 SET_IEEE80211_DEV(hw, &pdev->dev); 1740 SET_IEEE80211_DEV(hw, &pdev->dev);
1347 pci_set_drvdata(pdev, hw); 1741 pci_set_drvdata(pdev, hw);
1348 1742
@@ -1404,6 +1798,16 @@ static void ath_pci_remove(struct pci_dev *pdev)
1404 1798
1405static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1799static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1406{ 1800{
1801 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1802 struct ath_softc *sc = hw->priv;
1803
1804 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1805
1806#ifdef CONFIG_RFKILL
1807 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1808 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1809#endif
1810
1407 pci_save_state(pdev); 1811 pci_save_state(pdev);
1408 pci_disable_device(pdev); 1812 pci_disable_device(pdev);
1409 pci_set_power_state(pdev, 3); 1813 pci_set_power_state(pdev, 3);
@@ -1413,6 +1817,8 @@ static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1413 1817
1414static int ath_pci_resume(struct pci_dev *pdev) 1818static int ath_pci_resume(struct pci_dev *pdev)
1415{ 1819{
1820 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1821 struct ath_softc *sc = hw->priv;
1416 u32 val; 1822 u32 val;
1417 int err; 1823 int err;
1418 1824
@@ -1429,6 +1835,21 @@ static int ath_pci_resume(struct pci_dev *pdev)
1429 if ((val & 0x0000ff00) != 0) 1835 if ((val & 0x0000ff00) != 0)
1430 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 1836 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1431 1837
1838 /* Enable LED */
1839 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
1840 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1841 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1842
1843#ifdef CONFIG_RFKILL
1844 /*
1845 * check the h/w rfkill state on resume
1846 * and start the rfkill poll timer
1847 */
1848 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1849 queue_delayed_work(sc->hw->workqueue,
1850 &sc->rf_kill.rfkill_poll, 0);
1851#endif
1852
1432 return 0; 1853 return 0;
1433} 1854}
1434 1855
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
index 0cd399a5344a..14702344448b 100644
--- a/drivers/net/wireless/ath9k/phy.h
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -18,19 +18,19 @@
18#define PHY_H 18#define PHY_H
19 19
20bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah, 20bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
21 struct ath9k_channel 21 struct ath9k_channel
22 *chan); 22 *chan);
23bool ath9k_hw_set_channel(struct ath_hal *ah, 23bool ath9k_hw_set_channel(struct ath_hal *ah,
24 struct ath9k_channel *chan); 24 struct ath9k_channel *chan);
25void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex, 25void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex,
26 u32 freqIndex, int regWrites); 26 u32 freqIndex, int regWrites);
27bool ath9k_hw_set_rf_regs(struct ath_hal *ah, 27bool ath9k_hw_set_rf_regs(struct ath_hal *ah,
28 struct ath9k_channel *chan, 28 struct ath9k_channel *chan,
29 u16 modesIndex); 29 u16 modesIndex);
30void ath9k_hw_decrease_chain_power(struct ath_hal *ah, 30void ath9k_hw_decrease_chain_power(struct ath_hal *ah,
31 struct ath9k_channel *chan); 31 struct ath9k_channel *chan);
32bool ath9k_hw_init_rf(struct ath_hal *ah, 32bool ath9k_hw_init_rf(struct ath_hal *ah,
33 int *status); 33 int *status);
34 34
35#define AR_PHY_BASE 0x9800 35#define AR_PHY_BASE 0x9800
36#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2)) 36#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index 73c460ad355f..1cc9daf44550 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -653,8 +653,8 @@ ath_rc_sib_init_validrates(struct ath_rate_node *ath_rc_priv,
653 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv); 653 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
654 for (i = 0; i < rate_table->rate_cnt; i++) { 654 for (i = 0; i < rate_table->rate_cnt; i++) {
655 valid = (ath_rc_priv->single_stream ? 655 valid = (ath_rc_priv->single_stream ?
656 rate_table->info[i].valid_single_stream : 656 rate_table->info[i].valid_single_stream :
657 rate_table->info[i].valid); 657 rate_table->info[i].valid);
658 if (valid == TRUE) { 658 if (valid == TRUE) {
659 u32 phy = rate_table->info[i].phy; 659 u32 phy = rate_table->info[i].phy;
660 u8 valid_rate_count = 0; 660 u8 valid_rate_count = 0;
@@ -740,14 +740,14 @@ ath_rc_sib_setvalid_htrates(struct ath_rate_node *ath_rc_priv,
740 for (j = 0; j < rate_table->rate_cnt; j++) { 740 for (j = 0; j < rate_table->rate_cnt; j++) {
741 u32 phy = rate_table->info[j].phy; 741 u32 phy = rate_table->info[j].phy;
742 u32 valid = (ath_rc_priv->single_stream ? 742 u32 valid = (ath_rc_priv->single_stream ?
743 rate_table->info[j].valid_single_stream : 743 rate_table->info[j].valid_single_stream :
744 rate_table->info[j].valid); 744 rate_table->info[j].valid);
745 745
746 if (((((struct ath_rateset *) 746 if (((((struct ath_rateset *)
747 mcs_set)->rs_rates[i] & 0x7F) != 747 mcs_set)->rs_rates[i] & 0x7F) !=
748 (rate_table->info[j].dot11rate & 0x7F)) || 748 (rate_table->info[j].dot11rate & 0x7F)) ||
749 !WLAN_RC_PHY_HT(phy) || 749 !WLAN_RC_PHY_HT(phy) ||
750 !WLAN_RC_PHY_HT_VALID(valid, capflag)) 750 !WLAN_RC_PHY_HT_VALID(valid, capflag))
751 continue; 751 continue;
752 752
753 if (!ath_rc_valid_phyrate(phy, capflag, FALSE)) 753 if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
@@ -847,9 +847,9 @@ void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp)
847 /* For half and quarter rate channles use different 847 /* For half and quarter rate channles use different
848 * rate tables 848 * rate tables
849 */ 849 */
850 if (sc->sc_curchan.channelFlags & CHANNEL_HALF) 850 if (sc->sc_ah->ah_curchan->channelFlags & CHANNEL_HALF)
851 ar5416_sethalf_ratetable(asc); 851 ar5416_sethalf_ratetable(asc);
852 else if (sc->sc_curchan.channelFlags & CHANNEL_QUARTER) 852 else if (sc->sc_ah->ah_curchan->channelFlags & CHANNEL_QUARTER)
853 ar5416_setquarter_ratetable(asc); 853 ar5416_setquarter_ratetable(asc);
854 else /* full rate */ 854 else /* full rate */
855 ar5416_setfull_ratetable(asc); 855 ar5416_setfull_ratetable(asc);
@@ -866,10 +866,10 @@ void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp)
866} 866}
867 867
868static u8 ath_rc_ratefind_ht(struct ath_softc *sc, 868static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
869 struct ath_rate_node *ath_rc_priv, 869 struct ath_rate_node *ath_rc_priv,
870 const struct ath_rate_table *rate_table, 870 const struct ath_rate_table *rate_table,
871 int probe_allowed, int *is_probing, 871 int probe_allowed, int *is_probing,
872 int is_retry) 872 int is_retry)
873{ 873{
874 u32 dt, best_thruput, this_thruput, now_msec; 874 u32 dt, best_thruput, this_thruput, now_msec;
875 u8 rate, next_rate, best_rate, maxindex, minindex; 875 u8 rate, next_rate, best_rate, maxindex, minindex;
@@ -997,8 +997,8 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
997 rate = rate_ctrl->rate_table_size - 1; 997 rate = rate_ctrl->rate_table_size - 1;
998 998
999 ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) || 999 ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) ||
1000 (rate_table->info[rate].valid_single_stream && 1000 (rate_table->info[rate].valid_single_stream &&
1001 ath_rc_priv->single_stream)); 1001 ath_rc_priv->single_stream));
1002 1002
1003 return rate; 1003 return rate;
1004} 1004}
@@ -1023,10 +1023,10 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table ,
1023} 1023}
1024 1024
1025static u8 ath_rc_rate_getidx(struct ath_softc *sc, 1025static u8 ath_rc_rate_getidx(struct ath_softc *sc,
1026 struct ath_rate_node *ath_rc_priv, 1026 struct ath_rate_node *ath_rc_priv,
1027 const struct ath_rate_table *rate_table, 1027 const struct ath_rate_table *rate_table,
1028 u8 rix, u16 stepdown, 1028 u8 rix, u16 stepdown,
1029 u16 min_rate) 1029 u16 min_rate)
1030{ 1030{
1031 u32 j; 1031 u32 j;
1032 u8 nextindex; 1032 u8 nextindex;
@@ -1066,8 +1066,8 @@ static void ath_rc_ratefind(struct ath_softc *sc,
1066 rate_table = 1066 rate_table =
1067 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode]; 1067 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
1068 rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table, 1068 rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table,
1069 (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0, 1069 (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0,
1070 is_probe, is_retry); 1070 is_probe, is_retry);
1071 nrix = rix; 1071 nrix = rix;
1072 1072
1073 if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) { 1073 if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) {
@@ -1099,13 +1099,13 @@ static void ath_rc_ratefind(struct ath_softc *sc,
1099 try_num = ((i + 1) == num_rates) ? 1099 try_num = ((i + 1) == num_rates) ?
1100 num_tries - (try_per_rate * i) : try_per_rate ; 1100 num_tries - (try_per_rate * i) : try_per_rate ;
1101 min_rate = (((i + 1) == num_rates) && 1101 min_rate = (((i + 1) == num_rates) &&
1102 (rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0; 1102 (rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0;
1103 1103
1104 nrix = ath_rc_rate_getidx(sc, ath_rc_priv, 1104 nrix = ath_rc_rate_getidx(sc, ath_rc_priv,
1105 rate_table, nrix, 1, min_rate); 1105 rate_table, nrix, 1, min_rate);
1106 /* All other rates in the series have RTS enabled */ 1106 /* All other rates in the series have RTS enabled */
1107 ath_rc_rate_set_series(rate_table, 1107 ath_rc_rate_set_series(rate_table,
1108 &series[i], try_num, nrix, TRUE); 1108 &series[i], try_num, nrix, TRUE);
1109 } 1109 }
1110 1110
1111 /* 1111 /*
@@ -1124,13 +1124,13 @@ static void ath_rc_ratefind(struct ath_softc *sc,
1124 * above conditions. 1124 * above conditions.
1125 */ 1125 */
1126 if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) || 1126 if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) ||
1127 (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) || 1127 (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) ||
1128 (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) { 1128 (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) {
1129 u8 dot11rate = rate_table->info[rix].dot11rate; 1129 u8 dot11rate = rate_table->info[rix].dot11rate;
1130 u8 phy = rate_table->info[rix].phy; 1130 u8 phy = rate_table->info[rix].phy;
1131 if (i == 4 && 1131 if (i == 4 &&
1132 ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) || 1132 ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
1133 (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) { 1133 (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
1134 series[3].rix = series[2].rix; 1134 series[3].rix = series[2].rix;
1135 series[3].flags = series[2].flags; 1135 series[3].flags = series[2].flags;
1136 series[3].max_4ms_framelen = series[2].max_4ms_framelen; 1136 series[3].max_4ms_framelen = series[2].max_4ms_framelen;
@@ -1141,18 +1141,19 @@ static void ath_rc_ratefind(struct ath_softc *sc,
1141/* 1141/*
1142 * Return the Tx rate series. 1142 * Return the Tx rate series.
1143 */ 1143 */
1144void ath_rate_findrate(struct ath_softc *sc, 1144static void ath_rate_findrate(struct ath_softc *sc,
1145 struct ath_rate_node *ath_rc_priv, 1145 struct ath_rate_node *ath_rc_priv,
1146 int num_tries, 1146 int num_tries,
1147 int num_rates, 1147 int num_rates,
1148 unsigned int rcflag, 1148 unsigned int rcflag,
1149 struct ath_rc_series series[], 1149 struct ath_rc_series series[],
1150 int *is_probe, 1150 int *is_probe,
1151 int is_retry) 1151 int is_retry)
1152{ 1152{
1153 struct ath_vap *avp = ath_rc_priv->avp; 1153 struct ath_vap *avp = ath_rc_priv->avp;
1154 1154
1155 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); 1155 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
1156
1156 if (!num_rates || !num_tries) 1157 if (!num_rates || !num_tries)
1157 return; 1158 return;
1158 1159
@@ -1174,9 +1175,8 @@ void ath_rate_findrate(struct ath_softc *sc,
1174 unsigned int mcs; 1175 unsigned int mcs;
1175 u8 series_rix = 0; 1176 u8 series_rix = 0;
1176 1177
1177 series[idx].tries = 1178 series[idx].tries = IEEE80211_RATE_IDX_ENTRY(
1178 IEEE80211_RATE_IDX_ENTRY( 1179 avp->av_config.av_fixed_retryset, idx);
1179 avp->av_config.av_fixed_retryset, idx);
1180 1180
1181 mcs = IEEE80211_RATE_IDX_ENTRY( 1181 mcs = IEEE80211_RATE_IDX_ENTRY(
1182 avp->av_config.av_fixed_rateset, idx); 1182 avp->av_config.av_fixed_rateset, idx);
@@ -1228,7 +1228,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1228 u32 now_msec = jiffies_to_msecs(jiffies); 1228 u32 now_msec = jiffies_to_msecs(jiffies);
1229 int state_change = FALSE, rate, count; 1229 int state_change = FALSE, rate, count;
1230 u8 last_per; 1230 u8 last_per;
1231 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; 1231 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1232 struct ath_rate_table *rate_table = 1232 struct ath_rate_table *rate_table =
1233 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode]; 1233 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
1234 1234
@@ -1272,14 +1272,14 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1272 } else { 1272 } else {
1273 /* xretries == 2 */ 1273 /* xretries == 2 */
1274 count = sizeof(nretry_to_per_lookup) / 1274 count = sizeof(nretry_to_per_lookup) /
1275 sizeof(nretry_to_per_lookup[0]); 1275 sizeof(nretry_to_per_lookup[0]);
1276 if (retries >= count) 1276 if (retries >= count)
1277 retries = count - 1; 1277 retries = count - 1;
1278 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */ 1278 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1279 rate_ctrl->state[tx_rate].per = 1279 rate_ctrl->state[tx_rate].per =
1280 (u8)(rate_ctrl->state[tx_rate].per - 1280 (u8)(rate_ctrl->state[tx_rate].per -
1281 (rate_ctrl->state[tx_rate].per >> 3) + 1281 (rate_ctrl->state[tx_rate].per >> 3) +
1282 ((100) >> 3)); 1282 ((100) >> 3));
1283 } 1283 }
1284 1284
1285 /* xretries == 1 or 2 */ 1285 /* xretries == 1 or 2 */
@@ -1295,8 +1295,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1295 if (retries >= count) 1295 if (retries >= count)
1296 retries = count - 1; 1296 retries = count - 1;
1297 if (info_priv->n_bad_frames) { 1297 if (info_priv->n_bad_frames) {
1298 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */ 1298 /* new_PER = 7/8*old_PER + 1/8*(currentPER)
1299 /*
1300 * Assuming that n_frames is not 0. The current PER 1299 * Assuming that n_frames is not 0. The current PER
1301 * from the retries is 100 * retries / (retries+1), 1300 * from the retries is 100 * retries / (retries+1),
1302 * since the first retries attempts failed, and the 1301 * since the first retries attempts failed, and the
@@ -1386,7 +1385,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1386 * rssi_ack values. 1385 * rssi_ack values.
1387 */ 1386 */
1388 if (tx_rate == rate_ctrl->rate_max_phy && 1387 if (tx_rate == rate_ctrl->rate_max_phy &&
1389 rate_ctrl->hw_maxretry_pktcnt < 255) { 1388 rate_ctrl->hw_maxretry_pktcnt < 255) {
1390 rate_ctrl->hw_maxretry_pktcnt++; 1389 rate_ctrl->hw_maxretry_pktcnt++;
1391 } 1390 }
1392 1391
@@ -1418,7 +1417,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1418 /* Now reduce the current 1417 /* Now reduce the current
1419 * rssi threshold. */ 1418 * rssi threshold. */
1420 if ((rssi_ackAvg < rssi_thres + 2) && 1419 if ((rssi_ackAvg < rssi_thres + 2) &&
1421 (rssi_thres > rssi_ack_vmin)) { 1420 (rssi_thres > rssi_ack_vmin)) {
1422 rate_ctrl->state[tx_rate]. 1421 rate_ctrl->state[tx_rate].
1423 rssi_thres--; 1422 rssi_thres--;
1424 } 1423 }
@@ -1436,10 +1435,10 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1436 * a while (except if we are probing). 1435 * a while (except if we are probing).
1437 */ 1436 */
1438 if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 && 1437 if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 &&
1439 rate_table->info[tx_rate].ratekbps <= 1438 rate_table->info[tx_rate].ratekbps <=
1440 rate_table->info[rate_ctrl->rate_max_phy].ratekbps) { 1439 rate_table->info[rate_ctrl->rate_max_phy].ratekbps) {
1441 ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl, 1440 ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl,
1442 (u8) tx_rate, &rate_ctrl->rate_max_phy); 1441 (u8) tx_rate, &rate_ctrl->rate_max_phy);
1443 1442
1444 /* Don't probe for a little while. */ 1443 /* Don't probe for a little while. */
1445 rate_ctrl->probe_time = now_msec; 1444 rate_ctrl->probe_time = now_msec;
@@ -1460,43 +1459,43 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1460 break; 1459 break;
1461 1460
1462 if (rate_ctrl->state[rate].rssi_thres + 1461 if (rate_ctrl->state[rate].rssi_thres +
1463 rate_table->info[rate].rssi_ack_deltamin > 1462 rate_table->info[rate].rssi_ack_deltamin >
1464 rate_ctrl->state[rate+1].rssi_thres) { 1463 rate_ctrl->state[rate+1].rssi_thres) {
1465 rate_ctrl->state[rate+1].rssi_thres = 1464 rate_ctrl->state[rate+1].rssi_thres =
1466 rate_ctrl->state[rate]. 1465 rate_ctrl->state[rate].
1467 rssi_thres + 1466 rssi_thres +
1468 rate_table->info[rate]. 1467 rate_table->info[rate].
1469 rssi_ack_deltamin; 1468 rssi_ack_deltamin;
1470 } 1469 }
1471 } 1470 }
1472 1471
1473 /* Make sure the rates below this have lower rssi thresholds. */ 1472 /* Make sure the rates below this have lower rssi thresholds. */
1474 for (rate = tx_rate - 1; rate >= 0; rate--) { 1473 for (rate = tx_rate - 1; rate >= 0; rate--) {
1475 if (rate_table->info[rate].phy != 1474 if (rate_table->info[rate].phy !=
1476 rate_table->info[tx_rate].phy) 1475 rate_table->info[tx_rate].phy)
1477 break; 1476 break;
1478 1477
1479 if (rate_ctrl->state[rate].rssi_thres + 1478 if (rate_ctrl->state[rate].rssi_thres +
1480 rate_table->info[rate].rssi_ack_deltamin > 1479 rate_table->info[rate].rssi_ack_deltamin >
1481 rate_ctrl->state[rate+1].rssi_thres) { 1480 rate_ctrl->state[rate+1].rssi_thres) {
1482 if (rate_ctrl->state[rate+1].rssi_thres < 1481 if (rate_ctrl->state[rate+1].rssi_thres <
1483 rate_table->info[rate]. 1482 rate_table->info[rate].
1484 rssi_ack_deltamin) 1483 rssi_ack_deltamin)
1485 rate_ctrl->state[rate].rssi_thres = 0; 1484 rate_ctrl->state[rate].rssi_thres = 0;
1486 else { 1485 else {
1487 rate_ctrl->state[rate].rssi_thres = 1486 rate_ctrl->state[rate].rssi_thres =
1488 rate_ctrl->state[rate+1]. 1487 rate_ctrl->state[rate+1].
1489 rssi_thres - 1488 rssi_thres -
1490 rate_table->info[rate]. 1489 rate_table->info[rate].
1491 rssi_ack_deltamin; 1490 rssi_ack_deltamin;
1492 } 1491 }
1493 1492
1494 if (rate_ctrl->state[rate].rssi_thres < 1493 if (rate_ctrl->state[rate].rssi_thres <
1495 rate_table->info[rate]. 1494 rate_table->info[rate].
1496 rssi_ack_validmin) { 1495 rssi_ack_validmin) {
1497 rate_ctrl->state[rate].rssi_thres = 1496 rate_ctrl->state[rate].rssi_thres =
1498 rate_table->info[rate]. 1497 rate_table->info[rate].
1499 rssi_ack_validmin; 1498 rssi_ack_validmin;
1500 } 1499 }
1501 } 1500 }
1502 } 1501 }
@@ -1507,11 +1506,11 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1507 if (rate_ctrl->state[tx_rate].per < last_per) { 1506 if (rate_ctrl->state[tx_rate].per < last_per) {
1508 for (rate = tx_rate - 1; rate >= 0; rate--) { 1507 for (rate = tx_rate - 1; rate >= 0; rate--) {
1509 if (rate_table->info[rate].phy != 1508 if (rate_table->info[rate].phy !=
1510 rate_table->info[tx_rate].phy) 1509 rate_table->info[tx_rate].phy)
1511 break; 1510 break;
1512 1511
1513 if (rate_ctrl->state[rate].per > 1512 if (rate_ctrl->state[rate].per >
1514 rate_ctrl->state[rate+1].per) { 1513 rate_ctrl->state[rate+1].per) {
1515 rate_ctrl->state[rate].per = 1514 rate_ctrl->state[rate].per =
1516 rate_ctrl->state[rate+1].per; 1515 rate_ctrl->state[rate+1].per;
1517 } 1516 }
@@ -1528,11 +1527,11 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1528 /* Every so often, we reduce the thresholds and 1527 /* Every so often, we reduce the thresholds and
1529 * PER (different for CCK and OFDM). */ 1528 * PER (different for CCK and OFDM). */
1530 if (now_msec - rate_ctrl->rssi_down_time >= 1529 if (now_msec - rate_ctrl->rssi_down_time >=
1531 rate_table->rssi_reduce_interval) { 1530 rate_table->rssi_reduce_interval) {
1532 1531
1533 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) { 1532 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
1534 if (rate_ctrl->state[rate].rssi_thres > 1533 if (rate_ctrl->state[rate].rssi_thres >
1535 rate_table->info[rate].rssi_ack_validmin) 1534 rate_table->info[rate].rssi_ack_validmin)
1536 rate_ctrl->state[rate].rssi_thres -= 1; 1535 rate_ctrl->state[rate].rssi_thres -= 1;
1537 } 1536 }
1538 rate_ctrl->rssi_down_time = now_msec; 1537 rate_ctrl->rssi_down_time = now_msec;
@@ -1541,7 +1540,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1541 /* Every so often, we reduce the thresholds 1540 /* Every so often, we reduce the thresholds
1542 * and PER (different for CCK and OFDM). */ 1541 * and PER (different for CCK and OFDM). */
1543 if (now_msec - rate_ctrl->per_down_time >= 1542 if (now_msec - rate_ctrl->per_down_time >=
1544 rate_table->rssi_reduce_interval) { 1543 rate_table->rssi_reduce_interval) {
1545 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) { 1544 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
1546 rate_ctrl->state[rate].per = 1545 rate_ctrl->state[rate].per =
1547 7 * rate_ctrl->state[rate].per / 8; 1546 7 * rate_ctrl->state[rate].per / 8;
@@ -1560,7 +1559,7 @@ static void ath_rc_update(struct ath_softc *sc,
1560 struct ath_tx_info_priv *info_priv, int final_ts_idx, 1559 struct ath_tx_info_priv *info_priv, int final_ts_idx,
1561 int xretries, int long_retry) 1560 int xretries, int long_retry)
1562{ 1561{
1563 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; 1562 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1564 struct ath_rate_table *rate_table; 1563 struct ath_rate_table *rate_table;
1565 struct ath_tx_ratectrl *rate_ctrl; 1564 struct ath_tx_ratectrl *rate_ctrl;
1566 struct ath_rc_series rcs[4]; 1565 struct ath_rc_series rcs[4];
@@ -1637,7 +1636,6 @@ static void ath_rc_update(struct ath_softc *sc,
1637 xretries, long_retry); 1636 xretries, long_retry);
1638} 1637}
1639 1638
1640
1641/* 1639/*
1642 * Process a tx descriptor for a completed transmit (success or failure). 1640 * Process a tx descriptor for a completed transmit (success or failure).
1643 */ 1641 */
@@ -1651,13 +1649,13 @@ static void ath_rate_tx_complete(struct ath_softc *sc,
1651 struct ath_vap *avp; 1649 struct ath_vap *avp;
1652 1650
1653 avp = rc_priv->avp; 1651 avp = rc_priv->avp;
1654 if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) 1652 if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) ||
1655 || info_priv->tx.ts_status & ATH9K_TXERR_FILT) 1653 (info_priv->tx.ts_status & ATH9K_TXERR_FILT))
1656 return; 1654 return;
1657 1655
1658 if (info_priv->tx.ts_rssi > 0) { 1656 if (info_priv->tx.ts_rssi > 0) {
1659 ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi, 1657 ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi,
1660 info_priv->tx.ts_rssi); 1658 info_priv->tx.ts_rssi);
1661 } 1659 }
1662 1660
1663 /* 1661 /*
@@ -1682,7 +1680,6 @@ static void ath_rate_tx_complete(struct ath_softc *sc,
1682 info_priv->tx.ts_longretry); 1680 info_priv->tx.ts_longretry);
1683} 1681}
1684 1682
1685
1686/* 1683/*
1687 * Update the SIB's rate control information 1684 * Update the SIB's rate control information
1688 * 1685 *
@@ -1701,8 +1698,8 @@ static void ath_rc_sib_update(struct ath_softc *sc,
1701 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; 1698 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1702 struct ath_rateset *rateset = negotiated_rates; 1699 struct ath_rateset *rateset = negotiated_rates;
1703 u8 *ht_mcs = (u8 *)negotiated_htrates; 1700 u8 *ht_mcs = (u8 *)negotiated_htrates;
1704 struct ath_tx_ratectrl *rate_ctrl = (struct ath_tx_ratectrl *) 1701 struct ath_tx_ratectrl *rate_ctrl =
1705 (ath_rc_priv); 1702 (struct ath_tx_ratectrl *)ath_rc_priv;
1706 u8 i, j, k, hi = 0, hthi = 0; 1703 u8 i, j, k, hi = 0, hthi = 0;
1707 1704
1708 rate_table = (struct ath_rate_table *) 1705 rate_table = (struct ath_rate_table *)
@@ -1824,10 +1821,11 @@ static void ath_setup_rates(struct ieee80211_local *local, struct sta_info *sta)
1824 struct ath_rate_node *rc_priv = sta->rate_ctrl_priv; 1821 struct ath_rate_node *rc_priv = sta->rate_ctrl_priv;
1825 int i, j = 0; 1822 int i, j = 0;
1826 1823
1827 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); 1824 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
1825
1828 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1826 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1829 for (i = 0; i < sband->n_bitrates; i++) { 1827 for (i = 0; i < sband->n_bitrates; i++) {
1830 if (sta->supp_rates[local->hw.conf.channel->band] & BIT(i)) { 1828 if (sta->sta.supp_rates[local->hw.conf.channel->band] & BIT(i)) {
1831 rc_priv->neg_rates.rs_rates[j] 1829 rc_priv->neg_rates.rs_rates[j]
1832 = (sband->bitrates[i].bitrate * 2) / 10; 1830 = (sband->bitrates[i].bitrate * 2) / 10;
1833 j++; 1831 j++;
@@ -1903,7 +1901,7 @@ static void ath_tx_aggr_resp(struct ath_softc *sc,
1903 int state; 1901 int state;
1904 DECLARE_MAC_BUF(mac); 1902 DECLARE_MAC_BUF(mac);
1905 1903
1906 if (!sc->sc_txaggr) 1904 if (!(sc->sc_flags & SC_OP_TXAGGR))
1907 return; 1905 return;
1908 1906
1909 txtid = ATH_AN_2_TID(an, tidno); 1907 txtid = ATH_AN_2_TID(an, tidno);
@@ -1944,7 +1942,7 @@ static void ath_get_rate(void *priv, struct net_device *dev,
1944 struct ath_rate_node *ath_rc_priv; 1942 struct ath_rate_node *ath_rc_priv;
1945 struct ath_node *an; 1943 struct ath_node *an;
1946 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1944 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1947 int is_probe, chk, ret; 1945 int is_probe = FALSE, chk, ret;
1948 s8 lowest_idx; 1946 s8 lowest_idx;
1949 __le16 fc = hdr->frame_control; 1947 __le16 fc = hdr->frame_control;
1950 u8 *qc, tid; 1948 u8 *qc, tid;
@@ -1962,7 +1960,7 @@ static void ath_get_rate(void *priv, struct net_device *dev,
1962 tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10; 1960 tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10;
1963 /* lowest rate for management and multicast/broadcast frames */ 1961 /* lowest rate for management and multicast/broadcast frames */
1964 if (!ieee80211_is_data(fc) || 1962 if (!ieee80211_is_data(fc) ||
1965 is_multicast_ether_addr(hdr->addr1) || !sta) { 1963 is_multicast_ether_addr(hdr->addr1) || !sta) {
1966 sel->rate_idx = lowest_idx; 1964 sel->rate_idx = lowest_idx;
1967 return; 1965 return;
1968 } 1966 }
@@ -1978,7 +1976,7 @@ static void ath_get_rate(void *priv, struct net_device *dev,
1978 false); 1976 false);
1979 if (is_probe) 1977 if (is_probe)
1980 sel->probe_idx = ((struct ath_tx_ratectrl *) 1978 sel->probe_idx = ((struct ath_tx_ratectrl *)
1981 sta->rate_ctrl_priv)->probe_rate; 1979 sta->rate_ctrl_priv)->probe_rate;
1982 1980
1983 /* Ratecontrol sometimes returns invalid rate index */ 1981 /* Ratecontrol sometimes returns invalid rate index */
1984 if (tx_info_priv->rcs[0].rix != 0xff) 1982 if (tx_info_priv->rcs[0].rix != 0xff)
@@ -2035,23 +2033,22 @@ static void ath_rate_init(void *priv, void *priv_sta,
2035 struct ieee80211_hw *hw = local_to_hw(local); 2033 struct ieee80211_hw *hw = local_to_hw(local);
2036 struct ieee80211_conf *conf = &local->hw.conf; 2034 struct ieee80211_conf *conf = &local->hw.conf;
2037 struct ath_softc *sc = hw->priv; 2035 struct ath_softc *sc = hw->priv;
2036 struct ath_rate_node *ath_rc_priv = priv_sta;
2038 int i, j = 0; 2037 int i, j = 0;
2039 2038
2040 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__); 2039 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2041 2040
2042 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 2041 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2043 sta->txrate_idx = rate_lowest_index(local, sband, sta);
2044 2042
2045 ath_setup_rates(local, sta); 2043 ath_setup_rates(local, sta);
2046 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { 2044 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
2047 for (i = 0; i < MCS_SET_SIZE; i++) { 2045 for (i = 0; i < MCS_SET_SIZE; i++) {
2048 if (conf->ht_conf.supp_mcs_set[i/8] & (1<<(i%8))) 2046 if (conf->ht_conf.supp_mcs_set[i/8] & (1<<(i%8)))
2049 ((struct ath_rate_node *) 2047 ath_rc_priv->neg_ht_rates.rs_rates[j++] = i;
2050 priv_sta)->neg_ht_rates.rs_rates[j++] = i;
2051 if (j == ATH_RATE_MAX) 2048 if (j == ATH_RATE_MAX)
2052 break; 2049 break;
2053 } 2050 }
2054 ((struct ath_rate_node *)priv_sta)->neg_ht_rates.rs_nrates = j; 2051 ath_rc_priv->neg_ht_rates.rs_nrates = j;
2055 } 2052 }
2056 ath_rc_node_update(hw, priv_sta); 2053 ath_rc_node_update(hw, priv_sta);
2057} 2054}
@@ -2066,7 +2063,7 @@ static void *ath_rate_alloc(struct ieee80211_local *local)
2066 struct ieee80211_hw *hw = local_to_hw(local); 2063 struct ieee80211_hw *hw = local_to_hw(local);
2067 struct ath_softc *sc = hw->priv; 2064 struct ath_softc *sc = hw->priv;
2068 2065
2069 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); 2066 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2070 return local->hw.priv; 2067 return local->hw.priv;
2071} 2068}
2072 2069
@@ -2081,14 +2078,17 @@ static void *ath_rate_alloc_sta(void *priv, gfp_t gfp)
2081 struct ath_vap *avp = sc->sc_vaps[0]; 2078 struct ath_vap *avp = sc->sc_vaps[0];
2082 struct ath_rate_node *rate_priv; 2079 struct ath_rate_node *rate_priv;
2083 2080
2084 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); 2081 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2082
2085 rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp); 2083 rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp);
2086 if (!rate_priv) { 2084 if (!rate_priv) {
2087 DPRINTF(sc, ATH_DBG_FATAL, "%s:Unable to allocate" 2085 DPRINTF(sc, ATH_DBG_FATAL,
2088 "private rate control structure", __func__); 2086 "%s: Unable to allocate private rc structure\n",
2087 __func__);
2089 return NULL; 2088 return NULL;
2090 } 2089 }
2091 ath_rc_sib_init(rate_priv); 2090 ath_rc_sib_init(rate_priv);
2091
2092 return rate_priv; 2092 return rate_priv;
2093} 2093}
2094 2094
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
index 71aef9c75232..b95b41508b98 100644
--- a/drivers/net/wireless/ath9k/rc.h
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -71,9 +71,6 @@ enum ieee80211_fixed_rate_mode {
71 */ 71 */
72#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8))) 72#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
73 73
74#define SHORT_PRE 1
75#define LONG_PRE 0
76
77#define WLAN_PHY_HT_20_SS WLAN_RC_PHY_HT_20_SS 74#define WLAN_PHY_HT_20_SS WLAN_RC_PHY_HT_20_SS
78#define WLAN_PHY_HT_20_DS WLAN_RC_PHY_HT_20_DS 75#define WLAN_PHY_HT_20_DS WLAN_RC_PHY_HT_20_DS
79#define WLAN_PHY_HT_20_DS_HGI WLAN_RC_PHY_HT_20_DS_HGI 76#define WLAN_PHY_HT_20_DS_HGI WLAN_RC_PHY_HT_20_DS_HGI
@@ -102,18 +99,18 @@ enum {
102 WLAN_RC_PHY_MAX 99 WLAN_RC_PHY_MAX
103}; 100};
104 101
105#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \ 102#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
106 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 103 || (_phy == WLAN_RC_PHY_HT_40_DS) \
107 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ 104 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
108 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 105 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
109#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \ 106#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
110 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 107 || (_phy == WLAN_RC_PHY_HT_40_DS) \
111 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ 108 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
112 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 109 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
113#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \ 110#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
114 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ 111 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
115 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ 112 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
116 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 113 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
117 114
118#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS) 115#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
119 116
@@ -135,56 +132,59 @@ enum {
135#define WLAN_RC_SGI_FLAG (0x04) 132#define WLAN_RC_SGI_FLAG (0x04)
136#define WLAN_RC_HT_FLAG (0x08) 133#define WLAN_RC_HT_FLAG (0x08)
137 134
138/* Index into the rate table */
139#define INIT_RATE_MAX_20 23
140#define INIT_RATE_MAX_40 40
141
142#define RATE_TABLE_SIZE 64 135#define RATE_TABLE_SIZE 64
143 136
144/* XXX: Convert to kdoc */ 137/**
138 * struct ath_rate_table - Rate Control table
139 * @valid: valid for use in rate control
140 * @valid_single_stream: valid for use in rate control for
141 * single stream operation
142 * @phy: CCK/OFDM
143 * @ratekbps: rate in Kbits per second
144 * @user_ratekbps: user rate in Kbits per second
145 * @ratecode: rate that goes into HW descriptors
146 * @short_preamble: Mask for enabling short preamble in ratecode for CCK
147 * @dot11rate: value that goes into supported
148 * rates info element of MLME
149 * @ctrl_rate: Index of next lower basic rate, used for duration computation
150 * @max_4ms_framelen: maximum frame length(bytes) for tx duration
151 * @probe_interval: interval for rate control to probe for other rates
152 * @rssi_reduce_interval: interval for rate control to reduce rssi
153 * @initial_ratemax: initial ratemax value used in ath_rc_sib_update()
154 */
145struct ath_rate_table { 155struct ath_rate_table {
146 int rate_cnt; 156 int rate_cnt;
147 struct { 157 struct {
148 int valid; /* Valid for use in rate control */ 158 int valid;
149 int valid_single_stream;/* Valid for use in rate control 159 int valid_single_stream;
150 for single stream operation */ 160 u8 phy;
151 u8 phy; /* CCK/OFDM/TURBO/XR */ 161 u32 ratekbps;
152 u32 ratekbps; /* Rate in Kbits per second */ 162 u32 user_ratekbps;
153 u32 user_ratekbps; /* User rate in KBits per second */ 163 u8 ratecode;
154 u8 ratecode; /* rate that goes into 164 u8 short_preamble;
155 hw descriptors */ 165 u8 dot11rate;
156 u8 short_preamble; /* Mask for enabling short preamble 166 u8 ctrl_rate;
157 in rate code for CCK */ 167 int8_t rssi_ack_validmin;
158 u8 dot11rate; /* Value that goes into supported 168 int8_t rssi_ack_deltamin;
159 rates info element of MLME */ 169 u8 base_index;
160 u8 ctrl_rate; /* Index of next lower basic rate, 170 u8 cw40index;
161 used for duration computation */ 171 u8 sgi_index;
162 int8_t rssi_ack_validmin; /* Rate control related */ 172 u8 ht_index;
163 int8_t rssi_ack_deltamin; /* Rate control related */ 173 u32 max_4ms_framelen;
164 u8 base_index; /* base rate index */
165 u8 cw40index; /* 40cap rate index */
166 u8 sgi_index; /* shortgi rate index */
167 u8 ht_index; /* shortgi rate index */
168 u32 max_4ms_framelen; /* Maximum frame length(bytes)
169 for 4ms tx duration */
170 } info[RATE_TABLE_SIZE]; 174 } info[RATE_TABLE_SIZE];
171 u32 probe_interval; /* interval for ratectrl to 175 u32 probe_interval;
172 probe for other rates */ 176 u32 rssi_reduce_interval;
173 u32 rssi_reduce_interval; /* interval for ratectrl 177 u8 initial_ratemax;
174 to reduce RSSI */
175 u8 initial_ratemax; /* the initial ratemax value used
176 in ath_rc_sib_update() */
177}; 178};
178 179
179#define ATH_RC_PROBE_ALLOWED 0x00000001 180#define ATH_RC_PROBE_ALLOWED 0x00000001
180#define ATH_RC_MINRATE_LASTRATE 0x00000002 181#define ATH_RC_MINRATE_LASTRATE 0x00000002
181#define ATH_RC_SHORT_PREAMBLE 0x00000004
182 182
183struct ath_rc_series { 183struct ath_rc_series {
184 u8 rix; 184 u8 rix;
185 u8 tries; 185 u8 tries;
186 u8 flags; 186 u8 flags;
187 u32 max_4ms_framelen; 187 u32 max_4ms_framelen;
188}; 188};
189 189
190/* rcs_flags definition */ 190/* rcs_flags definition */
@@ -201,42 +201,56 @@ struct ath_rc_series {
201#define MAX_TX_RATE_PHY 48 201#define MAX_TX_RATE_PHY 48
202 202
203struct ath_tx_ratectrl_state { 203struct ath_tx_ratectrl_state {
204 int8_t rssi_thres; /* required rssi for this rate (dB) */ 204 int8_t rssi_thres; /* required rssi for this rate (dB) */
205 u8 per; /* recent estimate of packet error rate (%) */ 205 u8 per; /* recent estimate of packet error rate (%) */
206}; 206};
207 207
208/**
209 * struct ath_tx_ratectrl - TX Rate control Information
210 * @state: RC state
211 * @rssi_last: last ACK rssi
212 * @rssi_last_lookup: last ACK rssi used for lookup
213 * @rssi_last_prev: previous last ACK rssi
214 * @rssi_last_prev2: 2nd previous last ACK rssi
215 * @rssi_sum_cnt: count of rssi_sum for averaging
216 * @rssi_sum_rate: rate that we are averaging
217 * @rssi_sum: running sum of rssi for averaging
218 * @probe_rate: rate we are probing at
219 * @rssi_time: msec timestamp for last ack rssi
220 * @rssi_down_time: msec timestamp for last down step
221 * @probe_time: msec timestamp for last probe
222 * @hw_maxretry_pktcnt: num of packets since we got HW max retry error
223 * @max_valid_rate: maximum number of valid rate
224 * @per_down_time: msec timestamp for last PER down step
225 * @valid_phy_ratecnt: valid rate count
226 * @rate_max_phy: phy index for the max rate
227 * @probe_interval: interval for ratectrl to probe for other rates
228 */
208struct ath_tx_ratectrl { 229struct ath_tx_ratectrl {
209 struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL]; /* state */ 230 struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL];
210 int8_t rssi_last; /* last ack rssi */ 231 int8_t rssi_last;
211 int8_t rssi_last_lookup; /* last ack rssi used for lookup */ 232 int8_t rssi_last_lookup;
212 int8_t rssi_last_prev; /* previous last ack rssi */ 233 int8_t rssi_last_prev;
213 int8_t rssi_last_prev2; /* 2nd previous last ack rssi */ 234 int8_t rssi_last_prev2;
214 int32_t rssi_sum_cnt; /* count of rssi_sum for averaging */ 235 int32_t rssi_sum_cnt;
215 int32_t rssi_sum_rate; /* rate that we are averaging */ 236 int32_t rssi_sum_rate;
216 int32_t rssi_sum; /* running sum of rssi for averaging */ 237 int32_t rssi_sum;
217 u32 valid_txrate_mask; /* mask of valid rates */ 238 u8 rate_table_size;
218 u8 rate_table_size; /* rate table size */ 239 u8 probe_rate;
219 u8 rate_max; /* max rate that has recently worked */ 240 u32 rssi_time;
220 u8 probe_rate; /* rate we are probing at */ 241 u32 rssi_down_time;
221 u32 rssi_time; /* msec timestamp for last ack rssi */ 242 u32 probe_time;
222 u32 rssi_down_time; /* msec timestamp for last down step */ 243 u8 hw_maxretry_pktcnt;
223 u32 probe_time; /* msec timestamp for last probe */ 244 u8 max_valid_rate;
224 u8 hw_maxretry_pktcnt; /* num packets since we got 245 u8 valid_rate_index[MAX_TX_RATE_TBL];
225 HW max retry error */ 246 u32 per_down_time;
226 u8 max_valid_rate; /* maximum number of valid rate */
227 u8 valid_rate_index[MAX_TX_RATE_TBL]; /* valid rate index */
228 u32 per_down_time; /* msec timstamp for last
229 PER down step */
230 247
231 /* 11n state */ 248 /* 11n state */
232 u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX]; /* valid rate count */ 249 u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
233 u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL]; 250 u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL];
234 u8 rc_phy_mode; 251 u8 rc_phy_mode;
235 u8 rate_max_phy; /* Phy index for the max rate */ 252 u8 rate_max_phy;
236 u32 rate_max_lastused; /* msec timstamp of when we 253 u32 probe_interval;
237 last used rateMaxPhy */
238 u32 probe_interval; /* interval for ratectrl to probe
239 for other rates */
240}; 254};
241 255
242struct ath_rateset { 256struct ath_rateset {
@@ -248,29 +262,32 @@ struct ath_rateset {
248struct ath_rate_softc { 262struct ath_rate_softc {
249 /* phy tables that contain rate control data */ 263 /* phy tables that contain rate control data */
250 const void *hw_rate_table[ATH9K_MODE_MAX]; 264 const void *hw_rate_table[ATH9K_MODE_MAX];
251 int fixedrix; /* -1 or index of fixed rate */ 265
266 /* -1 or index of fixed rate */
267 int fixedrix;
252}; 268};
253 269
254/* per-node state */ 270/* per-node state */
255struct ath_rate_node { 271struct ath_rate_node {
256 struct ath_tx_ratectrl tx_ratectrl; /* rate control state proper */ 272 struct ath_tx_ratectrl tx_ratectrl;
257 u32 prev_data_rix; /* rate idx of last data frame */
258 273
259 /* map of rate ix -> negotiated rate set ix */ 274 /* rate idx of last data frame */
260 u8 rixmap[MAX_TX_RATE_TBL]; 275 u32 prev_data_rix;
261 276
262 /* map of ht rate ix -> negotiated rate set ix */ 277 /* ht capabilities */
263 u8 ht_rixmap[MAX_TX_RATE_TBL]; 278 u8 ht_cap;
264 279
265 u8 ht_cap; /* ht capabilities */ 280 /* When TRUE, only single stream Tx possible */
266 u8 ant_tx; /* current transmit antenna */ 281 u8 single_stream;
267 282
268 u8 single_stream; /* When TRUE, only single 283 /* Negotiated rates */
269 stream Tx possible */ 284 struct ath_rateset neg_rates;
270 struct ath_rateset neg_rates; /* Negotiated rates */ 285
271 struct ath_rateset neg_ht_rates; /* Negotiated HT rates */ 286 /* Negotiated HT rates */
272 struct ath_rate_softc *asc; /* back pointer to atheros softc */ 287 struct ath_rateset neg_ht_rates;
273 struct ath_vap *avp; /* back pointer to vap */ 288
289 struct ath_rate_softc *asc;
290 struct ath_vap *avp;
274}; 291};
275 292
276/* Driver data of ieee80211_tx_info */ 293/* Driver data of ieee80211_tx_info */
@@ -297,17 +314,10 @@ void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv);
297void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp); 314void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp);
298 315
299/* 316/*
300 * Return the tx rate series.
301 */
302void ath_rate_findrate(struct ath_softc *sc, struct ath_rate_node *ath_rc_priv,
303 int num_tries, int num_rates,
304 unsigned int rcflag, struct ath_rc_series[],
305 int *is_probe, int isretry);
306/*
307 * Return rate index for given Dot11 Rate. 317 * Return rate index for given Dot11 Rate.
308 */ 318 */
309u8 ath_rate_findrateix(struct ath_softc *sc, 319u8 ath_rate_findrateix(struct ath_softc *sc,
310 u8 dot11_rate); 320 u8 dot11_rate);
311 321
312/* Routines to register/unregister rate control algorithm */ 322/* Routines to register/unregister rate control algorithm */
313int ath_rate_control_register(void); 323int ath_rate_control_register(void);
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 2fe806175c01..498256309ab7 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -184,7 +184,7 @@ static int ath_ampdu_input(struct ath_softc *sc,
184 tid = qc[0] & 0xf; 184 tid = qc[0] & 0xf;
185 } 185 }
186 186
187 if (sc->sc_opmode == ATH9K_M_STA) { 187 if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
188 /* Drop the frame not belonging to me. */ 188 /* Drop the frame not belonging to me. */
189 if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) { 189 if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
190 dev_kfree_skb(skb); 190 dev_kfree_skb(skb);
@@ -360,8 +360,9 @@ static void ath_rx_flush_tid(struct ath_softc *sc,
360 struct ath_arx_tid *rxtid, int drop) 360 struct ath_arx_tid *rxtid, int drop)
361{ 361{
362 struct ath_rxbuf *rxbuf; 362 struct ath_rxbuf *rxbuf;
363 unsigned long flag;
363 364
364 spin_lock_bh(&rxtid->tidlock); 365 spin_lock_irqsave(&rxtid->tidlock, flag);
365 while (rxtid->baw_head != rxtid->baw_tail) { 366 while (rxtid->baw_head != rxtid->baw_tail) {
366 rxbuf = rxtid->rxbuf + rxtid->baw_head; 367 rxbuf = rxtid->rxbuf + rxtid->baw_head;
367 if (!rxbuf->rx_wbuf) { 368 if (!rxbuf->rx_wbuf) {
@@ -382,7 +383,7 @@ static void ath_rx_flush_tid(struct ath_softc *sc,
382 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); 383 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
383 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); 384 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
384 } 385 }
385 spin_unlock_bh(&rxtid->tidlock); 386 spin_unlock_irqrestore(&rxtid->tidlock, flag);
386} 387}
387 388
388static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, 389static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
@@ -448,17 +449,16 @@ static int ath_rx_indicate(struct ath_softc *sc,
448 int type; 449 int type;
449 450
450 /* indicate frame to the stack, which will free the old skb. */ 451 /* indicate frame to the stack, which will free the old skb. */
451 type = ath__rx_indicate(sc, skb, status, keyix); 452 type = _ath_rx_indicate(sc, skb, status, keyix);
452 453
453 /* allocate a new skb and queue it to for H/W processing */ 454 /* allocate a new skb and queue it to for H/W processing */
454 nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); 455 nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
455 if (nskb != NULL) { 456 if (nskb != NULL) {
456 bf->bf_mpdu = nskb; 457 bf->bf_mpdu = nskb;
457 bf->bf_buf_addr = ath_skb_map_single(sc, 458 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
458 nskb, 459 skb_end_pointer(nskb) - nskb->head,
459 PCI_DMA_FROMDEVICE, 460 PCI_DMA_FROMDEVICE);
460 /* XXX: Remove get_dma_mem_context() */ 461 bf->bf_dmacontext = bf->bf_buf_addr;
461 get_dma_mem_context(bf, bf_dmacontext));
462 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf; 462 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
463 463
464 /* queue the new wbuf to H/W */ 464 /* queue the new wbuf to H/W */
@@ -504,7 +504,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
504 504
505 do { 505 do {
506 spin_lock_init(&sc->sc_rxflushlock); 506 spin_lock_init(&sc->sc_rxflushlock);
507 sc->sc_rxflush = 0; 507 sc->sc_flags &= ~SC_OP_RXFLUSH;
508 spin_lock_init(&sc->sc_rxbuflock); 508 spin_lock_init(&sc->sc_rxbuflock);
509 509
510 /* 510 /*
@@ -541,9 +541,10 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
541 } 541 }
542 542
543 bf->bf_mpdu = skb; 543 bf->bf_mpdu = skb;
544 bf->bf_buf_addr = 544 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
545 ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE, 545 skb_end_pointer(skb) - skb->head,
546 get_dma_mem_context(bf, bf_dmacontext)); 546 PCI_DMA_FROMDEVICE);
547 bf->bf_dmacontext = bf->bf_buf_addr;
547 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf; 548 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
548 } 549 }
549 sc->sc_rxlink = NULL; 550 sc->sc_rxlink = NULL;
@@ -597,6 +598,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
597u32 ath_calcrxfilter(struct ath_softc *sc) 598u32 ath_calcrxfilter(struct ath_softc *sc)
598{ 599{
599#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR) 600#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
601
600 u32 rfilt; 602 u32 rfilt;
601 603
602 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE) 604 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
@@ -604,25 +606,29 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
604 | ATH9K_RX_FILTER_MCAST; 606 | ATH9K_RX_FILTER_MCAST;
605 607
606 /* If not a STA, enable processing of Probe Requests */ 608 /* If not a STA, enable processing of Probe Requests */
607 if (sc->sc_opmode != ATH9K_M_STA) 609 if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
608 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 610 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
609 611
610 /* Can't set HOSTAP into promiscous mode */ 612 /* Can't set HOSTAP into promiscous mode */
611 if (sc->sc_opmode == ATH9K_M_MONITOR) { 613 if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
614 (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
615 (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
612 rfilt |= ATH9K_RX_FILTER_PROM; 616 rfilt |= ATH9K_RX_FILTER_PROM;
613 /* ??? To prevent from sending ACK */ 617 /* ??? To prevent from sending ACK */
614 rfilt &= ~ATH9K_RX_FILTER_UCAST; 618 rfilt &= ~ATH9K_RX_FILTER_UCAST;
615 } 619 }
616 620
617 if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS || 621 if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
618 sc->sc_scanning) 622 (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) ||
623 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))
619 rfilt |= ATH9K_RX_FILTER_BEACON; 624 rfilt |= ATH9K_RX_FILTER_BEACON;
620 625
621 /* If in HOSTAP mode, want to enable reception of PSPOLL frames 626 /* If in HOSTAP mode, want to enable reception of PSPOLL frames
622 & beacon frames */ 627 & beacon frames */
623 if (sc->sc_opmode == ATH9K_M_HOSTAP) 628 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
624 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); 629 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
625 return rfilt; 630 return rfilt;
631
626#undef RX_FILTER_PRESERVE 632#undef RX_FILTER_PRESERVE
627} 633}
628 634
@@ -702,11 +708,11 @@ void ath_flushrecv(struct ath_softc *sc)
702 * progress (see references to sc_rxflush) 708 * progress (see references to sc_rxflush)
703 */ 709 */
704 spin_lock_bh(&sc->sc_rxflushlock); 710 spin_lock_bh(&sc->sc_rxflushlock);
705 sc->sc_rxflush = 1; 711 sc->sc_flags |= SC_OP_RXFLUSH;
706 712
707 ath_rx_tasklet(sc, 1); 713 ath_rx_tasklet(sc, 1);
708 714
709 sc->sc_rxflush = 0; 715 sc->sc_flags &= ~SC_OP_RXFLUSH;
710 spin_unlock_bh(&sc->sc_rxflushlock); 716 spin_unlock_bh(&sc->sc_rxflushlock);
711} 717}
712 718
@@ -719,7 +725,7 @@ int ath_rx_input(struct ath_softc *sc,
719 struct ath_recv_status *rx_status, 725 struct ath_recv_status *rx_status,
720 enum ATH_RX_TYPE *status) 726 enum ATH_RX_TYPE *status)
721{ 727{
722 if (is_ampdu && sc->sc_rxaggr) { 728 if (is_ampdu && (sc->sc_flags & SC_OP_RXAGGR)) {
723 *status = ATH_RX_CONSUMED; 729 *status = ATH_RX_CONSUMED;
724 return ath_ampdu_input(sc, an, skb, rx_status); 730 return ath_ampdu_input(sc, an, skb, rx_status);
725 } else { 731 } else {
@@ -750,7 +756,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
750 756
751 do { 757 do {
752 /* If handling rx interrupt and flush is in progress => exit */ 758 /* If handling rx interrupt and flush is in progress => exit */
753 if (sc->sc_rxflush && (flush == 0)) 759 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
754 break; 760 break;
755 761
756 spin_lock_bh(&sc->sc_rxbuflock); 762 spin_lock_bh(&sc->sc_rxbuflock);
@@ -900,7 +906,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
900 * Enable this if you want to see 906 * Enable this if you want to see
901 * error frames in Monitor mode. 907 * error frames in Monitor mode.
902 */ 908 */
903 if (sc->sc_opmode != ATH9K_M_MONITOR) 909 if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
904 goto rx_next; 910 goto rx_next;
905#endif 911#endif
906 /* fall thru for monitor mode handling... */ 912 /* fall thru for monitor mode handling... */
@@ -945,7 +951,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
945 * decryption and MIC failures. For monitor mode, 951 * decryption and MIC failures. For monitor mode,
946 * we also ignore the CRC error. 952 * we also ignore the CRC error.
947 */ 953 */
948 if (sc->sc_opmode == ATH9K_M_MONITOR) { 954 if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
949 if (ds->ds_rxstat.rs_status & 955 if (ds->ds_rxstat.rs_status &
950 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 956 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
951 ATH9K_RXERR_CRC)) 957 ATH9K_RXERR_CRC))
@@ -1089,7 +1095,7 @@ rx_next:
1089 "%s: Reset rx chain mask. " 1095 "%s: Reset rx chain mask. "
1090 "Do internal reset\n", __func__); 1096 "Do internal reset\n", __func__);
1091 ASSERT(flush == 0); 1097 ASSERT(flush == 0);
1092 ath_internal_reset(sc); 1098 ath_reset(sc, false);
1093 } 1099 }
1094 1100
1095 return 0; 1101 return 0;
@@ -1127,7 +1133,7 @@ int ath_rx_aggr_start(struct ath_softc *sc,
1127 rxtid = &an->an_aggr.rx.tid[tid]; 1133 rxtid = &an->an_aggr.rx.tid[tid];
1128 1134
1129 spin_lock_bh(&rxtid->tidlock); 1135 spin_lock_bh(&rxtid->tidlock);
1130 if (sc->sc_rxaggr) { 1136 if (sc->sc_flags & SC_OP_RXAGGR) {
1131 /* Allow aggregation reception 1137 /* Allow aggregation reception
1132 * Adjust rx BA window size. Peer might indicate a 1138 * Adjust rx BA window size. Peer might indicate a
1133 * zero buffer size for a _dont_care_ condition. 1139 * zero buffer size for a _dont_care_ condition.
@@ -1227,7 +1233,7 @@ void ath_rx_aggr_teardown(struct ath_softc *sc,
1227 1233
1228void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an) 1234void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
1229{ 1235{
1230 if (sc->sc_rxaggr) { 1236 if (sc->sc_flags & SC_OP_RXAGGR) {
1231 struct ath_arx_tid *rxtid; 1237 struct ath_arx_tid *rxtid;
1232 int tidno; 1238 int tidno;
1233 1239
@@ -1259,7 +1265,7 @@ void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
1259 1265
1260void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an) 1266void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1261{ 1267{
1262 if (sc->sc_rxaggr) { 1268 if (sc->sc_flags & SC_OP_RXAGGR) {
1263 struct ath_arx_tid *rxtid; 1269 struct ath_arx_tid *rxtid;
1264 int tidno, i; 1270 int tidno, i;
1265 1271
@@ -1292,27 +1298,3 @@ void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
1292{ 1298{
1293 ath_rx_node_cleanup(sc, an); 1299 ath_rx_node_cleanup(sc, an);
1294} 1300}
1295
1296dma_addr_t ath_skb_map_single(struct ath_softc *sc,
1297 struct sk_buff *skb,
1298 int direction,
1299 dma_addr_t *pa)
1300{
1301 /*
1302 * NB: do NOT use skb->len, which is 0 on initialization.
1303 * Use skb's entire data area instead.
1304 */
1305 *pa = pci_map_single(sc->pdev, skb->data,
1306 skb_end_pointer(skb) - skb->head, direction);
1307 return *pa;
1308}
1309
1310void ath_skb_unmap_single(struct ath_softc *sc,
1311 struct sk_buff *skb,
1312 int direction,
1313 dma_addr_t *pa)
1314{
1315 /* Unmap skb's entire data area */
1316 pci_unmap_single(sc->pdev, *pa,
1317 skb_end_pointer(skb) - skb->head, direction);
1318}
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
index 42b0890a4685..60617ae66209 100644
--- a/drivers/net/wireless/ath9k/reg.h
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -899,12 +899,6 @@ enum {
899#define AR_GPIO_OUTPUT_MUX2 0x4064 899#define AR_GPIO_OUTPUT_MUX2 0x4064
900#define AR_GPIO_OUTPUT_MUX3 0x4068 900#define AR_GPIO_OUTPUT_MUX3 0x4068
901 901
902#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
903#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
904#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
905#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
906#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
907
908#define AR_INPUT_STATE 0x406c 902#define AR_INPUT_STATE 0x406c
909 903
910#define AR_EEPROM_STATUS_DATA 0x407c 904#define AR_EEPROM_STATUS_DATA 0x407c
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 157f830ee6b8..3fc6641e8bf7 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -60,79 +60,6 @@ static u32 bits_per_symbol[][2] = {
60#define IS_HT_RATE(_rate) ((_rate) & 0x80) 60#define IS_HT_RATE(_rate) ((_rate) & 0x80)
61 61
62/* 62/*
63 * Insert a chain of ath_buf (descriptors) on a multicast txq
64 * but do NOT start tx DMA on this queue.
65 * NB: must be called with txq lock held
66 */
67
68static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
69 struct ath_txq *txq,
70 struct list_head *head)
71{
72 struct ath_hal *ah = sc->sc_ah;
73 struct ath_buf *bf;
74
75 if (list_empty(head))
76 return;
77
78 /*
79 * Insert the frame on the outbound list and
80 * pass it on to the hardware.
81 */
82 bf = list_first_entry(head, struct ath_buf, list);
83
84 /*
85 * The CAB queue is started from the SWBA handler since
86 * frames only go out on DTIM and to avoid possible races.
87 */
88 ath9k_hw_set_interrupts(ah, 0);
89
90 /*
91 * If there is anything in the mcastq, we want to set
92 * the "more data" bit in the last item in the queue to
93 * indicate that there is "more data". It makes sense to add
94 * it here since you are *always* going to have
95 * more data when adding to this queue, no matter where
96 * you call from.
97 */
98
99 if (txq->axq_depth) {
100 struct ath_buf *lbf;
101 struct ieee80211_hdr *hdr;
102
103 /*
104 * Add the "more data flag" to the last frame
105 */
106
107 lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
108 hdr = (struct ieee80211_hdr *)
109 ((struct sk_buff *)(lbf->bf_mpdu))->data;
110 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
111 }
112
113 /*
114 * Now, concat the frame onto the queue
115 */
116 list_splice_tail_init(head, &txq->axq_q);
117 txq->axq_depth++;
118 txq->axq_totalqueued++;
119 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
120
121 DPRINTF(sc, ATH_DBG_QUEUE,
122 "%s: txq depth = %d\n", __func__, txq->axq_depth);
123 if (txq->axq_link != NULL) {
124 *txq->axq_link = bf->bf_daddr;
125 DPRINTF(sc, ATH_DBG_XMIT,
126 "%s: link[%u](%p)=%llx (%p)\n",
127 __func__,
128 txq->axq_qnum, txq->axq_link,
129 ito64(bf->bf_daddr), bf->bf_desc);
130 }
131 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
132 ath9k_hw_set_interrupts(ah, sc->sc_imask);
133}
134
135/*
136 * Insert a chain of ath_buf (descriptors) on a txq and 63 * Insert a chain of ath_buf (descriptors) on a txq and
137 * assume the descriptors are already chained together by caller. 64 * assume the descriptors are already chained together by caller.
138 * NB: must be called with txq lock held 65 * NB: must be called with txq lock held
@@ -277,8 +204,6 @@ static int ath_tx_prepare(struct ath_softc *sc,
277 __le16 fc; 204 __le16 fc;
278 u8 *qc; 205 u8 *qc;
279 206
280 memset(txctl, 0, sizeof(struct ath_tx_control));
281
282 txctl->dev = sc; 207 txctl->dev = sc;
283 hdr = (struct ieee80211_hdr *)skb->data; 208 hdr = (struct ieee80211_hdr *)skb->data;
284 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 209 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -302,7 +227,6 @@ static int ath_tx_prepare(struct ath_softc *sc,
302 } 227 }
303 228
304 txctl->if_id = 0; 229 txctl->if_id = 0;
305 txctl->nextfraglen = 0;
306 txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3); 230 txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
307 txctl->txpower = MAX_RATE_POWER; /* FIXME */ 231 txctl->txpower = MAX_RATE_POWER; /* FIXME */
308 232
@@ -329,12 +253,18 @@ static int ath_tx_prepare(struct ath_softc *sc,
329 253
330 /* Fill qnum */ 254 /* Fill qnum */
331 255
332 txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc); 256 if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
333 txq = &sc->sc_txq[txctl->qnum]; 257 txctl->qnum = 0;
258 txq = sc->sc_cabq;
259 } else {
260 txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
261 txq = &sc->sc_txq[txctl->qnum];
262 }
334 spin_lock_bh(&txq->axq_lock); 263 spin_lock_bh(&txq->axq_lock);
335 264
336 /* Try to avoid running out of descriptors */ 265 /* Try to avoid running out of descriptors */
337 if (txq->axq_depth >= (ATH_TXBUF - 20)) { 266 if (txq->axq_depth >= (ATH_TXBUF - 20) &&
267 !(txctl->flags & ATH9K_TXDESC_CAB)) {
338 DPRINTF(sc, ATH_DBG_FATAL, 268 DPRINTF(sc, ATH_DBG_FATAL,
339 "%s: TX queue: %d is full, depth: %d\n", 269 "%s: TX queue: %d is full, depth: %d\n",
340 __func__, 270 __func__,
@@ -354,12 +284,12 @@ static int ath_tx_prepare(struct ath_softc *sc,
354 284
355 /* Fill flags */ 285 /* Fill flags */
356 286
357 txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */ 287 txctl->flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
358 288
359 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 289 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
360 tx_info->flags |= ATH9K_TXDESC_NOACK; 290 txctl->flags |= ATH9K_TXDESC_NOACK;
361 if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) 291 if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
362 tx_info->flags |= ATH9K_TXDESC_RTSENA; 292 txctl->flags |= ATH9K_TXDESC_RTSENA;
363 293
364 /* 294 /*
365 * Setup for rate calculations. 295 * Setup for rate calculations.
@@ -392,7 +322,7 @@ static int ath_tx_prepare(struct ath_softc *sc,
392 * incremented by the fragmentation routine. 322 * incremented by the fragmentation routine.
393 */ 323 */
394 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) && 324 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
395 txctl->ht && sc->sc_txaggr) { 325 txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
396 struct ath_atx_tid *tid; 326 struct ath_atx_tid *tid;
397 327
398 tid = ATH_AN_2_TID(txctl->an, txctl->tidno); 328 tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
@@ -413,50 +343,18 @@ static int ath_tx_prepare(struct ath_softc *sc,
413 } 343 }
414 rix = rcs[0].rix; 344 rix = rcs[0].rix;
415 345
416 /* 346 if (ieee80211_has_morefrags(fc) ||
417 * Calculate duration. This logically belongs in the 802.11 347 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
418 * layer but it lacks sufficient information to calculate it.
419 */
420 if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) {
421 u16 dur;
422 /* 348 /*
423 * XXX not right with fragmentation. 349 ** Force hardware to use computed duration for next
424 */ 350 ** fragment by disabling multi-rate retry, which
425 if (sc->sc_flags & ATH_PREAMBLE_SHORT) 351 ** updates duration based on the multi-rate
426 dur = rt->info[rix].spAckDuration; 352 ** duration table.
427 else 353 */
428 dur = rt->info[rix].lpAckDuration; 354 rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
429 355 rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
430 if (le16_to_cpu(hdr->frame_control) & 356 /* reset tries but keep rate index */
431 IEEE80211_FCTL_MOREFRAGS) { 357 rcs[0].tries = ATH_TXMAXTRY;
432 dur += dur; /* Add additional 'SIFS + ACK' */
433
434 /*
435 ** Compute size of next fragment in order to compute
436 ** durations needed to update NAV.
437 ** The last fragment uses the ACK duration only.
438 ** Add time for next fragment.
439 */
440 dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
441 txctl->nextfraglen,
442 rix, sc->sc_flags & ATH_PREAMBLE_SHORT);
443 }
444
445 if (ieee80211_has_morefrags(fc) ||
446 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
447 /*
448 ** Force hardware to use computed duration for next
449 ** fragment by disabling multi-rate retry, which
450 ** updates duration based on the multi-rate
451 ** duration table.
452 */
453 rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
454 rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
455 /* reset tries but keep rate index */
456 rcs[0].tries = ATH_TXMAXTRY;
457 }
458
459 hdr->duration_id = cpu_to_le16(dur);
460 } 358 }
461 359
462 /* 360 /*
@@ -484,12 +382,8 @@ static int ath_tx_prepare(struct ath_softc *sc,
484 if (is_multicast_ether_addr(hdr->addr1)) { 382 if (is_multicast_ether_addr(hdr->addr1)) {
485 antenna = sc->sc_mcastantenna + 1; 383 antenna = sc->sc_mcastantenna + 1;
486 sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1; 384 sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
487 } else 385 }
488 antenna = sc->sc_txantenna;
489 386
490#ifdef USE_LEGACY_HAL
491 txctl->antenna = antenna;
492#endif
493 return 0; 387 return 0;
494} 388}
495 389
@@ -502,7 +396,6 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
502{ 396{
503 struct sk_buff *skb = bf->bf_mpdu; 397 struct sk_buff *skb = bf->bf_mpdu;
504 struct ath_xmit_status tx_status; 398 struct ath_xmit_status tx_status;
505 dma_addr_t *pa;
506 399
507 /* 400 /*
508 * Set retry information. 401 * Set retry information.
@@ -518,13 +411,12 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
518 if (!txok) { 411 if (!txok) {
519 tx_status.flags |= ATH_TX_ERROR; 412 tx_status.flags |= ATH_TX_ERROR;
520 413
521 if (bf->bf_isxretried) 414 if (bf_isxretried(bf))
522 tx_status.flags |= ATH_TX_XRETRY; 415 tx_status.flags |= ATH_TX_XRETRY;
523 } 416 }
524 /* Unmap this frame */ 417 /* Unmap this frame */
525 pa = get_dma_mem_context(bf, bf_dmacontext);
526 pci_unmap_single(sc->pdev, 418 pci_unmap_single(sc->pdev,
527 *pa, 419 bf->bf_dmacontext,
528 skb->len, 420 skb->len,
529 PCI_DMA_TODEVICE); 421 PCI_DMA_TODEVICE);
530 /* complete this frame */ 422 /* complete this frame */
@@ -629,7 +521,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc,
629 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) 521 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
630 return 0; 522 return 0;
631 523
632 isaggr = bf->bf_isaggr; 524 isaggr = bf_isaggr(bf);
633 if (isaggr) { 525 if (isaggr) {
634 seq_st = ATH_DS_BA_SEQ(ds); 526 seq_st = ATH_DS_BA_SEQ(ds);
635 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); 527 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
@@ -651,7 +543,7 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
651 struct sk_buff *skb; 543 struct sk_buff *skb;
652 struct ieee80211_hdr *hdr; 544 struct ieee80211_hdr *hdr;
653 545
654 bf->bf_isretried = 1; 546 bf->bf_state.bf_type |= BUF_RETRY;
655 bf->bf_retries++; 547 bf->bf_retries++;
656 548
657 skb = bf->bf_mpdu; 549 skb = bf->bf_mpdu;
@@ -698,7 +590,7 @@ static u32 ath_pkt_duration(struct ath_softc *sc,
698 u8 rc; 590 u8 rc;
699 int streams, pktlen; 591 int streams, pktlen;
700 592
701 pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen; 593 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
702 rc = rt->info[rix].rateCode; 594 rc = rt->info[rix].rateCode;
703 595
704 /* 596 /*
@@ -742,7 +634,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
742 int i, flags, rtsctsena = 0, dynamic_mimops = 0; 634 int i, flags, rtsctsena = 0, dynamic_mimops = 0;
743 u32 ctsduration = 0; 635 u32 ctsduration = 0;
744 u8 rix = 0, cix, ctsrate = 0; 636 u8 rix = 0, cix, ctsrate = 0;
745 u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit; 637 u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit;
746 struct ath_node *an = (struct ath_node *) bf->bf_node; 638 struct ath_node *an = (struct ath_node *) bf->bf_node;
747 639
748 /* 640 /*
@@ -781,7 +673,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
781 * let rate series flags determine which rates will actually 673 * let rate series flags determine which rates will actually
782 * use RTS. 674 * use RTS.
783 */ 675 */
784 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) { 676 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
785 BUG_ON(!an); 677 BUG_ON(!an);
786 /* 678 /*
787 * 802.11g protection not needed, use our default behavior 679 * 802.11g protection not needed, use our default behavior
@@ -793,7 +685,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
793 * and the second aggregate should have any protection at all. 685 * and the second aggregate should have any protection at all.
794 */ 686 */
795 if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) { 687 if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
796 if (!bf->bf_aggrburst) { 688 if (!bf_isaggrburst(bf)) {
797 flags = ATH9K_TXDESC_RTSENA; 689 flags = ATH9K_TXDESC_RTSENA;
798 dynamic_mimops = 1; 690 dynamic_mimops = 1;
799 } else { 691 } else {
@@ -806,7 +698,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
806 * Set protection if aggregate protection on 698 * Set protection if aggregate protection on
807 */ 699 */
808 if (sc->sc_config.ath_aggr_prot && 700 if (sc->sc_config.ath_aggr_prot &&
809 (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) { 701 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
810 flags = ATH9K_TXDESC_RTSENA; 702 flags = ATH9K_TXDESC_RTSENA;
811 cix = rt->info[sc->sc_protrix].controlRate; 703 cix = rt->info[sc->sc_protrix].controlRate;
812 rtsctsena = 1; 704 rtsctsena = 1;
@@ -815,7 +707,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
815 /* 707 /*
816 * For AR5416 - RTS cannot be followed by a frame larger than 8K. 708 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
817 */ 709 */
818 if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) { 710 if (bf_isaggr(bf) && (bf->bf_al > aggr_limit_with_rts)) {
819 /* 711 /*
820 * Ensure that in the case of SM Dynamic power save 712 * Ensure that in the case of SM Dynamic power save
821 * while we are bursting the second aggregate the 713 * while we are bursting the second aggregate the
@@ -832,7 +724,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
832 /* NB: cix is set above where RTS/CTS is enabled */ 724 /* NB: cix is set above where RTS/CTS is enabled */
833 BUG_ON(cix == 0xff); 725 BUG_ON(cix == 0xff);
834 ctsrate = rt->info[cix].rateCode | 726 ctsrate = rt->info[cix].rateCode |
835 (bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0); 727 (bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);
836 728
837 /* 729 /*
838 * Setup HAL rate series 730 * Setup HAL rate series
@@ -846,7 +738,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
846 rix = bf->bf_rcs[i].rix; 738 rix = bf->bf_rcs[i].rix;
847 739
848 series[i].Rate = rt->info[rix].rateCode | 740 series[i].Rate = rt->info[rix].rateCode |
849 (bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0); 741 (bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0);
850 742
851 series[i].Tries = bf->bf_rcs[i].tries; 743 series[i].Tries = bf->bf_rcs[i].tries;
852 744
@@ -862,7 +754,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
862 sc, rix, bf, 754 sc, rix, bf,
863 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0, 755 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
864 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG), 756 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
865 bf->bf_shpreamble); 757 bf_isshpreamble(bf));
866 758
867 if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) && 759 if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
868 (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) { 760 (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
@@ -875,7 +767,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
875 */ 767 */
876 series[i].ChSel = sc->sc_tx_chainmask; 768 series[i].ChSel = sc->sc_tx_chainmask;
877 } else { 769 } else {
878 if (bf->bf_ht) 770 if (bf_isht(bf))
879 series[i].ChSel = 771 series[i].ChSel =
880 ath_chainmask_sel_logic(sc, an); 772 ath_chainmask_sel_logic(sc, an);
881 else 773 else
@@ -908,7 +800,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
908 * use the precalculated ACK durations. 800 * use the precalculated ACK durations.
909 */ 801 */
910 if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */ 802 if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
911 ctsduration += bf->bf_shpreamble ? 803 ctsduration += bf_isshpreamble(bf) ?
912 rt->info[cix].spAckDuration : 804 rt->info[cix].spAckDuration :
913 rt->info[cix].lpAckDuration; 805 rt->info[cix].lpAckDuration;
914 } 806 }
@@ -916,7 +808,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
916 ctsduration += series[0].PktDuration; 808 ctsduration += series[0].PktDuration;
917 809
918 if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */ 810 if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
919 ctsduration += bf->bf_shpreamble ? 811 ctsduration += bf_isshpreamble(bf) ?
920 rt->info[rix].spAckDuration : 812 rt->info[rix].spAckDuration :
921 rt->info[rix].lpAckDuration; 813 rt->info[rix].lpAckDuration;
922 } 814 }
@@ -932,10 +824,10 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
932 * set dur_update_en for l-sig computation except for PS-Poll frames 824 * set dur_update_en for l-sig computation except for PS-Poll frames
933 */ 825 */
934 ath9k_hw_set11n_ratescenario(ah, ds, lastds, 826 ath9k_hw_set11n_ratescenario(ah, ds, lastds,
935 !bf->bf_ispspoll, 827 !bf_ispspoll(bf),
936 ctsrate, 828 ctsrate,
937 ctsduration, 829 ctsduration,
938 series, 4, flags); 830 series, 4, flags);
939 if (sc->sc_config.ath_aggr_prot && flags) 831 if (sc->sc_config.ath_aggr_prot && flags)
940 ath9k_hw_set11n_burstduration(ah, ds, 8192); 832 ath9k_hw_set11n_burstduration(ah, ds, 8192);
941} 833}
@@ -958,7 +850,7 @@ static int ath_tx_send_normal(struct ath_softc *sc,
958 BUG_ON(list_empty(bf_head)); 850 BUG_ON(list_empty(bf_head));
959 851
960 bf = list_first_entry(bf_head, struct ath_buf, list); 852 bf = list_first_entry(bf_head, struct ath_buf, list);
961 bf->bf_isampdu = 0; /* regular HT frame */ 853 bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
962 854
963 skb = (struct sk_buff *)bf->bf_mpdu; 855 skb = (struct sk_buff *)bf->bf_mpdu;
964 tx_info = IEEE80211_SKB_CB(skb); 856 tx_info = IEEE80211_SKB_CB(skb);
@@ -998,7 +890,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
998 890
999 while (!list_empty(&tid->buf_q)) { 891 while (!list_empty(&tid->buf_q)) {
1000 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 892 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1001 ASSERT(!bf->bf_isretried); 893 ASSERT(!bf_isretried(bf));
1002 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); 894 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1003 ath_tx_send_normal(sc, txq, tid, &bf_head); 895 ath_tx_send_normal(sc, txq, tid, &bf_head);
1004 } 896 }
@@ -1025,7 +917,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1025 int isaggr, txfail, txpending, sendbar = 0, needreset = 0; 917 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
1026 int isnodegone = (an->an_flags & ATH_NODE_CLEAN); 918 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
1027 919
1028 isaggr = bf->bf_isaggr; 920 isaggr = bf_isaggr(bf);
1029 if (isaggr) { 921 if (isaggr) {
1030 if (txok) { 922 if (txok) {
1031 if (ATH_DS_TX_BA(ds)) { 923 if (ATH_DS_TX_BA(ds)) {
@@ -1047,7 +939,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1047 * when perform internal reset in this routine. 939 * when perform internal reset in this routine.
1048 * Only enable reset in STA mode for now. 940 * Only enable reset in STA mode for now.
1049 */ 941 */
1050 if (sc->sc_opmode == ATH9K_M_STA) 942 if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
1051 needreset = 1; 943 needreset = 1;
1052 } 944 }
1053 } else { 945 } else {
@@ -1075,7 +967,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1075 ath_tx_set_retry(sc, bf); 967 ath_tx_set_retry(sc, bf);
1076 txpending = 1; 968 txpending = 1;
1077 } else { 969 } else {
1078 bf->bf_isxretried = 1; 970 bf->bf_state.bf_type |= BUF_XRETRY;
1079 txfail = 1; 971 txfail = 1;
1080 sendbar = 1; 972 sendbar = 1;
1081 } 973 }
@@ -1175,11 +1067,8 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1175 tbf->bf_lastfrm->bf_desc); 1067 tbf->bf_lastfrm->bf_desc);
1176 1068
1177 /* copy the DMA context */ 1069 /* copy the DMA context */
1178 copy_dma_mem_context( 1070 tbf->bf_dmacontext =
1179 get_dma_mem_context(tbf, 1071 bf_last->bf_dmacontext;
1180 bf_dmacontext),
1181 get_dma_mem_context(bf_last,
1182 bf_dmacontext));
1183 } 1072 }
1184 list_add_tail(&tbf->list, &bf_head); 1073 list_add_tail(&tbf->list, &bf_head);
1185 } else { 1074 } else {
@@ -1188,7 +1077,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1188 * software retry 1077 * software retry
1189 */ 1078 */
1190 ath9k_hw_cleartxdesc(sc->sc_ah, 1079 ath9k_hw_cleartxdesc(sc->sc_ah,
1191 bf->bf_lastfrm->bf_desc); 1080 bf->bf_lastfrm->bf_desc);
1192 } 1081 }
1193 1082
1194 /* 1083 /*
@@ -1242,7 +1131,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1242 } 1131 }
1243 1132
1244 if (needreset) 1133 if (needreset)
1245 ath_internal_reset(sc); 1134 ath_reset(sc, false);
1246 1135
1247 return; 1136 return;
1248} 1137}
@@ -1331,7 +1220,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1331 1220
1332 txq->axq_depth--; 1221 txq->axq_depth--;
1333 1222
1334 if (bf->bf_isaggr) 1223 if (bf_isaggr(bf))
1335 txq->axq_aggr_depth--; 1224 txq->axq_aggr_depth--;
1336 1225
1337 txok = (ds->ds_txstat.ts_status == 0); 1226 txok = (ds->ds_txstat.ts_status == 0);
@@ -1345,14 +1234,14 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1345 spin_unlock_bh(&sc->sc_txbuflock); 1234 spin_unlock_bh(&sc->sc_txbuflock);
1346 } 1235 }
1347 1236
1348 if (!bf->bf_isampdu) { 1237 if (!bf_isampdu(bf)) {
1349 /* 1238 /*
1350 * This frame is sent out as a single frame. 1239 * This frame is sent out as a single frame.
1351 * Use hardware retry status for this frame. 1240 * Use hardware retry status for this frame.
1352 */ 1241 */
1353 bf->bf_retries = ds->ds_txstat.ts_longretry; 1242 bf->bf_retries = ds->ds_txstat.ts_longretry;
1354 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) 1243 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
1355 bf->bf_isxretried = 1; 1244 bf->bf_state.bf_type |= BUF_XRETRY;
1356 nbad = 0; 1245 nbad = 0;
1357 } else { 1246 } else {
1358 nbad = ath_tx_num_badfrms(sc, bf, txok); 1247 nbad = ath_tx_num_badfrms(sc, bf, txok);
@@ -1368,7 +1257,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1368 if (ds->ds_txstat.ts_status == 0) 1257 if (ds->ds_txstat.ts_status == 0)
1369 nacked++; 1258 nacked++;
1370 1259
1371 if (bf->bf_isdata) { 1260 if (bf_isdata(bf)) {
1372 if (isrifs) 1261 if (isrifs)
1373 tmp_ds = bf->bf_rifslast->bf_desc; 1262 tmp_ds = bf->bf_rifslast->bf_desc;
1374 else 1263 else
@@ -1384,7 +1273,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1384 /* 1273 /*
1385 * Complete this transmit unit 1274 * Complete this transmit unit
1386 */ 1275 */
1387 if (bf->bf_isampdu) 1276 if (bf_isampdu(bf))
1388 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok); 1277 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
1389 else 1278 else
1390 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0); 1279 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
@@ -1406,7 +1295,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1406 /* 1295 /*
1407 * schedule any pending packets if aggregation is enabled 1296 * schedule any pending packets if aggregation is enabled
1408 */ 1297 */
1409 if (sc->sc_txaggr) 1298 if (sc->sc_flags & SC_OP_TXAGGR)
1410 ath_txq_schedule(sc, txq); 1299 ath_txq_schedule(sc, txq);
1411 spin_unlock_bh(&txq->axq_lock); 1300 spin_unlock_bh(&txq->axq_lock);
1412 } 1301 }
@@ -1430,10 +1319,9 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1430 struct ath_hal *ah = sc->sc_ah; 1319 struct ath_hal *ah = sc->sc_ah;
1431 int i; 1320 int i;
1432 int npend = 0; 1321 int npend = 0;
1433 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
1434 1322
1435 /* XXX return value */ 1323 /* XXX return value */
1436 if (!sc->sc_invalid) { 1324 if (!(sc->sc_flags & SC_OP_INVALID)) {
1437 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1325 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1438 if (ATH_TXQ_SETUP(sc, i)) { 1326 if (ATH_TXQ_SETUP(sc, i)) {
1439 ath_tx_stopdma(sc, &sc->sc_txq[i]); 1327 ath_tx_stopdma(sc, &sc->sc_txq[i]);
@@ -1454,10 +1342,11 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1454 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__); 1342 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
1455 1343
1456 spin_lock_bh(&sc->sc_resetlock); 1344 spin_lock_bh(&sc->sc_resetlock);
1457 if (!ath9k_hw_reset(ah, sc->sc_opmode, 1345 if (!ath9k_hw_reset(ah,
1458 &sc->sc_curchan, ht_macmode, 1346 sc->sc_ah->ah_curchan,
1459 sc->sc_tx_chainmask, sc->sc_rx_chainmask, 1347 sc->sc_ht_info.tx_chan_width,
1460 sc->sc_ht_extprotspacing, true, &status)) { 1348 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1349 sc->sc_ht_extprotspacing, true, &status)) {
1461 1350
1462 DPRINTF(sc, ATH_DBG_FATAL, 1351 DPRINTF(sc, ATH_DBG_FATAL,
1463 "%s: unable to reset hardware; hal status %u\n", 1352 "%s: unable to reset hardware; hal status %u\n",
@@ -1481,7 +1370,7 @@ static void ath_tx_addto_baw(struct ath_softc *sc,
1481{ 1370{
1482 int index, cindex; 1371 int index, cindex;
1483 1372
1484 if (bf->bf_isretried) 1373 if (bf_isretried(bf))
1485 return; 1374 return;
1486 1375
1487 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); 1376 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
@@ -1516,7 +1405,7 @@ static int ath_tx_send_ampdu(struct ath_softc *sc,
1516 BUG_ON(list_empty(bf_head)); 1405 BUG_ON(list_empty(bf_head));
1517 1406
1518 bf = list_first_entry(bf_head, struct ath_buf, list); 1407 bf = list_first_entry(bf_head, struct ath_buf, list);
1519 bf->bf_isampdu = 1; 1408 bf->bf_state.bf_type |= BUF_AMPDU;
1520 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */ 1409 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
1521 bf->bf_tidno = txctl->tidno; 1410 bf->bf_tidno = txctl->tidno;
1522 1411
@@ -1860,7 +1749,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc,
1860 if (bf->bf_nframes == 1) { 1749 if (bf->bf_nframes == 1) {
1861 ASSERT(bf->bf_lastfrm == bf_last); 1750 ASSERT(bf->bf_lastfrm == bf_last);
1862 1751
1863 bf->bf_isaggr = 0; 1752 bf->bf_state.bf_type &= ~BUF_AGGR;
1864 /* 1753 /*
1865 * clear aggr bits for every descriptor 1754 * clear aggr bits for every descriptor
1866 * XXX TODO: is there a way to optimize it? 1755 * XXX TODO: is there a way to optimize it?
@@ -1877,7 +1766,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc,
1877 /* 1766 /*
1878 * setup first desc with rate and aggr info 1767 * setup first desc with rate and aggr info
1879 */ 1768 */
1880 bf->bf_isaggr = 1; 1769 bf->bf_state.bf_type |= BUF_AGGR;
1881 ath_buf_set_rate(sc, bf); 1770 ath_buf_set_rate(sc, bf);
1882 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); 1771 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
1883 1772
@@ -1925,7 +1814,7 @@ static void ath_tid_drain(struct ath_softc *sc,
1925 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); 1814 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1926 1815
1927 /* update baw for software retried frame */ 1816 /* update baw for software retried frame */
1928 if (bf->bf_isretried) 1817 if (bf_isretried(bf))
1929 ath_tx_update_baw(sc, tid, bf->bf_seqno); 1818 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1930 1819
1931 /* 1820 /*
@@ -1990,13 +1879,18 @@ static int ath_tx_start_dma(struct ath_softc *sc,
1990 struct list_head bf_head; 1879 struct list_head bf_head;
1991 struct ath_desc *ds; 1880 struct ath_desc *ds;
1992 struct ath_hal *ah = sc->sc_ah; 1881 struct ath_hal *ah = sc->sc_ah;
1993 struct ath_txq *txq = &sc->sc_txq[txctl->qnum]; 1882 struct ath_txq *txq;
1994 struct ath_tx_info_priv *tx_info_priv; 1883 struct ath_tx_info_priv *tx_info_priv;
1995 struct ath_rc_series *rcs; 1884 struct ath_rc_series *rcs;
1996 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1885 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1997 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1886 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1998 __le16 fc = hdr->frame_control; 1887 __le16 fc = hdr->frame_control;
1999 1888
1889 if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
1890 txq = sc->sc_cabq;
1891 else
1892 txq = &sc->sc_txq[txctl->qnum];
1893
2000 /* For each sglist entry, allocate an ath_buf for DMA */ 1894 /* For each sglist entry, allocate an ath_buf for DMA */
2001 INIT_LIST_HEAD(&bf_head); 1895 INIT_LIST_HEAD(&bf_head);
2002 spin_lock_bh(&sc->sc_txbuflock); 1896 spin_lock_bh(&sc->sc_txbuflock);
@@ -2014,11 +1908,21 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2014 /* set up this buffer */ 1908 /* set up this buffer */
2015 ATH_TXBUF_RESET(bf); 1909 ATH_TXBUF_RESET(bf);
2016 bf->bf_frmlen = txctl->frmlen; 1910 bf->bf_frmlen = txctl->frmlen;
2017 bf->bf_isdata = ieee80211_is_data(fc); 1911
2018 bf->bf_isbar = ieee80211_is_back_req(fc); 1912 ieee80211_is_data(fc) ?
2019 bf->bf_ispspoll = ieee80211_is_pspoll(fc); 1913 (bf->bf_state.bf_type |= BUF_DATA) :
1914 (bf->bf_state.bf_type &= ~BUF_DATA);
1915 ieee80211_is_back_req(fc) ?
1916 (bf->bf_state.bf_type |= BUF_BAR) :
1917 (bf->bf_state.bf_type &= ~BUF_BAR);
1918 ieee80211_is_pspoll(fc) ?
1919 (bf->bf_state.bf_type |= BUF_PSPOLL) :
1920 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
1921 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
1922 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1923 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
1924
2020 bf->bf_flags = txctl->flags; 1925 bf->bf_flags = txctl->flags;
2021 bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT;
2022 bf->bf_keytype = txctl->keytype; 1926 bf->bf_keytype = txctl->keytype;
2023 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; 1927 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
2024 rcs = tx_info_priv->rcs; 1928 rcs = tx_info_priv->rcs;
@@ -2038,8 +1942,7 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2038 /* 1942 /*
2039 * Save the DMA context in the first ath_buf 1943 * Save the DMA context in the first ath_buf
2040 */ 1944 */
2041 copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext), 1945 bf->bf_dmacontext = txctl->dmacontext;
2042 get_dma_mem_context(txctl, dmacontext));
2043 1946
2044 /* 1947 /*
2045 * Formulate first tx descriptor with tx controls. 1948 * Formulate first tx descriptor with tx controls.
@@ -2060,11 +1963,13 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2060 ds); /* first descriptor */ 1963 ds); /* first descriptor */
2061 1964
2062 bf->bf_lastfrm = bf; 1965 bf->bf_lastfrm = bf;
2063 bf->bf_ht = txctl->ht; 1966 (txctl->ht) ?
1967 (bf->bf_state.bf_type |= BUF_HT) :
1968 (bf->bf_state.bf_type &= ~BUF_HT);
2064 1969
2065 spin_lock_bh(&txq->axq_lock); 1970 spin_lock_bh(&txq->axq_lock);
2066 1971
2067 if (txctl->ht && sc->sc_txaggr) { 1972 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
2068 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno); 1973 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
2069 if (ath_aggr_query(sc, an, txctl->tidno)) { 1974 if (ath_aggr_query(sc, an, txctl->tidno)) {
2070 /* 1975 /*
@@ -2090,27 +1995,7 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2090 bf->bf_tidno = txctl->tidno; 1995 bf->bf_tidno = txctl->tidno;
2091 } 1996 }
2092 1997
2093 if (is_multicast_ether_addr(hdr->addr1)) { 1998 ath_tx_txqaddbuf(sc, txq, &bf_head);
2094 struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
2095
2096 /*
2097 * When servicing one or more stations in power-save
2098 * mode (or) if there is some mcast data waiting on
2099 * mcast queue (to prevent out of order delivery of
2100 * mcast,bcast packets) multicast frames must be
2101 * buffered until after the beacon. We use the private
2102 * mcast queue for that.
2103 */
2104 /* XXX? more bit in 802.11 frame header */
2105 spin_lock_bh(&avp->av_mcastq.axq_lock);
2106 if (txctl->ps || avp->av_mcastq.axq_depth)
2107 ath_tx_mcastqaddbuf(sc,
2108 &avp->av_mcastq, &bf_head);
2109 else
2110 ath_tx_txqaddbuf(sc, txq, &bf_head);
2111 spin_unlock_bh(&avp->av_mcastq.axq_lock);
2112 } else
2113 ath_tx_txqaddbuf(sc, txq, &bf_head);
2114 } 1999 }
2115 spin_unlock_bh(&txq->axq_lock); 2000 spin_unlock_bh(&txq->axq_lock);
2116 return 0; 2001 return 0;
@@ -2118,30 +2003,31 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2118 2003
2119static void xmit_map_sg(struct ath_softc *sc, 2004static void xmit_map_sg(struct ath_softc *sc,
2120 struct sk_buff *skb, 2005 struct sk_buff *skb,
2121 dma_addr_t *pa,
2122 struct ath_tx_control *txctl) 2006 struct ath_tx_control *txctl)
2123{ 2007{
2124 struct ath_xmit_status tx_status; 2008 struct ath_xmit_status tx_status;
2125 struct ath_atx_tid *tid; 2009 struct ath_atx_tid *tid;
2126 struct scatterlist sg; 2010 struct scatterlist sg;
2127 2011
2128 *pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 2012 txctl->dmacontext = pci_map_single(sc->pdev, skb->data,
2013 skb->len, PCI_DMA_TODEVICE);
2129 2014
2130 /* setup S/G list */ 2015 /* setup S/G list */
2131 memset(&sg, 0, sizeof(struct scatterlist)); 2016 memset(&sg, 0, sizeof(struct scatterlist));
2132 sg_dma_address(&sg) = *pa; 2017 sg_dma_address(&sg) = txctl->dmacontext;
2133 sg_dma_len(&sg) = skb->len; 2018 sg_dma_len(&sg) = skb->len;
2134 2019
2135 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) { 2020 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
2136 /* 2021 /*
2137 * We have to do drop frame here. 2022 * We have to do drop frame here.
2138 */ 2023 */
2139 pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE); 2024 pci_unmap_single(sc->pdev, txctl->dmacontext,
2025 skb->len, PCI_DMA_TODEVICE);
2140 2026
2141 tx_status.retries = 0; 2027 tx_status.retries = 0;
2142 tx_status.flags = ATH_TX_ERROR; 2028 tx_status.flags = ATH_TX_ERROR;
2143 2029
2144 if (txctl->ht && sc->sc_txaggr) { 2030 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
2145 /* Reclaim the seqno. */ 2031 /* Reclaim the seqno. */
2146 tid = ATH_AN_2_TID((struct ath_node *) 2032 tid = ATH_AN_2_TID((struct ath_node *)
2147 txctl->an, txctl->tidno); 2033 txctl->an, txctl->tidno);
@@ -2162,7 +2048,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2162 2048
2163 /* Setup tx descriptors */ 2049 /* Setup tx descriptors */
2164 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 2050 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
2165 "tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC); 2051 "tx", nbufs, 1);
2166 if (error != 0) { 2052 if (error != 0) {
2167 DPRINTF(sc, ATH_DBG_FATAL, 2053 DPRINTF(sc, ATH_DBG_FATAL,
2168 "%s: failed to allocate tx descriptors: %d\n", 2054 "%s: failed to allocate tx descriptors: %d\n",
@@ -2403,6 +2289,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2403 struct ath_tx_control txctl; 2289 struct ath_tx_control txctl;
2404 int error = 0; 2290 int error = 0;
2405 2291
2292 memset(&txctl, 0, sizeof(struct ath_tx_control));
2406 error = ath_tx_prepare(sc, skb, &txctl); 2293 error = ath_tx_prepare(sc, skb, &txctl);
2407 if (error == 0) 2294 if (error == 0)
2408 /* 2295 /*
@@ -2410,9 +2297,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2410 * ath_tx_start_dma() will be called either synchronously 2297 * ath_tx_start_dma() will be called either synchronously
2411 * or asynchrounsly once DMA is complete. 2298 * or asynchrounsly once DMA is complete.
2412 */ 2299 */
2413 xmit_map_sg(sc, skb, 2300 xmit_map_sg(sc, skb, &txctl);
2414 get_dma_mem_context(&txctl, dmacontext),
2415 &txctl);
2416 else 2301 else
2417 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE); 2302 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2418 2303
@@ -2424,8 +2309,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2424 2309
2425void ath_tx_tasklet(struct ath_softc *sc) 2310void ath_tx_tasklet(struct ath_softc *sc)
2426{ 2311{
2427 u64 tsf = ath9k_hw_gettsf64(sc->sc_ah); 2312 int i;
2428 int i, nacked = 0;
2429 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1); 2313 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2430 2314
2431 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask); 2315 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
@@ -2435,10 +2319,8 @@ void ath_tx_tasklet(struct ath_softc *sc)
2435 */ 2319 */
2436 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 2320 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2437 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) 2321 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2438 nacked += ath_tx_processq(sc, &sc->sc_txq[i]); 2322 ath_tx_processq(sc, &sc->sc_txq[i]);
2439 } 2323 }
2440 if (nacked)
2441 sc->sc_lastrx = tsf;
2442} 2324}
2443 2325
2444void ath_tx_draintxq(struct ath_softc *sc, 2326void ath_tx_draintxq(struct ath_softc *sc,
@@ -2486,14 +2368,14 @@ void ath_tx_draintxq(struct ath_softc *sc,
2486 2368
2487 spin_unlock_bh(&txq->axq_lock); 2369 spin_unlock_bh(&txq->axq_lock);
2488 2370
2489 if (bf->bf_isampdu) 2371 if (bf_isampdu(bf))
2490 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0); 2372 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
2491 else 2373 else
2492 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); 2374 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2493 } 2375 }
2494 2376
2495 /* flush any pending frames if aggregation is enabled */ 2377 /* flush any pending frames if aggregation is enabled */
2496 if (sc->sc_txaggr) { 2378 if (sc->sc_flags & SC_OP_TXAGGR) {
2497 if (!retry_tx) { 2379 if (!retry_tx) {
2498 spin_lock_bh(&txq->axq_lock); 2380 spin_lock_bh(&txq->axq_lock);
2499 ath_txq_drain_pending_buffers(sc, txq, 2381 ath_txq_drain_pending_buffers(sc, txq,
@@ -2509,7 +2391,7 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2509{ 2391{
2510 /* stop beacon queue. The beacon will be freed when 2392 /* stop beacon queue. The beacon will be freed when
2511 * we go to INIT state */ 2393 * we go to INIT state */
2512 if (!sc->sc_invalid) { 2394 if (!(sc->sc_flags & SC_OP_INVALID)) {
2513 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); 2395 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
2514 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__, 2396 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
2515 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq)); 2397 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
@@ -2536,7 +2418,7 @@ enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
2536 struct ath_atx_tid *txtid; 2418 struct ath_atx_tid *txtid;
2537 DECLARE_MAC_BUF(mac); 2419 DECLARE_MAC_BUF(mac);
2538 2420
2539 if (!sc->sc_txaggr) 2421 if (!(sc->sc_flags & SC_OP_TXAGGR))
2540 return AGGR_NOT_REQUIRED; 2422 return AGGR_NOT_REQUIRED;
2541 2423
2542 /* ADDBA exchange must be completed before sending aggregates */ 2424 /* ADDBA exchange must be completed before sending aggregates */
@@ -2583,7 +2465,7 @@ int ath_tx_aggr_start(struct ath_softc *sc,
2583 return -1; 2465 return -1;
2584 } 2466 }
2585 2467
2586 if (sc->sc_txaggr) { 2468 if (sc->sc_flags & SC_OP_TXAGGR) {
2587 txtid = ATH_AN_2_TID(an, tid); 2469 txtid = ATH_AN_2_TID(an, tid);
2588 txtid->addba_exchangeinprogress = 1; 2470 txtid->addba_exchangeinprogress = 1;
2589 ath_tx_pause_tid(sc, txtid); 2471 ath_tx_pause_tid(sc, txtid);
@@ -2647,7 +2529,7 @@ void ath_tx_aggr_teardown(struct ath_softc *sc,
2647 spin_lock_bh(&txq->axq_lock); 2529 spin_lock_bh(&txq->axq_lock);
2648 while (!list_empty(&txtid->buf_q)) { 2530 while (!list_empty(&txtid->buf_q)) {
2649 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); 2531 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
2650 if (!bf->bf_isretried) { 2532 if (!bf_isretried(bf)) {
2651 /* 2533 /*
2652 * NB: it's based on the assumption that 2534 * NB: it's based on the assumption that
2653 * software retried frame will always stay 2535 * software retried frame will always stay
@@ -2743,7 +2625,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2743 2625
2744void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) 2626void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2745{ 2627{
2746 if (sc->sc_txaggr) { 2628 if (sc->sc_flags & SC_OP_TXAGGR) {
2747 struct ath_atx_tid *tid; 2629 struct ath_atx_tid *tid;
2748 struct ath_atx_ac *ac; 2630 struct ath_atx_ac *ac;
2749 int tidno, acno; 2631 int tidno, acno;
@@ -2855,7 +2737,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc,
2855 2737
2856void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an) 2738void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2857{ 2739{
2858 if (sc->sc_txaggr) { 2740 if (sc->sc_flags & SC_OP_TXAGGR) {
2859 struct ath_atx_tid *tid; 2741 struct ath_atx_tid *tid;
2860 int tidno, i; 2742 int tidno, i;
2861 2743
@@ -2869,3 +2751,57 @@ void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2869 } 2751 }
2870 } 2752 }
2871} 2753}
2754
2755void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2756{
2757 int hdrlen, padsize;
2758 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2759 struct ath_tx_control txctl;
2760
2761 /*
2762 * As a temporary workaround, assign seq# here; this will likely need
2763 * to be cleaned up to work better with Beacon transmission and virtual
2764 * BSSes.
2765 */
2766 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2767 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2768 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2769 sc->seq_no += 0x10;
2770 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2771 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
2772 }
2773
2774 /* Add the padding after the header if this is not already done */
2775 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2776 if (hdrlen & 3) {
2777 padsize = hdrlen % 4;
2778 if (skb_headroom(skb) < padsize) {
2779 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
2780 "failed\n", __func__);
2781 dev_kfree_skb_any(skb);
2782 return;
2783 }
2784 skb_push(skb, padsize);
2785 memmove(skb->data, skb->data + padsize, hdrlen);
2786 }
2787
2788 DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
2789 __func__,
2790 skb);
2791
2792 memset(&txctl, 0, sizeof(struct ath_tx_control));
2793 txctl.flags = ATH9K_TXDESC_CAB;
2794 if (ath_tx_prepare(sc, skb, &txctl) == 0) {
2795 /*
2796 * Start DMA mapping.
2797 * ath_tx_start_dma() will be called either synchronously
2798 * or asynchrounsly once DMA is complete.
2799 */
2800 xmit_map_sg(sc, skb, &txctl);
2801 } else {
2802 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2803 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
2804 dev_kfree_skb_any(skb);
2805 }
2806}
2807
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 1fa043d1802c..1f81d36f87c5 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -80,6 +80,18 @@ config B43_NPHY
80 80
81 SAY N. 81 SAY N.
82 82
83config B43_PHY_LP
84 bool "IEEE 802.11g LP-PHY support (BROKEN)"
85 depends on B43 && EXPERIMENTAL && BROKEN
86 ---help---
87 Support for the LP-PHY.
88 The LP-PHY is an IEEE 802.11g based PHY built into some notebooks
89 and embedded devices.
90
91 THIS IS BROKEN AND DOES NOT WORK YET.
92
93 SAY N.
94
83# This config option automatically enables b43 LEDS support, 95# This config option automatically enables b43 LEDS support,
84# if it's possible. 96# if it's possible.
85config B43_LEDS 97config B43_LEDS
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 8c52b0b9862a..14a02b3aea53 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -1,8 +1,11 @@
1b43-y += main.o 1b43-y += main.o
2b43-y += tables.o 2b43-y += tables.o
3b43-$(CONFIG_B43_NPHY) += tables_nphy.o 3b43-$(CONFIG_B43_NPHY) += tables_nphy.o
4b43-y += phy.o 4b43-y += phy_common.o
5b43-$(CONFIG_B43_NPHY) += nphy.o 5b43-y += phy_g.o
6b43-y += phy_a.o
7b43-$(CONFIG_B43_NPHY) += phy_n.o
8b43-$(CONFIG_B43_PHY_LP) += phy_lp.o
6b43-y += sysfs.o 9b43-y += sysfs.o
7b43-y += xmit.o 10b43-y += xmit.o
8b43-y += lo.o 11b43-y += lo.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index edcdfa366452..427b8203e3f9 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -12,7 +12,7 @@
12#include "leds.h" 12#include "leds.h"
13#include "rfkill.h" 13#include "rfkill.h"
14#include "lo.h" 14#include "lo.h"
15#include "phy.h" 15#include "phy_common.h"
16 16
17 17
18/* The unique identifier of the firmware that's officially supported by 18/* The unique identifier of the firmware that's officially supported by
@@ -173,6 +173,11 @@ enum {
173#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ 173#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */
174#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5Ghz channel */ 174#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5Ghz channel */
175#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ 175#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */
176/* TSSI information */
177#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */
178#define B43_SHM_SH_TSSI_OFDM_A 0x0068 /* TSSI for last 4 OFDM frames (32bit) */
179#define B43_SHM_SH_TSSI_OFDM_G 0x0070 /* TSSI for last 4 OFDM frames (32bit) */
180#define B43_TSSI_MAX 0x7F /* Max value for one TSSI value */
176/* SHM_SHARED TX FIFO variables */ 181/* SHM_SHARED TX FIFO variables */
177#define B43_SHM_SH_SIZE01 0x0098 /* TX FIFO size for FIFO 0 (low) and 1 (high) */ 182#define B43_SHM_SH_SIZE01 0x0098 /* TX FIFO size for FIFO 0 (low) and 1 (high) */
178#define B43_SHM_SH_SIZE23 0x009A /* TX FIFO size for FIFO 2 and 3 */ 183#define B43_SHM_SH_SIZE23 0x009A /* TX FIFO size for FIFO 2 and 3 */
@@ -508,122 +513,6 @@ struct b43_iv {
508} __attribute__((__packed__)); 513} __attribute__((__packed__));
509 514
510 515
511struct b43_phy {
512 /* Band support flags. */
513 bool supports_2ghz;
514 bool supports_5ghz;
515
516 /* GMODE bit enabled? */
517 bool gmode;
518
519 /* Analog Type */
520 u8 analog;
521 /* B43_PHYTYPE_ */
522 u8 type;
523 /* PHY revision number. */
524 u8 rev;
525
526 /* Radio versioning */
527 u16 radio_manuf; /* Radio manufacturer */
528 u16 radio_ver; /* Radio version */
529 u8 radio_rev; /* Radio revision */
530
531 bool dyn_tssi_tbl; /* tssi2dbm is kmalloc()ed. */
532
533 /* ACI (adjacent channel interference) flags. */
534 bool aci_enable;
535 bool aci_wlan_automatic;
536 bool aci_hw_rssi;
537
538 /* Radio switched on/off */
539 bool radio_on;
540 struct {
541 /* Values saved when turning the radio off.
542 * They are needed when turning it on again. */
543 bool valid;
544 u16 rfover;
545 u16 rfoverval;
546 } radio_off_context;
547
548 u16 minlowsig[2];
549 u16 minlowsigpos[2];
550
551 /* TSSI to dBm table in use */
552 const s8 *tssi2dbm;
553 /* Target idle TSSI */
554 int tgt_idle_tssi;
555 /* Current idle TSSI */
556 int cur_idle_tssi;
557
558 /* LocalOscillator control values. */
559 struct b43_txpower_lo_control *lo_control;
560 /* Values from b43_calc_loopback_gain() */
561 s16 max_lb_gain; /* Maximum Loopback gain in hdB */
562 s16 trsw_rx_gain; /* TRSW RX gain in hdB */
563 s16 lna_lod_gain; /* LNA lod */
564 s16 lna_gain; /* LNA */
565 s16 pga_gain; /* PGA */
566
567 /* Desired TX power level (in dBm).
568 * This is set by the user and adjusted in b43_phy_xmitpower(). */
569 u8 power_level;
570 /* A-PHY TX Power control value. */
571 u16 txpwr_offset;
572
573 /* Current TX power level attenuation control values */
574 struct b43_bbatt bbatt;
575 struct b43_rfatt rfatt;
576 u8 tx_control; /* B43_TXCTL_XXX */
577
578 /* Hardware Power Control enabled? */
579 bool hardware_power_control;
580
581 /* Current Interference Mitigation mode */
582 int interfmode;
583 /* Stack of saved values from the Interference Mitigation code.
584 * Each value in the stack is layed out as follows:
585 * bit 0-11: offset
586 * bit 12-15: register ID
587 * bit 16-32: value
588 * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT
589 */
590#define B43_INTERFSTACK_SIZE 26
591 u32 interfstack[B43_INTERFSTACK_SIZE]; //FIXME: use a data structure
592
593 /* Saved values from the NRSSI Slope calculation */
594 s16 nrssi[2];
595 s32 nrssislope;
596 /* In memory nrssi lookup table. */
597 s8 nrssi_lt[64];
598
599 /* current channel */
600 u8 channel;
601
602 u16 lofcal;
603
604 u16 initval; //FIXME rename?
605
606 /* PHY TX errors counter. */
607 atomic_t txerr_cnt;
608
609 /* The device does address auto increment for the OFDM tables.
610 * We cache the previously used address here and omit the address
611 * write on the next table access, if possible. */
612 u16 ofdmtab_addr; /* The address currently set in hardware. */
613 enum { /* The last data flow direction. */
614 B43_OFDMTAB_DIRECTION_UNKNOWN = 0,
615 B43_OFDMTAB_DIRECTION_READ,
616 B43_OFDMTAB_DIRECTION_WRITE,
617 } ofdmtab_addr_direction;
618
619#if B43_DEBUG
620 /* Manual TX-power control enabled? */
621 bool manual_txpower_control;
622 /* PHY registers locked by b43_phy_lock()? */
623 bool phy_locked;
624#endif /* B43_DEBUG */
625};
626
627/* Data structures for DMA transmission, per 80211 core. */ 516/* Data structures for DMA transmission, per 80211 core. */
628struct b43_dma { 517struct b43_dma {
629 struct b43_dmaring *tx_ring_AC_BK; /* Background */ 518 struct b43_dmaring *tx_ring_AC_BK; /* Background */
@@ -680,7 +569,7 @@ struct b43_key {
680#define B43_QOS_VOICE B43_QOS_PARAMS(3) 569#define B43_QOS_VOICE B43_QOS_PARAMS(3)
681 570
682/* QOS parameter hardware data structure offsets. */ 571/* QOS parameter hardware data structure offsets. */
683#define B43_NR_QOSPARAMS 22 572#define B43_NR_QOSPARAMS 16
684enum { 573enum {
685 B43_QOSPARAM_TXOP = 0, 574 B43_QOSPARAM_TXOP = 0,
686 B43_QOSPARAM_CWMIN, 575 B43_QOSPARAM_CWMIN,
@@ -696,8 +585,6 @@ enum {
696struct b43_qos_params { 585struct b43_qos_params {
697 /* The QOS parameters */ 586 /* The QOS parameters */
698 struct ieee80211_tx_queue_params p; 587 struct ieee80211_tx_queue_params p;
699 /* Does this need to get uploaded to hardware? */
700 bool need_hw_update;
701}; 588};
702 589
703struct b43_wldev; 590struct b43_wldev;
@@ -759,11 +646,13 @@ struct b43_wl {
759 bool beacon_templates_virgin; /* Never wrote the templates? */ 646 bool beacon_templates_virgin; /* Never wrote the templates? */
760 struct work_struct beacon_update_trigger; 647 struct work_struct beacon_update_trigger;
761 648
762 /* The current QOS parameters for the 4 queues. 649 /* The current QOS parameters for the 4 queues. */
763 * This is protected by the irq_lock. */
764 struct b43_qos_params qos_params[4]; 650 struct b43_qos_params qos_params[4];
765 /* Workqueue for updating QOS parameters in hardware. */ 651
766 struct work_struct qos_update_work; 652 /* Work for adjustment of the transmission power.
653 * This is scheduled when we determine that the actual TX output
654 * power doesn't match what we want. */
655 struct work_struct txpower_adjust_work;
767}; 656};
768 657
769/* In-memory representation of a cached microcode file. */ 658/* In-memory representation of a cached microcode file. */
@@ -908,6 +797,15 @@ static inline int b43_is_mode(struct b43_wl *wl, int type)
908 return (wl->operating && wl->if_type == type); 797 return (wl->operating && wl->if_type == type);
909} 798}
910 799
800/**
801 * b43_current_band - Returns the currently used band.
802 * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ.
803 */
804static inline enum ieee80211_band b43_current_band(struct b43_wl *wl)
805{
806 return wl->hw->conf.channel->band;
807}
808
911static inline u16 b43_read16(struct b43_wldev *dev, u16 offset) 809static inline u16 b43_read16(struct b43_wldev *dev, u16 offset)
912{ 810{
913 return ssb_read16(dev->dev, offset); 811 return ssb_read16(dev->dev, offset);
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index 29851bc1101f..06a01da80160 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -443,76 +443,6 @@ out_unlock:
443 return count; 443 return count;
444} 444}
445 445
446static ssize_t txpower_g_read_file(struct b43_wldev *dev,
447 char *buf, size_t bufsize)
448{
449 ssize_t count = 0;
450
451 if (dev->phy.type != B43_PHYTYPE_G) {
452 fappend("Device is not a G-PHY\n");
453 goto out;
454 }
455 fappend("Control: %s\n", dev->phy.manual_txpower_control ?
456 "MANUAL" : "AUTOMATIC");
457 fappend("Baseband attenuation: %u\n", dev->phy.bbatt.att);
458 fappend("Radio attenuation: %u\n", dev->phy.rfatt.att);
459 fappend("TX Mixer Gain: %s\n",
460 (dev->phy.tx_control & B43_TXCTL_TXMIX) ? "ON" : "OFF");
461 fappend("PA Gain 2dB: %s\n",
462 (dev->phy.tx_control & B43_TXCTL_PA2DB) ? "ON" : "OFF");
463 fappend("PA Gain 3dB: %s\n",
464 (dev->phy.tx_control & B43_TXCTL_PA3DB) ? "ON" : "OFF");
465 fappend("\n\n");
466 fappend("You can write to this file:\n");
467 fappend("Writing \"auto\" enables automatic txpower control.\n");
468 fappend
469 ("Writing the attenuation values as \"bbatt rfatt txmix pa2db pa3db\" "
470 "enables manual txpower control.\n");
471 fappend("Example: 5 4 0 0 1\n");
472 fappend("Enables manual control with Baseband attenuation 5, "
473 "Radio attenuation 4, No TX Mixer Gain, "
474 "No PA Gain 2dB, With PA Gain 3dB.\n");
475out:
476 return count;
477}
478
479static int txpower_g_write_file(struct b43_wldev *dev,
480 const char *buf, size_t count)
481{
482 if (dev->phy.type != B43_PHYTYPE_G)
483 return -ENODEV;
484 if ((count >= 4) && (memcmp(buf, "auto", 4) == 0)) {
485 /* Automatic control */
486 dev->phy.manual_txpower_control = 0;
487 b43_phy_xmitpower(dev);
488 } else {
489 int bbatt = 0, rfatt = 0, txmix = 0, pa2db = 0, pa3db = 0;
490 /* Manual control */
491 if (sscanf(buf, "%d %d %d %d %d", &bbatt, &rfatt,
492 &txmix, &pa2db, &pa3db) != 5)
493 return -EINVAL;
494 b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
495 dev->phy.manual_txpower_control = 1;
496 dev->phy.bbatt.att = bbatt;
497 dev->phy.rfatt.att = rfatt;
498 dev->phy.tx_control = 0;
499 if (txmix)
500 dev->phy.tx_control |= B43_TXCTL_TXMIX;
501 if (pa2db)
502 dev->phy.tx_control |= B43_TXCTL_PA2DB;
503 if (pa3db)
504 dev->phy.tx_control |= B43_TXCTL_PA3DB;
505 b43_phy_lock(dev);
506 b43_radio_lock(dev);
507 b43_set_txpower_g(dev, &dev->phy.bbatt,
508 &dev->phy.rfatt, dev->phy.tx_control);
509 b43_radio_unlock(dev);
510 b43_phy_unlock(dev);
511 }
512
513 return 0;
514}
515
516/* wl->irq_lock is locked */ 446/* wl->irq_lock is locked */
517static int restart_write_file(struct b43_wldev *dev, 447static int restart_write_file(struct b43_wldev *dev,
518 const char *buf, size_t count) 448 const char *buf, size_t count)
@@ -560,7 +490,7 @@ static ssize_t loctls_read_file(struct b43_wldev *dev,
560 err = -ENODEV; 490 err = -ENODEV;
561 goto out; 491 goto out;
562 } 492 }
563 lo = phy->lo_control; 493 lo = phy->g->lo_control;
564 fappend("-- Local Oscillator calibration data --\n\n"); 494 fappend("-- Local Oscillator calibration data --\n\n");
565 fappend("HW-power-control enabled: %d\n", 495 fappend("HW-power-control enabled: %d\n",
566 dev->phy.hardware_power_control); 496 dev->phy.hardware_power_control);
@@ -578,8 +508,8 @@ static ssize_t loctls_read_file(struct b43_wldev *dev,
578 list_for_each_entry(cal, &lo->calib_list, list) { 508 list_for_each_entry(cal, &lo->calib_list, list) {
579 bool active; 509 bool active;
580 510
581 active = (b43_compare_bbatt(&cal->bbatt, &phy->bbatt) && 511 active = (b43_compare_bbatt(&cal->bbatt, &phy->g->bbatt) &&
582 b43_compare_rfatt(&cal->rfatt, &phy->rfatt)); 512 b43_compare_rfatt(&cal->rfatt, &phy->g->rfatt));
583 fappend("BB(%d), RF(%d,%d) -> I=%d, Q=%d " 513 fappend("BB(%d), RF(%d,%d) -> I=%d, Q=%d "
584 "(expires in %lu sec)%s\n", 514 "(expires in %lu sec)%s\n",
585 cal->bbatt.att, 515 cal->bbatt.att,
@@ -763,7 +693,6 @@ B43_DEBUGFS_FOPS(mmio32read, mmio32read__read_file, mmio32read__write_file, 1);
763B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file, 1); 693B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file, 1);
764B43_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1); 694B43_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1);
765B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0); 695B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0);
766B43_DEBUGFS_FOPS(txpower_g, txpower_g_read_file, txpower_g_write_file, 0);
767B43_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1); 696B43_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1);
768B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL, 0); 697B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL, 0);
769 698
@@ -877,7 +806,6 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
877 ADD_FILE(mmio32write, 0200); 806 ADD_FILE(mmio32write, 0200);
878 ADD_FILE(tsf, 0600); 807 ADD_FILE(tsf, 0600);
879 ADD_FILE(txstat, 0400); 808 ADD_FILE(txstat, 0400);
880 ADD_FILE(txpower_g, 0600);
881 ADD_FILE(restart, 0200); 809 ADD_FILE(restart, 0200);
882 ADD_FILE(loctls, 0400); 810 ADD_FILE(loctls, 0400);
883 811
@@ -907,7 +835,6 @@ void b43_debugfs_remove_device(struct b43_wldev *dev)
907 debugfs_remove(e->file_mmio32write.dentry); 835 debugfs_remove(e->file_mmio32write.dentry);
908 debugfs_remove(e->file_tsf.dentry); 836 debugfs_remove(e->file_tsf.dentry);
909 debugfs_remove(e->file_txstat.dentry); 837 debugfs_remove(e->file_txstat.dentry);
910 debugfs_remove(e->file_txpower_g.dentry);
911 debugfs_remove(e->file_restart.dentry); 838 debugfs_remove(e->file_restart.dentry);
912 debugfs_remove(e->file_loctls.dentry); 839 debugfs_remove(e->file_loctls.dentry);
913 840
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 9c854d6aae36..6a18a1470465 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -29,7 +29,7 @@
29 29
30#include "b43.h" 30#include "b43.h"
31#include "lo.h" 31#include "lo.h"
32#include "phy.h" 32#include "phy_g.h"
33#include "main.h" 33#include "main.h"
34 34
35#include <linux/delay.h> 35#include <linux/delay.h>
@@ -174,7 +174,8 @@ static u16 lo_txctl_register_table(struct b43_wldev *dev,
174static void lo_measure_txctl_values(struct b43_wldev *dev) 174static void lo_measure_txctl_values(struct b43_wldev *dev)
175{ 175{
176 struct b43_phy *phy = &dev->phy; 176 struct b43_phy *phy = &dev->phy;
177 struct b43_txpower_lo_control *lo = phy->lo_control; 177 struct b43_phy_g *gphy = phy->g;
178 struct b43_txpower_lo_control *lo = gphy->lo_control;
178 u16 reg, mask; 179 u16 reg, mask;
179 u16 trsw_rx, pga; 180 u16 trsw_rx, pga;
180 u16 radio_pctl_reg; 181 u16 radio_pctl_reg;
@@ -195,7 +196,7 @@ static void lo_measure_txctl_values(struct b43_wldev *dev)
195 int lb_gain; /* Loopback gain (in dB) */ 196 int lb_gain; /* Loopback gain (in dB) */
196 197
197 trsw_rx = 0; 198 trsw_rx = 0;
198 lb_gain = phy->max_lb_gain / 2; 199 lb_gain = gphy->max_lb_gain / 2;
199 if (lb_gain > 10) { 200 if (lb_gain > 10) {
200 radio_pctl_reg = 0; 201 radio_pctl_reg = 0;
201 pga = abs(10 - lb_gain) / 6; 202 pga = abs(10 - lb_gain) / 6;
@@ -226,7 +227,7 @@ static void lo_measure_txctl_values(struct b43_wldev *dev)
226 } 227 }
227 b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) 228 b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43)
228 & 0xFFF0) | radio_pctl_reg); 229 & 0xFFF0) | radio_pctl_reg);
229 b43_phy_set_baseband_attenuation(dev, 2); 230 b43_gphy_set_baseband_attenuation(dev, 2);
230 231
231 reg = lo_txctl_register_table(dev, &mask, NULL); 232 reg = lo_txctl_register_table(dev, &mask, NULL);
232 mask = ~mask; 233 mask = ~mask;
@@ -277,7 +278,8 @@ static void lo_measure_txctl_values(struct b43_wldev *dev)
277static void lo_read_power_vector(struct b43_wldev *dev) 278static void lo_read_power_vector(struct b43_wldev *dev)
278{ 279{
279 struct b43_phy *phy = &dev->phy; 280 struct b43_phy *phy = &dev->phy;
280 struct b43_txpower_lo_control *lo = phy->lo_control; 281 struct b43_phy_g *gphy = phy->g;
282 struct b43_txpower_lo_control *lo = gphy->lo_control;
281 int i; 283 int i;
282 u64 tmp; 284 u64 tmp;
283 u64 power_vector = 0; 285 u64 power_vector = 0;
@@ -298,6 +300,7 @@ static void lo_measure_gain_values(struct b43_wldev *dev,
298 s16 max_rx_gain, int use_trsw_rx) 300 s16 max_rx_gain, int use_trsw_rx)
299{ 301{
300 struct b43_phy *phy = &dev->phy; 302 struct b43_phy *phy = &dev->phy;
303 struct b43_phy_g *gphy = phy->g;
301 u16 tmp; 304 u16 tmp;
302 305
303 if (max_rx_gain < 0) 306 if (max_rx_gain < 0)
@@ -308,7 +311,7 @@ static void lo_measure_gain_values(struct b43_wldev *dev,
308 int trsw_rx_gain; 311 int trsw_rx_gain;
309 312
310 if (use_trsw_rx) { 313 if (use_trsw_rx) {
311 trsw_rx_gain = phy->trsw_rx_gain / 2; 314 trsw_rx_gain = gphy->trsw_rx_gain / 2;
312 if (max_rx_gain >= trsw_rx_gain) { 315 if (max_rx_gain >= trsw_rx_gain) {
313 trsw_rx_gain = max_rx_gain - trsw_rx_gain; 316 trsw_rx_gain = max_rx_gain - trsw_rx_gain;
314 trsw_rx = 0x20; 317 trsw_rx = 0x20;
@@ -316,38 +319,38 @@ static void lo_measure_gain_values(struct b43_wldev *dev,
316 } else 319 } else
317 trsw_rx_gain = max_rx_gain; 320 trsw_rx_gain = max_rx_gain;
318 if (trsw_rx_gain < 9) { 321 if (trsw_rx_gain < 9) {
319 phy->lna_lod_gain = 0; 322 gphy->lna_lod_gain = 0;
320 } else { 323 } else {
321 phy->lna_lod_gain = 1; 324 gphy->lna_lod_gain = 1;
322 trsw_rx_gain -= 8; 325 trsw_rx_gain -= 8;
323 } 326 }
324 trsw_rx_gain = clamp_val(trsw_rx_gain, 0, 0x2D); 327 trsw_rx_gain = clamp_val(trsw_rx_gain, 0, 0x2D);
325 phy->pga_gain = trsw_rx_gain / 3; 328 gphy->pga_gain = trsw_rx_gain / 3;
326 if (phy->pga_gain >= 5) { 329 if (gphy->pga_gain >= 5) {
327 phy->pga_gain -= 5; 330 gphy->pga_gain -= 5;
328 phy->lna_gain = 2; 331 gphy->lna_gain = 2;
329 } else 332 } else
330 phy->lna_gain = 0; 333 gphy->lna_gain = 0;
331 } else { 334 } else {
332 phy->lna_gain = 0; 335 gphy->lna_gain = 0;
333 phy->trsw_rx_gain = 0x20; 336 gphy->trsw_rx_gain = 0x20;
334 if (max_rx_gain >= 0x14) { 337 if (max_rx_gain >= 0x14) {
335 phy->lna_lod_gain = 1; 338 gphy->lna_lod_gain = 1;
336 phy->pga_gain = 2; 339 gphy->pga_gain = 2;
337 } else if (max_rx_gain >= 0x12) { 340 } else if (max_rx_gain >= 0x12) {
338 phy->lna_lod_gain = 1; 341 gphy->lna_lod_gain = 1;
339 phy->pga_gain = 1; 342 gphy->pga_gain = 1;
340 } else if (max_rx_gain >= 0xF) { 343 } else if (max_rx_gain >= 0xF) {
341 phy->lna_lod_gain = 1; 344 gphy->lna_lod_gain = 1;
342 phy->pga_gain = 0; 345 gphy->pga_gain = 0;
343 } else { 346 } else {
344 phy->lna_lod_gain = 0; 347 gphy->lna_lod_gain = 0;
345 phy->pga_gain = 0; 348 gphy->pga_gain = 0;
346 } 349 }
347 } 350 }
348 351
349 tmp = b43_radio_read16(dev, 0x7A); 352 tmp = b43_radio_read16(dev, 0x7A);
350 if (phy->lna_lod_gain == 0) 353 if (gphy->lna_lod_gain == 0)
351 tmp &= ~0x0008; 354 tmp &= ~0x0008;
352 else 355 else
353 tmp |= 0x0008; 356 tmp |= 0x0008;
@@ -392,10 +395,11 @@ static void lo_measure_setup(struct b43_wldev *dev,
392{ 395{
393 struct ssb_sprom *sprom = &dev->dev->bus->sprom; 396 struct ssb_sprom *sprom = &dev->dev->bus->sprom;
394 struct b43_phy *phy = &dev->phy; 397 struct b43_phy *phy = &dev->phy;
395 struct b43_txpower_lo_control *lo = phy->lo_control; 398 struct b43_phy_g *gphy = phy->g;
399 struct b43_txpower_lo_control *lo = gphy->lo_control;
396 u16 tmp; 400 u16 tmp;
397 401
398 if (b43_has_hardware_pctl(phy)) { 402 if (b43_has_hardware_pctl(dev)) {
399 sav->phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK); 403 sav->phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK);
400 sav->phy_extg_01 = b43_phy_read(dev, B43_PHY_EXTG(0x01)); 404 sav->phy_extg_01 = b43_phy_read(dev, B43_PHY_EXTG(0x01));
401 sav->phy_dacctl_hwpctl = b43_phy_read(dev, B43_PHY_DACCTL); 405 sav->phy_dacctl_hwpctl = b43_phy_read(dev, B43_PHY_DACCTL);
@@ -496,7 +500,7 @@ static void lo_measure_setup(struct b43_wldev *dev,
496 b43_phy_write(dev, B43_PHY_CCK(0x2B), 0x0802); 500 b43_phy_write(dev, B43_PHY_CCK(0x2B), 0x0802);
497 if (phy->rev >= 2) 501 if (phy->rev >= 2)
498 b43_dummy_transmission(dev); 502 b43_dummy_transmission(dev);
499 b43_radio_selectchannel(dev, 6, 0); 503 b43_gphy_channel_switch(dev, 6, 0);
500 b43_radio_read16(dev, 0x51); /* dummy read */ 504 b43_radio_read16(dev, 0x51); /* dummy read */
501 if (phy->type == B43_PHYTYPE_G) 505 if (phy->type == B43_PHYTYPE_G)
502 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0); 506 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0);
@@ -520,18 +524,19 @@ static void lo_measure_restore(struct b43_wldev *dev,
520 struct lo_g_saved_values *sav) 524 struct lo_g_saved_values *sav)
521{ 525{
522 struct b43_phy *phy = &dev->phy; 526 struct b43_phy *phy = &dev->phy;
527 struct b43_phy_g *gphy = phy->g;
523 u16 tmp; 528 u16 tmp;
524 529
525 if (phy->rev >= 2) { 530 if (phy->rev >= 2) {
526 b43_phy_write(dev, B43_PHY_PGACTL, 0xE300); 531 b43_phy_write(dev, B43_PHY_PGACTL, 0xE300);
527 tmp = (phy->pga_gain << 8); 532 tmp = (gphy->pga_gain << 8);
528 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA0); 533 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA0);
529 udelay(5); 534 udelay(5);
530 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA2); 535 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA2);
531 udelay(2); 536 udelay(2);
532 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA3); 537 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA3);
533 } else { 538 } else {
534 tmp = (phy->pga_gain | 0xEFA0); 539 tmp = (gphy->pga_gain | 0xEFA0);
535 b43_phy_write(dev, B43_PHY_PGACTL, tmp); 540 b43_phy_write(dev, B43_PHY_PGACTL, tmp);
536 } 541 }
537 if (phy->type == B43_PHYTYPE_G) { 542 if (phy->type == B43_PHYTYPE_G) {
@@ -572,7 +577,7 @@ static void lo_measure_restore(struct b43_wldev *dev,
572 b43_phy_write(dev, B43_PHY_CCK(0x3E), sav->phy_cck_3E); 577 b43_phy_write(dev, B43_PHY_CCK(0x3E), sav->phy_cck_3E);
573 b43_phy_write(dev, B43_PHY_CRS0, sav->phy_crs0); 578 b43_phy_write(dev, B43_PHY_CRS0, sav->phy_crs0);
574 } 579 }
575 if (b43_has_hardware_pctl(phy)) { 580 if (b43_has_hardware_pctl(dev)) {
576 tmp = (sav->phy_lo_mask & 0xBFFF); 581 tmp = (sav->phy_lo_mask & 0xBFFF);
577 b43_phy_write(dev, B43_PHY_LO_MASK, tmp); 582 b43_phy_write(dev, B43_PHY_LO_MASK, tmp);
578 b43_phy_write(dev, B43_PHY_EXTG(0x01), sav->phy_extg_01); 583 b43_phy_write(dev, B43_PHY_EXTG(0x01), sav->phy_extg_01);
@@ -580,7 +585,7 @@ static void lo_measure_restore(struct b43_wldev *dev,
580 b43_phy_write(dev, B43_PHY_CCK(0x14), sav->phy_cck_14); 585 b43_phy_write(dev, B43_PHY_CCK(0x14), sav->phy_cck_14);
581 b43_phy_write(dev, B43_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl); 586 b43_phy_write(dev, B43_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl);
582 } 587 }
583 b43_radio_selectchannel(dev, sav->old_channel, 1); 588 b43_gphy_channel_switch(dev, sav->old_channel, 1);
584} 589}
585 590
586struct b43_lo_g_statemachine { 591struct b43_lo_g_statemachine {
@@ -597,6 +602,7 @@ static int lo_probe_possible_loctls(struct b43_wldev *dev,
597 struct b43_lo_g_statemachine *d) 602 struct b43_lo_g_statemachine *d)
598{ 603{
599 struct b43_phy *phy = &dev->phy; 604 struct b43_phy *phy = &dev->phy;
605 struct b43_phy_g *gphy = phy->g;
600 struct b43_loctl test_loctl; 606 struct b43_loctl test_loctl;
601 struct b43_loctl orig_loctl; 607 struct b43_loctl orig_loctl;
602 struct b43_loctl prev_loctl = { 608 struct b43_loctl prev_loctl = {
@@ -646,9 +652,9 @@ static int lo_probe_possible_loctls(struct b43_wldev *dev,
646 test_loctl.q != prev_loctl.q) && 652 test_loctl.q != prev_loctl.q) &&
647 (abs(test_loctl.i) <= 16 && abs(test_loctl.q) <= 16)) { 653 (abs(test_loctl.i) <= 16 && abs(test_loctl.q) <= 16)) {
648 b43_lo_write(dev, &test_loctl); 654 b43_lo_write(dev, &test_loctl);
649 feedth = lo_measure_feedthrough(dev, phy->lna_gain, 655 feedth = lo_measure_feedthrough(dev, gphy->lna_gain,
650 phy->pga_gain, 656 gphy->pga_gain,
651 phy->trsw_rx_gain); 657 gphy->trsw_rx_gain);
652 if (feedth < d->lowest_feedth) { 658 if (feedth < d->lowest_feedth) {
653 memcpy(probe_loctl, &test_loctl, 659 memcpy(probe_loctl, &test_loctl,
654 sizeof(struct b43_loctl)); 660 sizeof(struct b43_loctl));
@@ -677,6 +683,7 @@ static void lo_probe_loctls_statemachine(struct b43_wldev *dev,
677 int *max_rx_gain) 683 int *max_rx_gain)
678{ 684{
679 struct b43_phy *phy = &dev->phy; 685 struct b43_phy *phy = &dev->phy;
686 struct b43_phy_g *gphy = phy->g;
680 struct b43_lo_g_statemachine d; 687 struct b43_lo_g_statemachine d;
681 u16 feedth; 688 u16 feedth;
682 int found_lower; 689 int found_lower;
@@ -693,17 +700,17 @@ static void lo_probe_loctls_statemachine(struct b43_wldev *dev,
693 max_repeat = 4; 700 max_repeat = 4;
694 do { 701 do {
695 b43_lo_write(dev, &d.min_loctl); 702 b43_lo_write(dev, &d.min_loctl);
696 feedth = lo_measure_feedthrough(dev, phy->lna_gain, 703 feedth = lo_measure_feedthrough(dev, gphy->lna_gain,
697 phy->pga_gain, 704 gphy->pga_gain,
698 phy->trsw_rx_gain); 705 gphy->trsw_rx_gain);
699 if (feedth < 0x258) { 706 if (feedth < 0x258) {
700 if (feedth >= 0x12C) 707 if (feedth >= 0x12C)
701 *max_rx_gain += 6; 708 *max_rx_gain += 6;
702 else 709 else
703 *max_rx_gain += 3; 710 *max_rx_gain += 3;
704 feedth = lo_measure_feedthrough(dev, phy->lna_gain, 711 feedth = lo_measure_feedthrough(dev, gphy->lna_gain,
705 phy->pga_gain, 712 gphy->pga_gain,
706 phy->trsw_rx_gain); 713 gphy->trsw_rx_gain);
707 } 714 }
708 d.lowest_feedth = feedth; 715 d.lowest_feedth = feedth;
709 716
@@ -752,6 +759,7 @@ struct b43_lo_calib * b43_calibrate_lo_setting(struct b43_wldev *dev,
752 const struct b43_rfatt *rfatt) 759 const struct b43_rfatt *rfatt)
753{ 760{
754 struct b43_phy *phy = &dev->phy; 761 struct b43_phy *phy = &dev->phy;
762 struct b43_phy_g *gphy = phy->g;
755 struct b43_loctl loctl = { 763 struct b43_loctl loctl = {
756 .i = 0, 764 .i = 0,
757 .q = 0, 765 .q = 0,
@@ -782,11 +790,11 @@ struct b43_lo_calib * b43_calibrate_lo_setting(struct b43_wldev *dev,
782 if (rfatt->with_padmix) 790 if (rfatt->with_padmix)
783 max_rx_gain -= pad_mix_gain; 791 max_rx_gain -= pad_mix_gain;
784 if (has_loopback_gain(phy)) 792 if (has_loopback_gain(phy))
785 max_rx_gain += phy->max_lb_gain; 793 max_rx_gain += gphy->max_lb_gain;
786 lo_measure_gain_values(dev, max_rx_gain, 794 lo_measure_gain_values(dev, max_rx_gain,
787 has_loopback_gain(phy)); 795 has_loopback_gain(phy));
788 796
789 b43_phy_set_baseband_attenuation(dev, bbatt->att); 797 b43_gphy_set_baseband_attenuation(dev, bbatt->att);
790 lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain); 798 lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain);
791 799
792 lo_measure_restore(dev, &saved_regs); 800 lo_measure_restore(dev, &saved_regs);
@@ -820,7 +828,7 @@ struct b43_lo_calib * b43_get_calib_lo_settings(struct b43_wldev *dev,
820 const struct b43_bbatt *bbatt, 828 const struct b43_bbatt *bbatt,
821 const struct b43_rfatt *rfatt) 829 const struct b43_rfatt *rfatt)
822{ 830{
823 struct b43_txpower_lo_control *lo = dev->phy.lo_control; 831 struct b43_txpower_lo_control *lo = dev->phy.g->lo_control;
824 struct b43_lo_calib *c; 832 struct b43_lo_calib *c;
825 833
826 c = b43_find_lo_calib(lo, bbatt, rfatt); 834 c = b43_find_lo_calib(lo, bbatt, rfatt);
@@ -839,7 +847,8 @@ struct b43_lo_calib * b43_get_calib_lo_settings(struct b43_wldev *dev,
839void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all) 847void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
840{ 848{
841 struct b43_phy *phy = &dev->phy; 849 struct b43_phy *phy = &dev->phy;
842 struct b43_txpower_lo_control *lo = phy->lo_control; 850 struct b43_phy_g *gphy = phy->g;
851 struct b43_txpower_lo_control *lo = gphy->lo_control;
843 int i; 852 int i;
844 int rf_offset, bb_offset; 853 int rf_offset, bb_offset;
845 const struct b43_rfatt *rfatt; 854 const struct b43_rfatt *rfatt;
@@ -917,14 +926,14 @@ static inline void b43_lo_fixup_rfatt(struct b43_rfatt *rf)
917 926
918void b43_lo_g_adjust(struct b43_wldev *dev) 927void b43_lo_g_adjust(struct b43_wldev *dev)
919{ 928{
920 struct b43_phy *phy = &dev->phy; 929 struct b43_phy_g *gphy = dev->phy.g;
921 struct b43_lo_calib *cal; 930 struct b43_lo_calib *cal;
922 struct b43_rfatt rf; 931 struct b43_rfatt rf;
923 932
924 memcpy(&rf, &phy->rfatt, sizeof(rf)); 933 memcpy(&rf, &gphy->rfatt, sizeof(rf));
925 b43_lo_fixup_rfatt(&rf); 934 b43_lo_fixup_rfatt(&rf);
926 935
927 cal = b43_get_calib_lo_settings(dev, &phy->bbatt, &rf); 936 cal = b43_get_calib_lo_settings(dev, &gphy->bbatt, &rf);
928 if (!cal) 937 if (!cal)
929 return; 938 return;
930 b43_lo_write(dev, &cal->ctl); 939 b43_lo_write(dev, &cal->ctl);
@@ -952,7 +961,8 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
952void b43_lo_g_maintanance_work(struct b43_wldev *dev) 961void b43_lo_g_maintanance_work(struct b43_wldev *dev)
953{ 962{
954 struct b43_phy *phy = &dev->phy; 963 struct b43_phy *phy = &dev->phy;
955 struct b43_txpower_lo_control *lo = phy->lo_control; 964 struct b43_phy_g *gphy = phy->g;
965 struct b43_txpower_lo_control *lo = gphy->lo_control;
956 unsigned long now; 966 unsigned long now;
957 unsigned long expire; 967 unsigned long expire;
958 struct b43_lo_calib *cal, *tmp; 968 struct b43_lo_calib *cal, *tmp;
@@ -962,7 +972,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
962 if (!lo) 972 if (!lo)
963 return; 973 return;
964 now = jiffies; 974 now = jiffies;
965 hwpctl = b43_has_hardware_pctl(phy); 975 hwpctl = b43_has_hardware_pctl(dev);
966 976
967 if (hwpctl) { 977 if (hwpctl) {
968 /* Read the power vector and update it, if needed. */ 978 /* Read the power vector and update it, if needed. */
@@ -983,8 +993,8 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
983 if (!time_before(cal->calib_time, expire)) 993 if (!time_before(cal->calib_time, expire))
984 continue; 994 continue;
985 /* This item expired. */ 995 /* This item expired. */
986 if (b43_compare_bbatt(&cal->bbatt, &phy->bbatt) && 996 if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) &&
987 b43_compare_rfatt(&cal->rfatt, &phy->rfatt)) { 997 b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) {
988 B43_WARN_ON(current_item_expired); 998 B43_WARN_ON(current_item_expired);
989 current_item_expired = 1; 999 current_item_expired = 1;
990 } 1000 }
@@ -1002,7 +1012,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
1002 /* Recalibrate currently used LO setting. */ 1012 /* Recalibrate currently used LO setting. */
1003 if (b43_debug(dev, B43_DBG_LO)) 1013 if (b43_debug(dev, B43_DBG_LO))
1004 b43dbg(dev->wl, "LO: Recalibrating current LO setting\n"); 1014 b43dbg(dev->wl, "LO: Recalibrating current LO setting\n");
1005 cal = b43_calibrate_lo_setting(dev, &phy->bbatt, &phy->rfatt); 1015 cal = b43_calibrate_lo_setting(dev, &gphy->bbatt, &gphy->rfatt);
1006 if (cal) { 1016 if (cal) {
1007 list_add(&cal->list, &lo->calib_list); 1017 list_add(&cal->list, &lo->calib_list);
1008 b43_lo_write(dev, &cal->ctl); 1018 b43_lo_write(dev, &cal->ctl);
@@ -1013,7 +1023,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
1013 1023
1014void b43_lo_g_cleanup(struct b43_wldev *dev) 1024void b43_lo_g_cleanup(struct b43_wldev *dev)
1015{ 1025{
1016 struct b43_txpower_lo_control *lo = dev->phy.lo_control; 1026 struct b43_txpower_lo_control *lo = dev->phy.g->lo_control;
1017 struct b43_lo_calib *cal, *tmp; 1027 struct b43_lo_calib *cal, *tmp;
1018 1028
1019 if (!lo) 1029 if (!lo)
@@ -1027,9 +1037,7 @@ void b43_lo_g_cleanup(struct b43_wldev *dev)
1027/* LO Initialization */ 1037/* LO Initialization */
1028void b43_lo_g_init(struct b43_wldev *dev) 1038void b43_lo_g_init(struct b43_wldev *dev)
1029{ 1039{
1030 struct b43_phy *phy = &dev->phy; 1040 if (b43_has_hardware_pctl(dev)) {
1031
1032 if (b43_has_hardware_pctl(phy)) {
1033 lo_read_power_vector(dev); 1041 lo_read_power_vector(dev);
1034 b43_gphy_dc_lt_init(dev, 1); 1042 b43_gphy_dc_lt_init(dev, 1);
1035 } 1043 }
diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h
index 1da321cabc12..3b27e20eff80 100644
--- a/drivers/net/wireless/b43/lo.h
+++ b/drivers/net/wireless/b43/lo.h
@@ -1,7 +1,9 @@
1#ifndef B43_LO_H_ 1#ifndef B43_LO_H_
2#define B43_LO_H_ 2#define B43_LO_H_
3 3
4#include "phy.h" 4/* G-PHY Local Oscillator */
5
6#include "phy_g.h"
5 7
6struct b43_wldev; 8struct b43_wldev;
7 9
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 7205a936ec74..0f628a29d833 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -44,8 +44,9 @@
44#include "b43.h" 44#include "b43.h"
45#include "main.h" 45#include "main.h"
46#include "debugfs.h" 46#include "debugfs.h"
47#include "phy.h" 47#include "phy_common.h"
48#include "nphy.h" 48#include "phy_g.h"
49#include "phy_n.h"
49#include "dma.h" 50#include "dma.h"
50#include "pio.h" 51#include "pio.h"
51#include "sysfs.h" 52#include "sysfs.h"
@@ -1051,23 +1052,6 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
1051 } 1052 }
1052} 1053}
1053 1054
1054/* Turn the Analog ON/OFF */
1055static void b43_switch_analog(struct b43_wldev *dev, int on)
1056{
1057 switch (dev->phy.type) {
1058 case B43_PHYTYPE_A:
1059 case B43_PHYTYPE_G:
1060 b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
1061 break;
1062 case B43_PHYTYPE_N:
1063 b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
1064 on ? 0 : 0x7FFF);
1065 break;
1066 default:
1067 B43_WARN_ON(1);
1068 }
1069}
1070
1071void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) 1055void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
1072{ 1056{
1073 u32 tmslow; 1057 u32 tmslow;
@@ -1090,8 +1074,12 @@ void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
1090 ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ 1074 ssb_read32(dev->dev, SSB_TMSLOW); /* flush */
1091 msleep(1); 1075 msleep(1);
1092 1076
1093 /* Turn Analog ON */ 1077 /* Turn Analog ON, but only if we already know the PHY-type.
1094 b43_switch_analog(dev, 1); 1078 * This protects against very early setup where we don't know the
1079 * PHY-type, yet. wireless_core_reset will be called once again later,
1080 * when we know the PHY-type. */
1081 if (dev->phy.ops)
1082 dev->phy.ops->switch_analog(dev, 1);
1095 1083
1096 macctl = b43_read32(dev, B43_MMIO_MACCTL); 1084 macctl = b43_read32(dev, B43_MMIO_MACCTL);
1097 macctl &= ~B43_MACCTL_GMODE; 1085 macctl &= ~B43_MACCTL_GMODE;
@@ -1174,6 +1162,8 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
1174{ 1162{
1175 /* Top half of Link Quality calculation. */ 1163 /* Top half of Link Quality calculation. */
1176 1164
1165 if (dev->phy.type != B43_PHYTYPE_G)
1166 return;
1177 if (dev->noisecalc.calculation_running) 1167 if (dev->noisecalc.calculation_running)
1178 return; 1168 return;
1179 dev->noisecalc.calculation_running = 1; 1169 dev->noisecalc.calculation_running = 1;
@@ -1184,7 +1174,7 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
1184 1174
1185static void handle_irq_noise(struct b43_wldev *dev) 1175static void handle_irq_noise(struct b43_wldev *dev)
1186{ 1176{
1187 struct b43_phy *phy = &dev->phy; 1177 struct b43_phy_g *phy = dev->phy.g;
1188 u16 tmp; 1178 u16 tmp;
1189 u8 noise[4]; 1179 u8 noise[4];
1190 u8 i, j; 1180 u8 i, j;
@@ -1192,6 +1182,9 @@ static void handle_irq_noise(struct b43_wldev *dev)
1192 1182
1193 /* Bottom half of Link Quality calculation. */ 1183 /* Bottom half of Link Quality calculation. */
1194 1184
1185 if (dev->phy.type != B43_PHYTYPE_G)
1186 return;
1187
1195 /* Possible race condition: It might be possible that the user 1188 /* Possible race condition: It might be possible that the user
1196 * changed to a different channel in the meantime since we 1189 * changed to a different channel in the meantime since we
1197 * started the calculation. We ignore that fact, since it's 1190 * started the calculation. We ignore that fact, since it's
@@ -1251,13 +1244,13 @@ generate_new:
1251 1244
1252static void handle_irq_tbtt_indication(struct b43_wldev *dev) 1245static void handle_irq_tbtt_indication(struct b43_wldev *dev)
1253{ 1246{
1254 if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) { 1247 if (b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) {
1255 ///TODO: PS TBTT 1248 ///TODO: PS TBTT
1256 } else { 1249 } else {
1257 if (1 /*FIXME: the last PSpoll frame was sent successfully */ ) 1250 if (1 /*FIXME: the last PSpoll frame was sent successfully */ )
1258 b43_power_saving_ctl_bits(dev, 0); 1251 b43_power_saving_ctl_bits(dev, 0);
1259 } 1252 }
1260 if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) 1253 if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
1261 dev->dfq_valid = 1; 1254 dev->dfq_valid = 1;
1262} 1255}
1263 1256
@@ -1606,8 +1599,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
1606 struct b43_wl *wl = dev->wl; 1599 struct b43_wl *wl = dev->wl;
1607 u32 cmd, beacon0_valid, beacon1_valid; 1600 u32 cmd, beacon0_valid, beacon1_valid;
1608 1601
1609 if (!b43_is_mode(wl, IEEE80211_IF_TYPE_AP) && 1602 if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
1610 !b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) 1603 !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
1611 return; 1604 return;
1612 1605
1613 /* This is the bottom half of the asynchronous beacon update. */ 1606 /* This is the bottom half of the asynchronous beacon update. */
@@ -2575,10 +2568,10 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
2575 ctl &= ~B43_MACCTL_BEACPROMISC; 2568 ctl &= ~B43_MACCTL_BEACPROMISC;
2576 ctl |= B43_MACCTL_INFRA; 2569 ctl |= B43_MACCTL_INFRA;
2577 2570
2578 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) || 2571 if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
2579 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) 2572 b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
2580 ctl |= B43_MACCTL_AP; 2573 ctl |= B43_MACCTL_AP;
2581 else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) 2574 else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
2582 ctl &= ~B43_MACCTL_INFRA; 2575 ctl &= ~B43_MACCTL_INFRA;
2583 2576
2584 if (wl->filter_flags & FIF_CONTROL) 2577 if (wl->filter_flags & FIF_CONTROL)
@@ -2688,9 +2681,8 @@ static void b43_mgmtframe_txantenna(struct b43_wldev *dev, int antenna)
2688/* This is the opposite of b43_chip_init() */ 2681/* This is the opposite of b43_chip_init() */
2689static void b43_chip_exit(struct b43_wldev *dev) 2682static void b43_chip_exit(struct b43_wldev *dev)
2690{ 2683{
2691 b43_radio_turn_off(dev, 1); 2684 b43_phy_exit(dev);
2692 b43_gpio_cleanup(dev); 2685 b43_gpio_cleanup(dev);
2693 b43_lo_g_cleanup(dev);
2694 /* firmware is released later */ 2686 /* firmware is released later */
2695} 2687}
2696 2688
@@ -2700,7 +2692,7 @@ static void b43_chip_exit(struct b43_wldev *dev)
2700static int b43_chip_init(struct b43_wldev *dev) 2692static int b43_chip_init(struct b43_wldev *dev)
2701{ 2693{
2702 struct b43_phy *phy = &dev->phy; 2694 struct b43_phy *phy = &dev->phy;
2703 int err, tmp; 2695 int err;
2704 u32 value32, macctl; 2696 u32 value32, macctl;
2705 u16 value16; 2697 u16 value16;
2706 2698
@@ -2725,19 +2717,20 @@ static int b43_chip_init(struct b43_wldev *dev)
2725 err = b43_upload_initvals(dev); 2717 err = b43_upload_initvals(dev);
2726 if (err) 2718 if (err)
2727 goto err_gpio_clean; 2719 goto err_gpio_clean;
2728 b43_radio_turn_on(dev);
2729 2720
2730 b43_write16(dev, 0x03E6, 0x0000); 2721 /* Turn the Analog on and initialize the PHY. */
2722 phy->ops->switch_analog(dev, 1);
2731 err = b43_phy_init(dev); 2723 err = b43_phy_init(dev);
2732 if (err) 2724 if (err)
2733 goto err_radio_off; 2725 goto err_gpio_clean;
2734 2726
2735 /* Select initial Interference Mitigation. */ 2727 /* Disable Interference Mitigation. */
2736 tmp = phy->interfmode; 2728 if (phy->ops->interf_mitigation)
2737 phy->interfmode = B43_INTERFMODE_NONE; 2729 phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE);
2738 b43_radio_set_interference_mitigation(dev, tmp);
2739 2730
2740 b43_set_rx_antenna(dev, B43_ANTENNA_DEFAULT); 2731 /* Select the antennae */
2732 if (phy->ops->set_rx_antenna)
2733 phy->ops->set_rx_antenna(dev, B43_ANTENNA_DEFAULT);
2741 b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT); 2734 b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT);
2742 2735
2743 if (phy->type == B43_PHYTYPE_B) { 2736 if (phy->type == B43_PHYTYPE_B) {
@@ -2790,8 +2783,6 @@ static int b43_chip_init(struct b43_wldev *dev)
2790out: 2783out:
2791 return err; 2784 return err;
2792 2785
2793err_radio_off:
2794 b43_radio_turn_off(dev, 1);
2795err_gpio_clean: 2786err_gpio_clean:
2796 b43_gpio_cleanup(dev); 2787 b43_gpio_cleanup(dev);
2797 return err; 2788 return err;
@@ -2799,25 +2790,13 @@ err_gpio_clean:
2799 2790
2800static void b43_periodic_every60sec(struct b43_wldev *dev) 2791static void b43_periodic_every60sec(struct b43_wldev *dev)
2801{ 2792{
2802 struct b43_phy *phy = &dev->phy; 2793 const struct b43_phy_operations *ops = dev->phy.ops;
2803 2794
2804 if (phy->type != B43_PHYTYPE_G) 2795 if (ops->pwork_60sec)
2805 return; 2796 ops->pwork_60sec(dev);
2806 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) { 2797
2807 b43_mac_suspend(dev); 2798 /* Force check the TX power emission now. */
2808 b43_calc_nrssi_slope(dev); 2799 b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME);
2809 if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) {
2810 u8 old_chan = phy->channel;
2811
2812 /* VCO Calibration */
2813 if (old_chan >= 8)
2814 b43_radio_selectchannel(dev, 1, 0);
2815 else
2816 b43_radio_selectchannel(dev, 13, 0);
2817 b43_radio_selectchannel(dev, old_chan, 0);
2818 }
2819 b43_mac_enable(dev);
2820 }
2821} 2800}
2822 2801
2823static void b43_periodic_every30sec(struct b43_wldev *dev) 2802static void b43_periodic_every30sec(struct b43_wldev *dev)
@@ -2845,32 +2824,8 @@ static void b43_periodic_every15sec(struct b43_wldev *dev)
2845 } 2824 }
2846 } 2825 }
2847 2826
2848 if (phy->type == B43_PHYTYPE_G) { 2827 if (phy->ops->pwork_15sec)
2849 //TODO: update_aci_moving_average 2828 phy->ops->pwork_15sec(dev);
2850 if (phy->aci_enable && phy->aci_wlan_automatic) {
2851 b43_mac_suspend(dev);
2852 if (!phy->aci_enable && 1 /*TODO: not scanning? */ ) {
2853 if (0 /*TODO: bunch of conditions */ ) {
2854 b43_radio_set_interference_mitigation
2855 (dev, B43_INTERFMODE_MANUALWLAN);
2856 }
2857 } else if (1 /*TODO*/) {
2858 /*
2859 if ((aci_average > 1000) && !(b43_radio_aci_scan(dev))) {
2860 b43_radio_set_interference_mitigation(dev,
2861 B43_INTERFMODE_NONE);
2862 }
2863 */
2864 }
2865 b43_mac_enable(dev);
2866 } else if (phy->interfmode == B43_INTERFMODE_NONWLAN &&
2867 phy->rev == 1) {
2868 //TODO: implement rev1 workaround
2869 }
2870 }
2871 b43_phy_xmitpower(dev); //FIXME: unless scanning?
2872 b43_lo_g_maintanance_work(dev);
2873 //TODO for APHY (temperature?)
2874 2829
2875 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); 2830 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
2876 wmb(); 2831 wmb();
@@ -3104,36 +3059,31 @@ static void b43_qos_params_upload(struct b43_wldev *dev,
3104 } 3059 }
3105} 3060}
3106 3061
3107/* Update the QOS parameters in hardware. */ 3062/* Mapping of mac80211 queue numbers to b43 QoS SHM offsets. */
3108static void b43_qos_update(struct b43_wldev *dev) 3063static const u16 b43_qos_shm_offsets[] = {
3064 /* [mac80211-queue-nr] = SHM_OFFSET, */
3065 [0] = B43_QOS_VOICE,
3066 [1] = B43_QOS_VIDEO,
3067 [2] = B43_QOS_BESTEFFORT,
3068 [3] = B43_QOS_BACKGROUND,
3069};
3070
3071/* Update all QOS parameters in hardware. */
3072static void b43_qos_upload_all(struct b43_wldev *dev)
3109{ 3073{
3110 struct b43_wl *wl = dev->wl; 3074 struct b43_wl *wl = dev->wl;
3111 struct b43_qos_params *params; 3075 struct b43_qos_params *params;
3112 unsigned long flags;
3113 unsigned int i; 3076 unsigned int i;
3114 3077
3115 /* Mapping of mac80211 queues to b43 SHM offsets. */ 3078 BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) !=
3116 static const u16 qos_shm_offsets[] = { 3079 ARRAY_SIZE(wl->qos_params));
3117 [0] = B43_QOS_VOICE,
3118 [1] = B43_QOS_VIDEO,
3119 [2] = B43_QOS_BESTEFFORT,
3120 [3] = B43_QOS_BACKGROUND,
3121 };
3122 BUILD_BUG_ON(ARRAY_SIZE(qos_shm_offsets) != ARRAY_SIZE(wl->qos_params));
3123 3080
3124 b43_mac_suspend(dev); 3081 b43_mac_suspend(dev);
3125 spin_lock_irqsave(&wl->irq_lock, flags);
3126
3127 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) { 3082 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) {
3128 params = &(wl->qos_params[i]); 3083 params = &(wl->qos_params[i]);
3129 if (params->need_hw_update) { 3084 b43_qos_params_upload(dev, &(params->p),
3130 b43_qos_params_upload(dev, &(params->p), 3085 b43_qos_shm_offsets[i]);
3131 qos_shm_offsets[i]);
3132 params->need_hw_update = 0;
3133 }
3134 } 3086 }
3135
3136 spin_unlock_irqrestore(&wl->irq_lock, flags);
3137 b43_mac_enable(dev); 3087 b43_mac_enable(dev);
3138} 3088}
3139 3089
@@ -3142,25 +3092,50 @@ static void b43_qos_clear(struct b43_wl *wl)
3142 struct b43_qos_params *params; 3092 struct b43_qos_params *params;
3143 unsigned int i; 3093 unsigned int i;
3144 3094
3095 /* Initialize QoS parameters to sane defaults. */
3096
3097 BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) !=
3098 ARRAY_SIZE(wl->qos_params));
3099
3145 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) { 3100 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) {
3146 params = &(wl->qos_params[i]); 3101 params = &(wl->qos_params[i]);
3147 3102
3148 memset(&(params->p), 0, sizeof(params->p)); 3103 switch (b43_qos_shm_offsets[i]) {
3149 params->p.aifs = -1; 3104 case B43_QOS_VOICE:
3150 params->need_hw_update = 1; 3105 params->p.txop = 0;
3106 params->p.aifs = 2;
3107 params->p.cw_min = 0x0001;
3108 params->p.cw_max = 0x0001;
3109 break;
3110 case B43_QOS_VIDEO:
3111 params->p.txop = 0;
3112 params->p.aifs = 2;
3113 params->p.cw_min = 0x0001;
3114 params->p.cw_max = 0x0001;
3115 break;
3116 case B43_QOS_BESTEFFORT:
3117 params->p.txop = 0;
3118 params->p.aifs = 3;
3119 params->p.cw_min = 0x0001;
3120 params->p.cw_max = 0x03FF;
3121 break;
3122 case B43_QOS_BACKGROUND:
3123 params->p.txop = 0;
3124 params->p.aifs = 7;
3125 params->p.cw_min = 0x0001;
3126 params->p.cw_max = 0x03FF;
3127 break;
3128 default:
3129 B43_WARN_ON(1);
3130 }
3151 } 3131 }
3152} 3132}
3153 3133
3154/* Initialize the core's QOS capabilities */ 3134/* Initialize the core's QOS capabilities */
3155static void b43_qos_init(struct b43_wldev *dev) 3135static void b43_qos_init(struct b43_wldev *dev)
3156{ 3136{
3157 struct b43_wl *wl = dev->wl;
3158 unsigned int i;
3159
3160 /* Upload the current QOS parameters. */ 3137 /* Upload the current QOS parameters. */
3161 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) 3138 b43_qos_upload_all(dev);
3162 wl->qos_params[i].need_hw_update = 1;
3163 b43_qos_update(dev);
3164 3139
3165 /* Enable QOS support. */ 3140 /* Enable QOS support. */
3166 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF); 3141 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF);
@@ -3169,25 +3144,13 @@ static void b43_qos_init(struct b43_wldev *dev)
3169 | B43_MMIO_IFSCTL_USE_EDCF); 3144 | B43_MMIO_IFSCTL_USE_EDCF);
3170} 3145}
3171 3146
3172static void b43_qos_update_work(struct work_struct *work)
3173{
3174 struct b43_wl *wl = container_of(work, struct b43_wl, qos_update_work);
3175 struct b43_wldev *dev;
3176
3177 mutex_lock(&wl->mutex);
3178 dev = wl->current_dev;
3179 if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED)))
3180 b43_qos_update(dev);
3181 mutex_unlock(&wl->mutex);
3182}
3183
3184static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue, 3147static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue,
3185 const struct ieee80211_tx_queue_params *params) 3148 const struct ieee80211_tx_queue_params *params)
3186{ 3149{
3187 struct b43_wl *wl = hw_to_b43_wl(hw); 3150 struct b43_wl *wl = hw_to_b43_wl(hw);
3188 unsigned long flags; 3151 struct b43_wldev *dev;
3189 unsigned int queue = (unsigned int)_queue; 3152 unsigned int queue = (unsigned int)_queue;
3190 struct b43_qos_params *p; 3153 int err = -ENODEV;
3191 3154
3192 if (queue >= ARRAY_SIZE(wl->qos_params)) { 3155 if (queue >= ARRAY_SIZE(wl->qos_params)) {
3193 /* Queue not available or don't support setting 3156 /* Queue not available or don't support setting
@@ -3195,16 +3158,25 @@ static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue,
3195 * confuse mac80211. */ 3158 * confuse mac80211. */
3196 return 0; 3159 return 0;
3197 } 3160 }
3161 BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) !=
3162 ARRAY_SIZE(wl->qos_params));
3198 3163
3199 spin_lock_irqsave(&wl->irq_lock, flags); 3164 mutex_lock(&wl->mutex);
3200 p = &(wl->qos_params[queue]); 3165 dev = wl->current_dev;
3201 memcpy(&(p->p), params, sizeof(p->p)); 3166 if (unlikely(!dev || (b43_status(dev) < B43_STAT_INITIALIZED)))
3202 p->need_hw_update = 1; 3167 goto out_unlock;
3203 spin_unlock_irqrestore(&wl->irq_lock, flags);
3204 3168
3205 queue_work(hw->workqueue, &wl->qos_update_work); 3169 memcpy(&(wl->qos_params[queue].p), params, sizeof(*params));
3170 b43_mac_suspend(dev);
3171 b43_qos_params_upload(dev, &(wl->qos_params[queue].p),
3172 b43_qos_shm_offsets[queue]);
3173 b43_mac_enable(dev);
3174 err = 0;
3206 3175
3207 return 0; 3176out_unlock:
3177 mutex_unlock(&wl->mutex);
3178
3179 return err;
3208} 3180}
3209 3181
3210static int b43_op_get_tx_stats(struct ieee80211_hw *hw, 3182static int b43_op_get_tx_stats(struct ieee80211_hw *hw,
@@ -3401,7 +3373,7 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3401 /* Switch to the requested channel. 3373 /* Switch to the requested channel.
3402 * The firmware takes care of races with the TX handler. */ 3374 * The firmware takes care of races with the TX handler. */
3403 if (conf->channel->hw_value != phy->channel) 3375 if (conf->channel->hw_value != phy->channel)
3404 b43_radio_selectchannel(dev, conf->channel->hw_value, 0); 3376 b43_switch_channel(dev, conf->channel->hw_value);
3405 3377
3406 /* Enable/Disable ShortSlot timing. */ 3378 /* Enable/Disable ShortSlot timing. */
3407 if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) != 3379 if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) !=
@@ -3417,26 +3389,30 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3417 3389
3418 /* Adjust the desired TX power level. */ 3390 /* Adjust the desired TX power level. */
3419 if (conf->power_level != 0) { 3391 if (conf->power_level != 0) {
3420 if (conf->power_level != phy->power_level) { 3392 spin_lock_irqsave(&wl->irq_lock, flags);
3421 phy->power_level = conf->power_level; 3393 if (conf->power_level != phy->desired_txpower) {
3422 b43_phy_xmitpower(dev); 3394 phy->desired_txpower = conf->power_level;
3395 b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME |
3396 B43_TXPWR_IGNORE_TSSI);
3423 } 3397 }
3398 spin_unlock_irqrestore(&wl->irq_lock, flags);
3424 } 3399 }
3425 3400
3426 /* Antennas for RX and management frame TX. */ 3401 /* Antennas for RX and management frame TX. */
3427 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_tx); 3402 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_tx);
3428 b43_mgmtframe_txantenna(dev, antenna); 3403 b43_mgmtframe_txantenna(dev, antenna);
3429 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_rx); 3404 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_rx);
3430 b43_set_rx_antenna(dev, antenna); 3405 if (phy->ops->set_rx_antenna)
3406 phy->ops->set_rx_antenna(dev, antenna);
3431 3407
3432 /* Update templates for AP/mesh mode. */ 3408 /* Update templates for AP/mesh mode. */
3433 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) || 3409 if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
3434 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) 3410 b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
3435 b43_set_beacon_int(dev, conf->beacon_int); 3411 b43_set_beacon_int(dev, conf->beacon_int);
3436 3412
3437 if (!!conf->radio_enabled != phy->radio_on) { 3413 if (!!conf->radio_enabled != phy->radio_on) {
3438 if (conf->radio_enabled) { 3414 if (conf->radio_enabled) {
3439 b43_radio_turn_on(dev); 3415 b43_software_rfkill(dev, RFKILL_STATE_UNBLOCKED);
3440 b43info(dev->wl, "Radio turned on by software\n"); 3416 b43info(dev->wl, "Radio turned on by software\n");
3441 if (!dev->radio_hw_enable) { 3417 if (!dev->radio_hw_enable) {
3442 b43info(dev->wl, "The hardware RF-kill button " 3418 b43info(dev->wl, "The hardware RF-kill button "
@@ -3444,7 +3420,7 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3444 "Press the button to turn it on.\n"); 3420 "Press the button to turn it on.\n");
3445 } 3421 }
3446 } else { 3422 } else {
3447 b43_radio_turn_off(dev, 0); 3423 b43_software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);
3448 b43info(dev->wl, "Radio turned off by software\n"); 3424 b43info(dev->wl, "Radio turned off by software\n");
3449 } 3425 }
3450 } 3426 }
@@ -3619,14 +3595,14 @@ static int b43_op_config_interface(struct ieee80211_hw *hw,
3619 else 3595 else
3620 memset(wl->bssid, 0, ETH_ALEN); 3596 memset(wl->bssid, 0, ETH_ALEN);
3621 if (b43_status(dev) >= B43_STAT_INITIALIZED) { 3597 if (b43_status(dev) >= B43_STAT_INITIALIZED) {
3622 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) || 3598 if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
3623 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) { 3599 b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) {
3624 B43_WARN_ON(vif->type != wl->if_type); 3600 B43_WARN_ON(vif->type != wl->if_type);
3625 if (conf->changed & IEEE80211_IFCC_SSID) 3601 if (conf->changed & IEEE80211_IFCC_SSID)
3626 b43_set_ssid(dev, conf->ssid, conf->ssid_len); 3602 b43_set_ssid(dev, conf->ssid, conf->ssid_len);
3627 if (conf->changed & IEEE80211_IFCC_BEACON) 3603 if (conf->changed & IEEE80211_IFCC_BEACON)
3628 b43_update_templates(wl); 3604 b43_update_templates(wl);
3629 } else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) { 3605 } else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) {
3630 if (conf->changed & IEEE80211_IFCC_BEACON) 3606 if (conf->changed & IEEE80211_IFCC_BEACON)
3631 b43_update_templates(wl); 3607 b43_update_templates(wl);
3632 } 3608 }
@@ -3818,48 +3794,10 @@ static int b43_phy_versioning(struct b43_wldev *dev)
3818static void setup_struct_phy_for_init(struct b43_wldev *dev, 3794static void setup_struct_phy_for_init(struct b43_wldev *dev,
3819 struct b43_phy *phy) 3795 struct b43_phy *phy)
3820{ 3796{
3821 struct b43_txpower_lo_control *lo;
3822 int i;
3823
3824 memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
3825 memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos));
3826
3827 phy->aci_enable = 0;
3828 phy->aci_wlan_automatic = 0;
3829 phy->aci_hw_rssi = 0;
3830
3831 phy->radio_off_context.valid = 0;
3832
3833 lo = phy->lo_control;
3834 if (lo) {
3835 memset(lo, 0, sizeof(*(phy->lo_control)));
3836 lo->tx_bias = 0xFF;
3837 INIT_LIST_HEAD(&lo->calib_list);
3838 }
3839 phy->max_lb_gain = 0;
3840 phy->trsw_rx_gain = 0;
3841 phy->txpwr_offset = 0;
3842
3843 /* NRSSI */
3844 phy->nrssislope = 0;
3845 for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++)
3846 phy->nrssi[i] = -1000;
3847 for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++)
3848 phy->nrssi_lt[i] = i;
3849
3850 phy->lofcal = 0xFFFF;
3851 phy->initval = 0xFFFF;
3852
3853 phy->interfmode = B43_INTERFMODE_NONE;
3854 phy->channel = 0xFF;
3855
3856 phy->hardware_power_control = !!modparam_hwpctl; 3797 phy->hardware_power_control = !!modparam_hwpctl;
3857 3798 phy->next_txpwr_check_time = jiffies;
3858 /* PHY TX errors counter. */ 3799 /* PHY TX errors counter. */
3859 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); 3800 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
3860
3861 /* OFDM-table address caching. */
3862 phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_UNKNOWN;
3863} 3801}
3864 3802
3865static void setup_struct_wldev_for_init(struct b43_wldev *dev) 3803static void setup_struct_wldev_for_init(struct b43_wldev *dev)
@@ -3965,7 +3903,7 @@ static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle)
3965 pu_delay = 3700; 3903 pu_delay = 3700;
3966 else 3904 else
3967 pu_delay = 1050; 3905 pu_delay = 1050;
3968 if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS) || idle) 3906 if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle)
3969 pu_delay = 500; 3907 pu_delay = 500;
3970 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) 3908 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
3971 pu_delay = max(pu_delay, (u16)2400); 3909 pu_delay = max(pu_delay, (u16)2400);
@@ -3979,7 +3917,7 @@ static void b43_set_pretbtt(struct b43_wldev *dev)
3979 u16 pretbtt; 3917 u16 pretbtt;
3980 3918
3981 /* The time value is in microseconds. */ 3919 /* The time value is in microseconds. */
3982 if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) { 3920 if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) {
3983 pretbtt = 2; 3921 pretbtt = 2;
3984 } else { 3922 } else {
3985 if (dev->phy.type == B43_PHYTYPE_A) 3923 if (dev->phy.type == B43_PHYTYPE_A)
@@ -3995,7 +3933,6 @@ static void b43_set_pretbtt(struct b43_wldev *dev)
3995/* Locking: wl->mutex */ 3933/* Locking: wl->mutex */
3996static void b43_wireless_core_exit(struct b43_wldev *dev) 3934static void b43_wireless_core_exit(struct b43_wldev *dev)
3997{ 3935{
3998 struct b43_phy *phy = &dev->phy;
3999 u32 macctl; 3936 u32 macctl;
4000 3937
4001 B43_WARN_ON(b43_status(dev) > B43_STAT_INITIALIZED); 3938 B43_WARN_ON(b43_status(dev) > B43_STAT_INITIALIZED);
@@ -4016,12 +3953,7 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
4016 b43_dma_free(dev); 3953 b43_dma_free(dev);
4017 b43_pio_free(dev); 3954 b43_pio_free(dev);
4018 b43_chip_exit(dev); 3955 b43_chip_exit(dev);
4019 b43_radio_turn_off(dev, 1); 3956 dev->phy.ops->switch_analog(dev, 0);
4020 b43_switch_analog(dev, 0);
4021 if (phy->dyn_tssi_tbl)
4022 kfree(phy->tssi2dbm);
4023 kfree(phy->lo_control);
4024 phy->lo_control = NULL;
4025 if (dev->wl->current_beacon) { 3957 if (dev->wl->current_beacon) {
4026 dev_kfree_skb_any(dev->wl->current_beacon); 3958 dev_kfree_skb_any(dev->wl->current_beacon);
4027 dev->wl->current_beacon = NULL; 3959 dev->wl->current_beacon = NULL;
@@ -4052,29 +3984,23 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4052 b43_wireless_core_reset(dev, tmp); 3984 b43_wireless_core_reset(dev, tmp);
4053 } 3985 }
4054 3986
4055 if ((phy->type == B43_PHYTYPE_B) || (phy->type == B43_PHYTYPE_G)) { 3987 /* Reset all data structures. */
4056 phy->lo_control =
4057 kzalloc(sizeof(*(phy->lo_control)), GFP_KERNEL);
4058 if (!phy->lo_control) {
4059 err = -ENOMEM;
4060 goto err_busdown;
4061 }
4062 }
4063 setup_struct_wldev_for_init(dev); 3988 setup_struct_wldev_for_init(dev);
4064 3989 phy->ops->prepare_structs(dev);
4065 err = b43_phy_init_tssi2dbm_table(dev);
4066 if (err)
4067 goto err_kfree_lo_control;
4068 3990
4069 /* Enable IRQ routing to this device. */ 3991 /* Enable IRQ routing to this device. */
4070 ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); 3992 ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev);
4071 3993
4072 b43_imcfglo_timeouts_workaround(dev); 3994 b43_imcfglo_timeouts_workaround(dev);
4073 b43_bluetooth_coext_disable(dev); 3995 b43_bluetooth_coext_disable(dev);
4074 b43_phy_early_init(dev); 3996 if (phy->ops->prepare_hardware) {
3997 err = phy->ops->prepare_hardware(dev);
3998 if (err)
3999 goto err_busdown;
4000 }
4075 err = b43_chip_init(dev); 4001 err = b43_chip_init(dev);
4076 if (err) 4002 if (err)
4077 goto err_kfree_tssitbl; 4003 goto err_busdown;
4078 b43_shm_write16(dev, B43_SHM_SHARED, 4004 b43_shm_write16(dev, B43_SHM_SHARED,
4079 B43_SHM_SH_WLCOREREV, dev->dev->id.revision); 4005 B43_SHM_SH_WLCOREREV, dev->dev->id.revision);
4080 hf = b43_hf_read(dev); 4006 hf = b43_hf_read(dev);
@@ -4140,15 +4066,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4140out: 4066out:
4141 return err; 4067 return err;
4142 4068
4143 err_chip_exit: 4069err_chip_exit:
4144 b43_chip_exit(dev); 4070 b43_chip_exit(dev);
4145 err_kfree_tssitbl: 4071err_busdown:
4146 if (phy->dyn_tssi_tbl)
4147 kfree(phy->tssi2dbm);
4148 err_kfree_lo_control:
4149 kfree(phy->lo_control);
4150 phy->lo_control = NULL;
4151 err_busdown:
4152 ssb_bus_may_powerdown(bus); 4072 ssb_bus_may_powerdown(bus);
4153 B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); 4073 B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT);
4154 return err; 4074 return err;
@@ -4164,11 +4084,11 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
4164 4084
4165 /* TODO: allow WDS/AP devices to coexist */ 4085 /* TODO: allow WDS/AP devices to coexist */
4166 4086
4167 if (conf->type != IEEE80211_IF_TYPE_AP && 4087 if (conf->type != NL80211_IFTYPE_AP &&
4168 conf->type != IEEE80211_IF_TYPE_MESH_POINT && 4088 conf->type != NL80211_IFTYPE_MESH_POINT &&
4169 conf->type != IEEE80211_IF_TYPE_STA && 4089 conf->type != NL80211_IFTYPE_STATION &&
4170 conf->type != IEEE80211_IF_TYPE_WDS && 4090 conf->type != NL80211_IFTYPE_WDS &&
4171 conf->type != IEEE80211_IF_TYPE_IBSS) 4091 conf->type != NL80211_IFTYPE_ADHOC)
4172 return -EOPNOTSUPP; 4092 return -EOPNOTSUPP;
4173 4093
4174 mutex_lock(&wl->mutex); 4094 mutex_lock(&wl->mutex);
@@ -4283,7 +4203,6 @@ static void b43_op_stop(struct ieee80211_hw *hw)
4283 struct b43_wldev *dev = wl->current_dev; 4203 struct b43_wldev *dev = wl->current_dev;
4284 4204
4285 b43_rfkill_exit(dev); 4205 b43_rfkill_exit(dev);
4286 cancel_work_sync(&(wl->qos_update_work));
4287 cancel_work_sync(&(wl->beacon_update_trigger)); 4206 cancel_work_sync(&(wl->beacon_update_trigger));
4288 4207
4289 mutex_lock(&wl->mutex); 4208 mutex_lock(&wl->mutex);
@@ -4291,6 +4210,8 @@ static void b43_op_stop(struct ieee80211_hw *hw)
4291 b43_wireless_core_stop(dev); 4210 b43_wireless_core_stop(dev);
4292 b43_wireless_core_exit(dev); 4211 b43_wireless_core_exit(dev);
4293 mutex_unlock(&wl->mutex); 4212 mutex_unlock(&wl->mutex);
4213
4214 cancel_work_sync(&(wl->txpower_adjust_work));
4294} 4215}
4295 4216
4296static int b43_op_set_retry_limit(struct ieee80211_hw *hw, 4217static int b43_op_set_retry_limit(struct ieee80211_hw *hw,
@@ -4313,7 +4234,8 @@ out_unlock:
4313 return err; 4234 return err;
4314} 4235}
4315 4236
4316static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, int aid, int set) 4237static int b43_op_beacon_set_tim(struct ieee80211_hw *hw,
4238 struct ieee80211_sta *sta, bool set)
4317{ 4239{
4318 struct b43_wl *wl = hw_to_b43_wl(hw); 4240 struct b43_wl *wl = hw_to_b43_wl(hw);
4319 unsigned long flags; 4241 unsigned long flags;
@@ -4328,7 +4250,7 @@ static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, int aid, int set)
4328static void b43_op_sta_notify(struct ieee80211_hw *hw, 4250static void b43_op_sta_notify(struct ieee80211_hw *hw,
4329 struct ieee80211_vif *vif, 4251 struct ieee80211_vif *vif,
4330 enum sta_notify_cmd notify_cmd, 4252 enum sta_notify_cmd notify_cmd,
4331 const u8 *addr) 4253 struct ieee80211_sta *sta)
4332{ 4254{
4333 struct b43_wl *wl = hw_to_b43_wl(hw); 4255 struct b43_wl *wl = hw_to_b43_wl(hw);
4334 4256
@@ -4422,6 +4344,7 @@ static void b43_wireless_core_detach(struct b43_wldev *dev)
4422 /* We release firmware that late to not be required to re-request 4344 /* We release firmware that late to not be required to re-request
4423 * is all the time when we reinit the core. */ 4345 * is all the time when we reinit the core. */
4424 b43_release_firmware(dev); 4346 b43_release_firmware(dev);
4347 b43_phy_free(dev);
4425} 4348}
4426 4349
4427static int b43_wireless_core_attach(struct b43_wldev *dev) 4350static int b43_wireless_core_attach(struct b43_wldev *dev)
@@ -4495,30 +4418,35 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
4495 } 4418 }
4496 } 4419 }
4497 4420
4421 err = b43_phy_allocate(dev);
4422 if (err)
4423 goto err_powerdown;
4424
4498 dev->phy.gmode = have_2ghz_phy; 4425 dev->phy.gmode = have_2ghz_phy;
4499 tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; 4426 tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
4500 b43_wireless_core_reset(dev, tmp); 4427 b43_wireless_core_reset(dev, tmp);
4501 4428
4502 err = b43_validate_chipaccess(dev); 4429 err = b43_validate_chipaccess(dev);
4503 if (err) 4430 if (err)
4504 goto err_powerdown; 4431 goto err_phy_free;
4505 err = b43_setup_bands(dev, have_2ghz_phy, have_5ghz_phy); 4432 err = b43_setup_bands(dev, have_2ghz_phy, have_5ghz_phy);
4506 if (err) 4433 if (err)
4507 goto err_powerdown; 4434 goto err_phy_free;
4508 4435
4509 /* Now set some default "current_dev" */ 4436 /* Now set some default "current_dev" */
4510 if (!wl->current_dev) 4437 if (!wl->current_dev)
4511 wl->current_dev = dev; 4438 wl->current_dev = dev;
4512 INIT_WORK(&dev->restart_work, b43_chip_reset); 4439 INIT_WORK(&dev->restart_work, b43_chip_reset);
4513 4440
4514 b43_radio_turn_off(dev, 1); 4441 dev->phy.ops->switch_analog(dev, 0);
4515 b43_switch_analog(dev, 0);
4516 ssb_device_disable(dev->dev, 0); 4442 ssb_device_disable(dev->dev, 0);
4517 ssb_bus_may_powerdown(bus); 4443 ssb_bus_may_powerdown(bus);
4518 4444
4519out: 4445out:
4520 return err; 4446 return err;
4521 4447
4448err_phy_free:
4449 b43_phy_free(dev);
4522err_powerdown: 4450err_powerdown:
4523 ssb_bus_may_powerdown(bus); 4451 ssb_bus_may_powerdown(bus);
4524 return err; 4452 return err;
@@ -4650,6 +4578,13 @@ static int b43_wireless_init(struct ssb_device *dev)
4650 IEEE80211_HW_SIGNAL_DBM | 4578 IEEE80211_HW_SIGNAL_DBM |
4651 IEEE80211_HW_NOISE_DBM; 4579 IEEE80211_HW_NOISE_DBM;
4652 4580
4581 hw->wiphy->interface_modes =
4582 BIT(NL80211_IFTYPE_AP) |
4583 BIT(NL80211_IFTYPE_MESH_POINT) |
4584 BIT(NL80211_IFTYPE_STATION) |
4585 BIT(NL80211_IFTYPE_WDS) |
4586 BIT(NL80211_IFTYPE_ADHOC);
4587
4653 hw->queues = b43_modparam_qos ? 4 : 1; 4588 hw->queues = b43_modparam_qos ? 4 : 1;
4654 SET_IEEE80211_DEV(hw, dev->dev); 4589 SET_IEEE80211_DEV(hw, dev->dev);
4655 if (is_valid_ether_addr(sprom->et1mac)) 4590 if (is_valid_ether_addr(sprom->et1mac))
@@ -4667,8 +4602,8 @@ static int b43_wireless_init(struct ssb_device *dev)
4667 spin_lock_init(&wl->shm_lock); 4602 spin_lock_init(&wl->shm_lock);
4668 mutex_init(&wl->mutex); 4603 mutex_init(&wl->mutex);
4669 INIT_LIST_HEAD(&wl->devlist); 4604 INIT_LIST_HEAD(&wl->devlist);
4670 INIT_WORK(&wl->qos_update_work, b43_qos_update_work);
4671 INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work); 4605 INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
4606 INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
4672 4607
4673 ssb_set_devtypedata(dev, wl); 4608 ssb_set_devtypedata(dev, wl);
4674 b43info(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id); 4609 b43info(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id);
diff --git a/drivers/net/wireless/b43/phy.h b/drivers/net/wireless/b43/phy.h
deleted file mode 100644
index 4aab10903529..000000000000
--- a/drivers/net/wireless/b43/phy.h
+++ /dev/null
@@ -1,340 +0,0 @@
1#ifndef B43_PHY_H_
2#define B43_PHY_H_
3
4#include <linux/types.h>
5
6struct b43_wldev;
7struct b43_phy;
8
9/*** PHY Registers ***/
10
11/* Routing */
12#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
13#define B43_PHYROUTE_BASE 0x0000 /* Base registers */
14#define B43_PHYROUTE_OFDM_GPHY 0x0400 /* OFDM register routing for G-PHYs */
15#define B43_PHYROUTE_EXT_GPHY 0x0800 /* Extended G-PHY registers */
16#define B43_PHYROUTE_N_BMODE 0x0C00 /* N-PHY BMODE registers */
17
18/* CCK (B-PHY) registers. */
19#define B43_PHY_CCK(reg) ((reg) | B43_PHYROUTE_BASE)
20/* N-PHY registers. */
21#define B43_PHY_N(reg) ((reg) | B43_PHYROUTE_BASE)
22/* N-PHY BMODE registers. */
23#define B43_PHY_N_BMODE(reg) ((reg) | B43_PHYROUTE_N_BMODE)
24/* OFDM (A-PHY) registers. */
25#define B43_PHY_OFDM(reg) ((reg) | B43_PHYROUTE_OFDM_GPHY)
26/* Extended G-PHY registers. */
27#define B43_PHY_EXTG(reg) ((reg) | B43_PHYROUTE_EXT_GPHY)
28
29/* OFDM (A) PHY Registers */
30#define B43_PHY_VERSION_OFDM B43_PHY_OFDM(0x00) /* Versioning register for A-PHY */
31#define B43_PHY_BBANDCFG B43_PHY_OFDM(0x01) /* Baseband config */
32#define B43_PHY_BBANDCFG_RXANT 0x180 /* RX Antenna selection */
33#define B43_PHY_BBANDCFG_RXANT_SHIFT 7
34#define B43_PHY_PWRDOWN B43_PHY_OFDM(0x03) /* Powerdown */
35#define B43_PHY_CRSTHRES1_R1 B43_PHY_OFDM(0x06) /* CRS Threshold 1 (phy.rev 1 only) */
36#define B43_PHY_LNAHPFCTL B43_PHY_OFDM(0x1C) /* LNA/HPF control */
37#define B43_PHY_LPFGAINCTL B43_PHY_OFDM(0x20) /* LPF Gain control */
38#define B43_PHY_ADIVRELATED B43_PHY_OFDM(0x27) /* FIXME rename */
39#define B43_PHY_CRS0 B43_PHY_OFDM(0x29)
40#define B43_PHY_CRS0_EN 0x4000
41#define B43_PHY_PEAK_COUNT B43_PHY_OFDM(0x30)
42#define B43_PHY_ANTDWELL B43_PHY_OFDM(0x2B) /* Antenna dwell */
43#define B43_PHY_ANTDWELL_AUTODIV1 0x0100 /* Automatic RX diversity start antenna */
44#define B43_PHY_ENCORE B43_PHY_OFDM(0x49) /* "Encore" (RangeMax / BroadRange) */
45#define B43_PHY_ENCORE_EN 0x0200 /* Encore enable */
46#define B43_PHY_LMS B43_PHY_OFDM(0x55)
47#define B43_PHY_OFDM61 B43_PHY_OFDM(0x61) /* FIXME rename */
48#define B43_PHY_OFDM61_10 0x0010 /* FIXME rename */
49#define B43_PHY_IQBAL B43_PHY_OFDM(0x69) /* I/Q balance */
50#define B43_PHY_BBTXDC_BIAS B43_PHY_OFDM(0x6B) /* Baseband TX DC bias */
51#define B43_PHY_OTABLECTL B43_PHY_OFDM(0x72) /* OFDM table control (see below) */
52#define B43_PHY_OTABLEOFF 0x03FF /* OFDM table offset (see below) */
53#define B43_PHY_OTABLENR 0xFC00 /* OFDM table number (see below) */
54#define B43_PHY_OTABLENR_SHIFT 10
55#define B43_PHY_OTABLEI B43_PHY_OFDM(0x73) /* OFDM table data I */
56#define B43_PHY_OTABLEQ B43_PHY_OFDM(0x74) /* OFDM table data Q */
57#define B43_PHY_HPWR_TSSICTL B43_PHY_OFDM(0x78) /* Hardware power TSSI control */
58#define B43_PHY_ADCCTL B43_PHY_OFDM(0x7A) /* ADC control */
59#define B43_PHY_IDLE_TSSI B43_PHY_OFDM(0x7B)
60#define B43_PHY_A_TEMP_SENSE B43_PHY_OFDM(0x7C) /* A PHY temperature sense */
61#define B43_PHY_NRSSITHRES B43_PHY_OFDM(0x8A) /* NRSSI threshold */
62#define B43_PHY_ANTWRSETT B43_PHY_OFDM(0x8C) /* Antenna WR settle */
63#define B43_PHY_ANTWRSETT_ARXDIV 0x2000 /* Automatic RX diversity enabled */
64#define B43_PHY_CLIPPWRDOWNT B43_PHY_OFDM(0x93) /* Clip powerdown threshold */
65#define B43_PHY_OFDM9B B43_PHY_OFDM(0x9B) /* FIXME rename */
66#define B43_PHY_N1P1GAIN B43_PHY_OFDM(0xA0)
67#define B43_PHY_P1P2GAIN B43_PHY_OFDM(0xA1)
68#define B43_PHY_N1N2GAIN B43_PHY_OFDM(0xA2)
69#define B43_PHY_CLIPTHRES B43_PHY_OFDM(0xA3)
70#define B43_PHY_CLIPN1P2THRES B43_PHY_OFDM(0xA4)
71#define B43_PHY_CCKSHIFTBITS_WA B43_PHY_OFDM(0xA5) /* CCK shiftbits workaround, FIXME rename */
72#define B43_PHY_CCKSHIFTBITS B43_PHY_OFDM(0xA7) /* FIXME rename */
73#define B43_PHY_DIVSRCHIDX B43_PHY_OFDM(0xA8) /* Divider search gain/index */
74#define B43_PHY_CLIPP2THRES B43_PHY_OFDM(0xA9)
75#define B43_PHY_CLIPP3THRES B43_PHY_OFDM(0xAA)
76#define B43_PHY_DIVP1P2GAIN B43_PHY_OFDM(0xAB)
77#define B43_PHY_DIVSRCHGAINBACK B43_PHY_OFDM(0xAD) /* Divider search gain back */
78#define B43_PHY_DIVSRCHGAINCHNG B43_PHY_OFDM(0xAE) /* Divider search gain change */
79#define B43_PHY_CRSTHRES1 B43_PHY_OFDM(0xC0) /* CRS Threshold 1 (phy.rev >= 2 only) */
80#define B43_PHY_CRSTHRES2 B43_PHY_OFDM(0xC1) /* CRS Threshold 2 (phy.rev >= 2 only) */
81#define B43_PHY_TSSIP_LTBASE B43_PHY_OFDM(0x380) /* TSSI power lookup table base */
82#define B43_PHY_DC_LTBASE B43_PHY_OFDM(0x3A0) /* DC lookup table base */
83#define B43_PHY_GAIN_LTBASE B43_PHY_OFDM(0x3C0) /* Gain lookup table base */
84
85/* CCK (B) PHY Registers */
86#define B43_PHY_VERSION_CCK B43_PHY_CCK(0x00) /* Versioning register for B-PHY */
87#define B43_PHY_CCKBBANDCFG B43_PHY_CCK(0x01) /* Contains antenna 0/1 control bit */
88#define B43_PHY_PGACTL B43_PHY_CCK(0x15) /* PGA control */
89#define B43_PHY_PGACTL_LPF 0x1000 /* Low pass filter (?) */
90#define B43_PHY_PGACTL_LOWBANDW 0x0040 /* Low bandwidth flag */
91#define B43_PHY_PGACTL_UNKNOWN 0xEFA0
92#define B43_PHY_FBCTL1 B43_PHY_CCK(0x18) /* Frequency bandwidth control 1 */
93#define B43_PHY_ITSSI B43_PHY_CCK(0x29) /* Idle TSSI */
94#define B43_PHY_LO_LEAKAGE B43_PHY_CCK(0x2D) /* Measured LO leakage */
95#define B43_PHY_ENERGY B43_PHY_CCK(0x33) /* Energy */
96#define B43_PHY_SYNCCTL B43_PHY_CCK(0x35)
97#define B43_PHY_FBCTL2 B43_PHY_CCK(0x38) /* Frequency bandwidth control 2 */
98#define B43_PHY_DACCTL B43_PHY_CCK(0x60) /* DAC control */
99#define B43_PHY_RCCALOVER B43_PHY_CCK(0x78) /* RC calibration override */
100
101/* Extended G-PHY Registers */
102#define B43_PHY_CLASSCTL B43_PHY_EXTG(0x02) /* Classify control */
103#define B43_PHY_GTABCTL B43_PHY_EXTG(0x03) /* G-PHY table control (see below) */
104#define B43_PHY_GTABOFF 0x03FF /* G-PHY table offset (see below) */
105#define B43_PHY_GTABNR 0xFC00 /* G-PHY table number (see below) */
106#define B43_PHY_GTABNR_SHIFT 10
107#define B43_PHY_GTABDATA B43_PHY_EXTG(0x04) /* G-PHY table data */
108#define B43_PHY_LO_MASK B43_PHY_EXTG(0x0F) /* Local Oscillator control mask */
109#define B43_PHY_LO_CTL B43_PHY_EXTG(0x10) /* Local Oscillator control */
110#define B43_PHY_RFOVER B43_PHY_EXTG(0x11) /* RF override */
111#define B43_PHY_RFOVERVAL B43_PHY_EXTG(0x12) /* RF override value */
112#define B43_PHY_RFOVERVAL_EXTLNA 0x8000
113#define B43_PHY_RFOVERVAL_LNA 0x7000
114#define B43_PHY_RFOVERVAL_LNA_SHIFT 12
115#define B43_PHY_RFOVERVAL_PGA 0x0F00
116#define B43_PHY_RFOVERVAL_PGA_SHIFT 8
117#define B43_PHY_RFOVERVAL_UNK 0x0010 /* Unknown, always set. */
118#define B43_PHY_RFOVERVAL_TRSWRX 0x00E0
119#define B43_PHY_RFOVERVAL_BW 0x0003 /* Bandwidth flags */
120#define B43_PHY_RFOVERVAL_BW_LPF 0x0001 /* Low Pass Filter */
121#define B43_PHY_RFOVERVAL_BW_LBW 0x0002 /* Low Bandwidth (when set), high when unset */
122#define B43_PHY_ANALOGOVER B43_PHY_EXTG(0x14) /* Analog override */
123#define B43_PHY_ANALOGOVERVAL B43_PHY_EXTG(0x15) /* Analog override value */
124
125/*** OFDM table numbers ***/
126#define B43_OFDMTAB(number, offset) (((number) << B43_PHY_OTABLENR_SHIFT) | (offset))
127#define B43_OFDMTAB_AGC1 B43_OFDMTAB(0x00, 0)
128#define B43_OFDMTAB_GAIN0 B43_OFDMTAB(0x00, 0)
129#define B43_OFDMTAB_GAINX B43_OFDMTAB(0x01, 0) //TODO rename
130#define B43_OFDMTAB_GAIN1 B43_OFDMTAB(0x01, 4)
131#define B43_OFDMTAB_AGC3 B43_OFDMTAB(0x02, 0)
132#define B43_OFDMTAB_GAIN2 B43_OFDMTAB(0x02, 3)
133#define B43_OFDMTAB_LNAHPFGAIN1 B43_OFDMTAB(0x03, 0)
134#define B43_OFDMTAB_WRSSI B43_OFDMTAB(0x04, 0)
135#define B43_OFDMTAB_LNAHPFGAIN2 B43_OFDMTAB(0x04, 0)
136#define B43_OFDMTAB_NOISESCALE B43_OFDMTAB(0x05, 0)
137#define B43_OFDMTAB_AGC2 B43_OFDMTAB(0x06, 0)
138#define B43_OFDMTAB_ROTOR B43_OFDMTAB(0x08, 0)
139#define B43_OFDMTAB_ADVRETARD B43_OFDMTAB(0x09, 0)
140#define B43_OFDMTAB_DAC B43_OFDMTAB(0x0C, 0)
141#define B43_OFDMTAB_DC B43_OFDMTAB(0x0E, 7)
142#define B43_OFDMTAB_PWRDYN2 B43_OFDMTAB(0x0E, 12)
143#define B43_OFDMTAB_LNAGAIN B43_OFDMTAB(0x0E, 13)
144#define B43_OFDMTAB_UNKNOWN_0F B43_OFDMTAB(0x0F, 0) //TODO rename
145#define B43_OFDMTAB_UNKNOWN_APHY B43_OFDMTAB(0x0F, 7) //TODO rename
146#define B43_OFDMTAB_LPFGAIN B43_OFDMTAB(0x0F, 12)
147#define B43_OFDMTAB_RSSI B43_OFDMTAB(0x10, 0)
148#define B43_OFDMTAB_UNKNOWN_11 B43_OFDMTAB(0x11, 4) //TODO rename
149#define B43_OFDMTAB_AGC1_R1 B43_OFDMTAB(0x13, 0)
150#define B43_OFDMTAB_GAINX_R1 B43_OFDMTAB(0x14, 0) //TODO remove!
151#define B43_OFDMTAB_MINSIGSQ B43_OFDMTAB(0x14, 0)
152#define B43_OFDMTAB_AGC3_R1 B43_OFDMTAB(0x15, 0)
153#define B43_OFDMTAB_WRSSI_R1 B43_OFDMTAB(0x15, 4)
154#define B43_OFDMTAB_TSSI B43_OFDMTAB(0x15, 0)
155#define B43_OFDMTAB_DACRFPABB B43_OFDMTAB(0x16, 0)
156#define B43_OFDMTAB_DACOFF B43_OFDMTAB(0x17, 0)
157#define B43_OFDMTAB_DCBIAS B43_OFDMTAB(0x18, 0)
158
159u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset);
160void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
161 u16 offset, u16 value);
162u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset);
163void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
164 u16 offset, u32 value);
165
166/*** G-PHY table numbers */
167#define B43_GTAB(number, offset) (((number) << B43_PHY_GTABNR_SHIFT) | (offset))
168#define B43_GTAB_NRSSI B43_GTAB(0x00, 0)
169#define B43_GTAB_TRFEMW B43_GTAB(0x0C, 0x120)
170#define B43_GTAB_ORIGTR B43_GTAB(0x2E, 0x298)
171
172u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset); //TODO implement
173void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value); //TODO implement
174
175#define B43_DEFAULT_CHANNEL_A 36
176#define B43_DEFAULT_CHANNEL_BG 6
177
178enum {
179 B43_ANTENNA0, /* Antenna 0 */
180 B43_ANTENNA1, /* Antenna 0 */
181 B43_ANTENNA_AUTO1, /* Automatic, starting with antenna 1 */
182 B43_ANTENNA_AUTO0, /* Automatic, starting with antenna 0 */
183 B43_ANTENNA2,
184 B43_ANTENNA3 = 8,
185
186 B43_ANTENNA_AUTO = B43_ANTENNA_AUTO0,
187 B43_ANTENNA_DEFAULT = B43_ANTENNA_AUTO,
188};
189
190enum {
191 B43_INTERFMODE_NONE,
192 B43_INTERFMODE_NONWLAN,
193 B43_INTERFMODE_MANUALWLAN,
194 B43_INTERFMODE_AUTOWLAN,
195};
196
197/* Masks for the different PHY versioning registers. */
198#define B43_PHYVER_ANALOG 0xF000
199#define B43_PHYVER_ANALOG_SHIFT 12
200#define B43_PHYVER_TYPE 0x0F00
201#define B43_PHYVER_TYPE_SHIFT 8
202#define B43_PHYVER_VERSION 0x00FF
203
204void b43_phy_lock(struct b43_wldev *dev);
205void b43_phy_unlock(struct b43_wldev *dev);
206
207
208/* Read a value from a PHY register */
209u16 b43_phy_read(struct b43_wldev *dev, u16 offset);
210/* Write a value to a PHY register */
211void b43_phy_write(struct b43_wldev *dev, u16 offset, u16 val);
212/* Mask a PHY register with a mask */
213void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask);
214/* OR a PHY register with a bitmap */
215void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set);
216/* Mask and OR a PHY register with a mask and bitmap */
217void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
218
219
220int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev);
221
222void b43_phy_early_init(struct b43_wldev *dev);
223int b43_phy_init(struct b43_wldev *dev);
224
225void b43_set_rx_antenna(struct b43_wldev *dev, int antenna);
226
227void b43_phy_xmitpower(struct b43_wldev *dev);
228
229/* Returns the boolean whether the board has HardwarePowerControl */
230bool b43_has_hardware_pctl(struct b43_phy *phy);
231/* Returns the boolean whether "TX Magnification" is enabled. */
232#define has_tx_magnification(phy) \
233 (((phy)->rev >= 2) && \
234 ((phy)->radio_ver == 0x2050) && \
235 ((phy)->radio_rev == 8))
236/* Card uses the loopback gain stuff */
237#define has_loopback_gain(phy) \
238 (((phy)->rev > 1) || ((phy)->gmode))
239
240/* Radio Attenuation (RF Attenuation) */
241struct b43_rfatt {
242 u8 att; /* Attenuation value */
243 bool with_padmix; /* Flag, PAD Mixer enabled. */
244};
245struct b43_rfatt_list {
246 /* Attenuation values list */
247 const struct b43_rfatt *list;
248 u8 len;
249 /* Minimum/Maximum attenuation values */
250 u8 min_val;
251 u8 max_val;
252};
253
254/* Returns true, if the values are the same. */
255static inline bool b43_compare_rfatt(const struct b43_rfatt *a,
256 const struct b43_rfatt *b)
257{
258 return ((a->att == b->att) &&
259 (a->with_padmix == b->with_padmix));
260}
261
262/* Baseband Attenuation */
263struct b43_bbatt {
264 u8 att; /* Attenuation value */
265};
266struct b43_bbatt_list {
267 /* Attenuation values list */
268 const struct b43_bbatt *list;
269 u8 len;
270 /* Minimum/Maximum attenuation values */
271 u8 min_val;
272 u8 max_val;
273};
274
275/* Returns true, if the values are the same. */
276static inline bool b43_compare_bbatt(const struct b43_bbatt *a,
277 const struct b43_bbatt *b)
278{
279 return (a->att == b->att);
280}
281
282/* tx_control bits. */
283#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */
284#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */
285#define B43_TXCTL_TXMIX 0x10 /* TX Mixer Gain */
286
287/* Write BasebandAttenuation value to the device. */
288void b43_phy_set_baseband_attenuation(struct b43_wldev *dev,
289 u16 baseband_attenuation);
290
291extern const u8 b43_radio_channel_codes_bg[];
292
293void b43_radio_lock(struct b43_wldev *dev);
294void b43_radio_unlock(struct b43_wldev *dev);
295
296
297/* Read a value from a 16bit radio register */
298u16 b43_radio_read16(struct b43_wldev *dev, u16 offset);
299/* Write a value to a 16bit radio register */
300void b43_radio_write16(struct b43_wldev *dev, u16 offset, u16 val);
301/* Mask a 16bit radio register with a mask */
302void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask);
303/* OR a 16bit radio register with a bitmap */
304void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
305/* Mask and OR a PHY register with a mask and bitmap */
306void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
307
308
309u16 b43_radio_init2050(struct b43_wldev *dev);
310void b43_radio_init2060(struct b43_wldev *dev);
311
312void b43_radio_turn_on(struct b43_wldev *dev);
313void b43_radio_turn_off(struct b43_wldev *dev, bool force);
314
315int b43_radio_selectchannel(struct b43_wldev *dev, u8 channel,
316 int synthetic_pu_workaround);
317
318u8 b43_radio_aci_detect(struct b43_wldev *dev, u8 channel);
319u8 b43_radio_aci_scan(struct b43_wldev *dev);
320
321int b43_radio_set_interference_mitigation(struct b43_wldev *dev, int mode);
322
323void b43_calc_nrssi_slope(struct b43_wldev *dev);
324void b43_calc_nrssi_threshold(struct b43_wldev *dev);
325s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset);
326void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val);
327void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val);
328void b43_nrssi_mem_update(struct b43_wldev *dev);
329
330void b43_radio_set_tx_iq(struct b43_wldev *dev);
331u16 b43_radio_calibrationvalue(struct b43_wldev *dev);
332
333void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
334 int *_bbatt, int *_rfatt);
335
336void b43_set_txpower_g(struct b43_wldev *dev,
337 const struct b43_bbatt *bbatt,
338 const struct b43_rfatt *rfatt, u8 tx_control);
339
340#endif /* B43_PHY_H_ */
diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c
new file mode 100644
index 000000000000..0f1a84c9de61
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_a.c
@@ -0,0 +1,643 @@
1/*
2
3 Broadcom B43 wireless driver
4 IEEE 802.11a PHY driver
5
6 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
7 Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
8 Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
9 Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
10 Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; see the file COPYING. If not, write to
24 the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
25 Boston, MA 02110-1301, USA.
26
27*/
28
29#include "b43.h"
30#include "phy_a.h"
31#include "phy_common.h"
32#include "wa.h"
33#include "tables.h"
34#include "main.h"
35
36
37/* Get the freq, as it has to be written to the device. */
38static inline u16 channel2freq_a(u8 channel)
39{
40 B43_WARN_ON(channel > 200);
41
42 return (5000 + 5 * channel);
43}
44
45static inline u16 freq_r3A_value(u16 frequency)
46{
47 u16 value;
48
49 if (frequency < 5091)
50 value = 0x0040;
51 else if (frequency < 5321)
52 value = 0x0000;
53 else if (frequency < 5806)
54 value = 0x0080;
55 else
56 value = 0x0040;
57
58 return value;
59}
60
#if 0
/* This function converts a TSSI value to dBm in Q5.2 */
static s8 b43_aphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_a *aphy = phy->a;
	s8 dbm = 0;
	s32 tmp;

	/* Offset the measured TSSI by the difference between the target
	 * and the current idle TSSI, then bias it into the 0x00..0xFF
	 * index range of the tssi2dbm lookup table. */
	tmp = (aphy->tgt_idle_tssi - aphy->cur_idle_tssi + tssi);
	tmp += 0x80;
	tmp = clamp_val(tmp, 0x00, 0xFF);
	dbm = aphy->tssi2dbm[tmp];
	//TODO: There's a FIXME on the specs
	/* NOTE(review): currently compiled out (#if 0); cur_idle_tssi is
	 * never set anywhere in this file -- confirm before enabling. */

	return dbm;
}
#endif
79
80void b43_radio_set_tx_iq(struct b43_wldev *dev)
81{
82 static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
83 static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A };
84 u16 tmp = b43_radio_read16(dev, 0x001E);
85 int i, j;
86
87 for (i = 0; i < 5; i++) {
88 for (j = 0; j < 5; j++) {
89 if (tmp == (data_high[i] << 4 | data_low[j])) {
90 b43_phy_write(dev, 0x0069,
91 (i - j) << 8 | 0x00C0);
92 return;
93 }
94 }
95 }
96}
97
/* Tune the 2060 radio to the given A-PHY (5 GHz) channel.
 * The register write sequence follows the reverse-engineered specs;
 * ordering of the writes matters. */
static void aphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
{
	u16 freq, r8, tmp;

	freq = channel2freq_a(channel);

	/* Latch the new frequency while preserving radio register 8. */
	r8 = b43_radio_read16(dev, 0x0008);
	b43_write16(dev, 0x03F0, freq);
	b43_radio_write16(dev, 0x0008, r8);

	//TODO: write max channel TX power? to Radio 0x2D
	tmp = b43_radio_read16(dev, 0x002E);
	tmp &= 0x0080;
	//TODO: OR tmp with the Power out estimation for this channel?
	b43_radio_write16(dev, 0x002E, tmp);

	if (freq >= 4920 && freq <= 5500) {
		/*
		 * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F;
		 *    = (freq * 0.025862069
		 */
		r8 = 3 * freq / 116;	/* is equal to r8 = freq * 0.025862 */
	}
	/* Distribute the derived tuning nibble over the radio registers.
	 * NOTE(review): for freq outside 4920-5500, r8 still holds the
	 * raw value read from register 8 above -- confirm against the
	 * specs whether that is intended. */
	b43_radio_write16(dev, 0x0007, (r8 << 4) | r8);
	b43_radio_write16(dev, 0x0020, (r8 << 4) | r8);
	b43_radio_write16(dev, 0x0021, (r8 << 4) | r8);
	b43_radio_write16(dev, 0x0022, (b43_radio_read16(dev, 0x0022)
					& 0x000F) | (r8 << 4));
	b43_radio_write16(dev, 0x002A, (r8 << 4));
	b43_radio_write16(dev, 0x002B, (r8 << 4));
	b43_radio_write16(dev, 0x0008, (b43_radio_read16(dev, 0x0008)
					& 0x00F0) | (r8 << 4));
	b43_radio_write16(dev, 0x0029, (b43_radio_read16(dev, 0x0029)
					& 0xFF0F) | 0x00B0);
	b43_radio_write16(dev, 0x0035, 0x00AA);
	b43_radio_write16(dev, 0x0036, 0x0085);
	b43_radio_write16(dev, 0x003A, (b43_radio_read16(dev, 0x003A)
					& 0xFF20) |
					freq_r3A_value(freq));
	b43_radio_write16(dev, 0x003D,
			  b43_radio_read16(dev, 0x003D) & 0x00FF);
	b43_radio_write16(dev, 0x0081, (b43_radio_read16(dev, 0x0081)
					& 0xFF7F) | 0x0080);
	b43_radio_write16(dev, 0x0035,
			  b43_radio_read16(dev, 0x0035) & 0xFFEF);
	b43_radio_write16(dev, 0x0035, (b43_radio_read16(dev, 0x0035)
					& 0xFFEF) | 0x0010);
	b43_radio_set_tx_iq(dev);
	//TODO: TSSI2dbm workaround
//FIXME	b43_phy_xmitpower(dev);
}
149
/* Initial setup sequence for the 2060 (A-PHY) radio, per the specs.
 * Finishes by tuning to the default channel and letting the radio
 * settle. Sleeps; must be called from process context. */
void b43_radio_init2060(struct b43_wldev *dev)
{
	b43_radio_write16(dev, 0x0004, 0x00C0);
	b43_radio_write16(dev, 0x0005, 0x0008);
	b43_radio_write16(dev, 0x0009, 0x0040);
	b43_radio_write16(dev, 0x0005, 0x00AA);
	b43_radio_write16(dev, 0x0032, 0x008F);
	b43_radio_write16(dev, 0x0006, 0x008F);
	b43_radio_write16(dev, 0x0034, 0x008F);
	b43_radio_write16(dev, 0x002C, 0x0007);
	b43_radio_write16(dev, 0x0082, 0x0080);
	b43_radio_write16(dev, 0x0080, 0x0000);
	b43_radio_write16(dev, 0x003F, 0x00DA);
	b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008);
	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0010);
	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020);
	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020);
	msleep(1);		/* delay 400usec */

	b43_radio_write16(dev, 0x0081,
			  (b43_radio_read16(dev, 0x0081) & ~0x0020) | 0x0010);
	msleep(1);		/* delay 400usec */

	b43_radio_write16(dev, 0x0005,
			  (b43_radio_read16(dev, 0x0005) & ~0x0008) | 0x0008);
	b43_radio_write16(dev, 0x0085, b43_radio_read16(dev, 0x0085) & ~0x0010);
	b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008);
	b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0040);
	b43_radio_write16(dev, 0x0081,
			  (b43_radio_read16(dev, 0x0081) & ~0x0040) | 0x0040);
	/* NOTE(review): this reads register 0x0081 but writes 0x0005,
	 * which breaks the read-modify-write pattern of the surrounding
	 * lines -- confirm against the 2060 radio specs. */
	b43_radio_write16(dev, 0x0005,
			  (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008);
	b43_phy_write(dev, 0x0063, 0xDDC6);
	b43_phy_write(dev, 0x0069, 0x07BE);
	b43_phy_write(dev, 0x006A, 0x0000);

	/* Tune to the default channel. */
	aphy_channel_switch(dev, dev->phy.ops->get_default_chan(dev));

	msleep(1);
}
190
/* Enable (enable != 0) or disable RSSI AGC by (re)programming the
 * LNA-HPF-gain and WRSSI OFDM tables. The tables and values differ
 * between PHY revisions below 3 and revisions 3+. */
static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable)
{
	int i;

	if (dev->phy.rev < 3) {
		if (enable)
			/* Force both tables to a fixed "enabled" value. */
			for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
				b43_ofdmtab_write16(dev,
					B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8);
				b43_ofdmtab_write16(dev,
					B43_OFDMTAB_WRSSI, i, 0xFFF8);
			}
		else
			/* Restore the default gain table contents. */
			for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
				b43_ofdmtab_write16(dev,
					B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]);
				b43_ofdmtab_write16(dev,
					B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]);
			}
	} else {
		if (enable)
			for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++)
				b43_ofdmtab_write16(dev,
					B43_OFDMTAB_WRSSI, i, 0x0820);
		else
			/* NOTE(review): disable path iterates with
			 * B43_TAB_RSSIAGC2_SIZE while enable uses
			 * B43_TAB_RSSIAGC1_SIZE -- confirm intended. */
			for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++)
				b43_ofdmtab_write16(dev,
					B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]);
	}
}
221
/* Calibration workaround for A-PHY rev >= 3: with carrier sense
 * disabled, sweep radio register 0x13 for the value that minimizes the
 * reading in the OFDM Q table, then program the AGC and clip tables. */
static void b43_phy_ww(struct b43_wldev *dev)
{
	u16 b, curr_s, best_s = 0xFFFF;
	int i;

	/* Disable carrier sense while calibrating. */
	b43_phy_write(dev, B43_PHY_CRS0,
		      b43_phy_read(dev, B43_PHY_CRS0) & ~B43_PHY_CRS0_EN);
	b43_phy_write(dev, B43_PHY_OFDM(0x1B),
		      b43_phy_read(dev, B43_PHY_OFDM(0x1B)) | 0x1000);
	b43_phy_write(dev, B43_PHY_OFDM(0x82),
		      (b43_phy_read(dev, B43_PHY_OFDM(0x82)) & 0xF0FF) | 0x0300);
	b43_radio_write16(dev, 0x0009,
			  b43_radio_read16(dev, 0x0009) | 0x0080);
	b43_radio_write16(dev, 0x0012,
			  (b43_radio_read16(dev, 0x0012) & 0xFFFC) | 0x0002);
	b43_wa_initgains(dev);
	b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5);
	/* Save the powerdown register; force a calibration setting. */
	b = b43_phy_read(dev, B43_PHY_PWRDOWN);
	b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005);
	b43_radio_write16(dev, 0x0004,
			  b43_radio_read16(dev, 0x0004) | 0x0004);
	/* Sweep 0x10..0x20; readings >= 0x80 are treated as negative
	 * (two's complement magnitude). A zero reading wins outright. */
	for (i = 0x10; i <= 0x20; i++) {
		b43_radio_write16(dev, 0x0013, i);
		curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF;
		if (!curr_s) {
			best_s = 0x0000;
			break;
		} else if (curr_s >= 0x0080)
			curr_s = 0x0100 - curr_s;
		if (curr_s < best_s)
			best_s = curr_s;
	}
	/* Restore powerdown; keep the best setting found.
	 * NOTE(review): best_s holds the minimal *reading*, not the
	 * sweep index i that produced it -- confirm against the specs. */
	b43_phy_write(dev, B43_PHY_PWRDOWN, b);
	b43_radio_write16(dev, 0x0004,
			  b43_radio_read16(dev, 0x0004) & 0xFFFB);
	b43_radio_write16(dev, 0x0013, best_s);
	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC);
	b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80);
	b43_phy_write(dev, B43_PHY_OFDM(0xB6), 0x1C00);
	b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0);
	b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0);
	b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF);
	b43_phy_write(dev, B43_PHY_OFDM(0xBB),
		      (b43_phy_read(dev, B43_PHY_OFDM(0xBB)) & 0xF000) | 0x0053);
	b43_phy_write(dev, B43_PHY_OFDM61,
		      (b43_phy_read(dev, B43_PHY_OFDM61) & 0xFE1F) | 0x0120);
	b43_phy_write(dev, B43_PHY_OFDM(0x13),
		      (b43_phy_read(dev, B43_PHY_OFDM(0x13)) & 0x0FFF) | 0x3000);
	b43_phy_write(dev, B43_PHY_OFDM(0x14),
		      (b43_phy_read(dev, B43_PHY_OFDM(0x14)) & 0x0FFF) | 0x3000);
	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017);
	for (i = 0; i < 6; i++)
		b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F);
	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E);
	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011);
	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013);
	b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030);
	/* Re-enable carrier sense. */
	b43_phy_write(dev, B43_PHY_CRS0,
		      b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);
}
282
/* Initialize hardware TX power control for the A-PHY.
 * Not implemented yet; intentionally a no-op stub. */
static void hardware_pctl_init_aphy(struct b43_wldev *dev)
{
	//TODO
}
287
/* Lowlevel A-PHY (OFDM) init.
 * Also invoked from the G-PHY init path (shared OFDM hardware); the
 * A-only sections are guarded by phy->type checks. Sleeps. */
void b43_phy_inita(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->dev->bus;
	struct b43_phy *phy = &dev->phy;

	/* This lowlevel A-PHY init is also called from G-PHY init.
	 * So we must not access phy->a, if called from G-PHY code.
	 */
	B43_WARN_ON((phy->type != B43_PHYTYPE_A) &&
		    (phy->type != B43_PHYTYPE_G));

	might_sleep();

	if (phy->rev >= 6) {
		if (phy->type == B43_PHYTYPE_A)
			b43_phy_write(dev, B43_PHY_OFDM(0x1B),
				      b43_phy_read(dev, B43_PHY_OFDM(0x1B)) & ~0x1000);
		/* Toggle the "Encore" (RangeMax/BroadRange) related bits. */
		if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
			b43_phy_write(dev, B43_PHY_ENCORE,
				      b43_phy_read(dev, B43_PHY_ENCORE) | 0x0010);
		else
			b43_phy_write(dev, B43_PHY_ENCORE,
				      b43_phy_read(dev, B43_PHY_ENCORE) & ~0x1010);
	}

	/* Apply all PHY workarounds. */
	b43_wa_all(dev);

	if (phy->type == B43_PHYTYPE_A) {
		if (phy->gmode && (phy->rev < 3))
			b43_phy_write(dev, 0x0034,
				      b43_phy_read(dev, 0x0034) | 0x0001);
		/* RSSI AGC is disabled during init. */
		b43_phy_rssiagc(dev, 0);

		/* Enable carrier sense before the radio init. */
		b43_phy_write(dev, B43_PHY_CRS0,
			      b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);

		b43_radio_init2060(dev);

		if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
		    ((bus->boardinfo.type == SSB_BOARD_BU4306) ||
		     (bus->boardinfo.type == SSB_BOARD_BU4309))) {
			; //TODO: A PHY LO
		}

		if (phy->rev >= 3)
			b43_phy_ww(dev);

		hardware_pctl_init_aphy(dev);

		//TODO: radar detection
	}

	/* G-PHY boards with PA control get an OFDM register tweak. */
	if ((phy->type == B43_PHYTYPE_G) &&
	    (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) {
		b43_phy_write(dev, B43_PHY_OFDM(0x6E),
			      (b43_phy_read(dev, B43_PHY_OFDM(0x6E))
			       & 0xE000) | 0x3CF);
	}
}
347
348/* Initialise the TSSI->dBm lookup table */
349static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev)
350{
351 struct b43_phy *phy = &dev->phy;
352 struct b43_phy_a *aphy = phy->a;
353 s16 pab0, pab1, pab2;
354
355 pab0 = (s16) (dev->dev->bus->sprom.pa1b0);
356 pab1 = (s16) (dev->dev->bus->sprom.pa1b1);
357 pab2 = (s16) (dev->dev->bus->sprom.pa1b2);
358
359 if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
360 pab0 != -1 && pab1 != -1 && pab2 != -1) {
361 /* The pabX values are set in SPROM. Use them. */
362 if ((s8) dev->dev->bus->sprom.itssi_a != 0 &&
363 (s8) dev->dev->bus->sprom.itssi_a != -1)
364 aphy->tgt_idle_tssi =
365 (s8) (dev->dev->bus->sprom.itssi_a);
366 else
367 aphy->tgt_idle_tssi = 62;
368 aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
369 pab1, pab2);
370 if (!aphy->tssi2dbm)
371 return -ENOMEM;
372 } else {
373 /* pabX values not set in SPROM,
374 * but APHY needs a generated table. */
375 aphy->tssi2dbm = NULL;
376 b43err(dev->wl, "Could not generate tssi2dBm "
377 "table (wrong SPROM info)!\n");
378 return -ENODEV;
379 }
380
381 return 0;
382}
383
384static int b43_aphy_op_allocate(struct b43_wldev *dev)
385{
386 struct b43_phy_a *aphy;
387 int err;
388
389 aphy = kzalloc(sizeof(*aphy), GFP_KERNEL);
390 if (!aphy)
391 return -ENOMEM;
392 dev->phy.a = aphy;
393
394 err = b43_aphy_init_tssi2dbm_table(dev);
395 if (err)
396 goto err_free_aphy;
397
398 return 0;
399
400err_free_aphy:
401 kfree(aphy);
402 dev->phy.a = NULL;
403
404 return err;
405}
406
407static void b43_aphy_op_prepare_structs(struct b43_wldev *dev)
408{
409 struct b43_phy *phy = &dev->phy;
410 struct b43_phy_a *aphy = phy->a;
411 const void *tssi2dbm;
412 int tgt_idle_tssi;
413
414 /* tssi2dbm table is constant, so it is initialized at alloc time.
415 * Save a copy of the pointer. */
416 tssi2dbm = aphy->tssi2dbm;
417 tgt_idle_tssi = aphy->tgt_idle_tssi;
418
419 /* Zero out the whole PHY structure. */
420 memset(aphy, 0, sizeof(*aphy));
421
422 aphy->tssi2dbm = tssi2dbm;
423 aphy->tgt_idle_tssi = tgt_idle_tssi;
424
425 //TODO init struct b43_phy_a
426
427}
428
429static void b43_aphy_op_free(struct b43_wldev *dev)
430{
431 struct b43_phy *phy = &dev->phy;
432 struct b43_phy_a *aphy = phy->a;
433
434 kfree(aphy->tssi2dbm);
435 aphy->tssi2dbm = NULL;
436
437 kfree(aphy);
438 dev->phy.a = NULL;
439}
440
441static int b43_aphy_op_init(struct b43_wldev *dev)
442{
443 b43_phy_inita(dev);
444
445 return 0;
446}
447
/* Remap a PHY register offset for A-PHY access.
 * OFDM-routed registers are base registers on the A-PHY. EXT-G and
 * N-BMODE routed registers do not exist on A-PHYs; accesses to them
 * are flagged in debug builds (the offset passes through unchanged). */
static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset)
{
	/* OFDM registers are base-registers for the A-PHY. */
	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
		offset &= ~B43_PHYROUTE;
		offset |= B43_PHYROUTE_BASE;
	}

#if B43_DEBUG
	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) {
		/* Ext-G registers are only available on G-PHYs */
		b43err(dev->wl, "Invalid EXT-G PHY access at "
		       "0x%04X on A-PHY\n", offset);
		dump_stack();
	}
	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) {
		/* N-BMODE registers are only available on N-PHYs */
		b43err(dev->wl, "Invalid N-BMODE PHY access at "
		       "0x%04X on A-PHY\n", offset);
		dump_stack();
	}
#endif /* B43_DEBUG */

	return offset;
}
473
474static u16 b43_aphy_op_read(struct b43_wldev *dev, u16 reg)
475{
476 reg = adjust_phyreg(dev, reg);
477 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
478 return b43_read16(dev, B43_MMIO_PHY_DATA);
479}
480
481static void b43_aphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
482{
483 reg = adjust_phyreg(dev, reg);
484 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
485 b43_write16(dev, B43_MMIO_PHY_DATA, value);
486}
487
488static u16 b43_aphy_op_radio_read(struct b43_wldev *dev, u16 reg)
489{
490 /* Register 1 is a 32-bit register. */
491 B43_WARN_ON(reg == 1);
492 /* A-PHY needs 0x40 for read access */
493 reg |= 0x40;
494
495 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
496 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
497}
498
499static void b43_aphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
500{
501 /* Register 1 is a 32-bit register. */
502 B43_WARN_ON(reg == 1);
503
504 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
505 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
506}
507
508static bool b43_aphy_op_supports_hwpctl(struct b43_wldev *dev)
509{
510 return (dev->phy.rev >= 5);
511}
512
/* Software RF-kill for the A-PHY: power the 2060 radio up or down.
 * Unblocking (re)runs the full radio init sequence; blocking forces
 * the radio into its low-power state. */
static void b43_aphy_op_software_rfkill(struct b43_wldev *dev,
					enum rfkill_state state)
{
	struct b43_phy *phy = &dev->phy;

	if (state == RFKILL_STATE_UNBLOCKED) {
		/* Radio is already powered up: nothing to do. */
		if (phy->radio_on)
			return;
		b43_radio_write16(dev, 0x0004, 0x00C0);
		b43_radio_write16(dev, 0x0005, 0x0008);
		b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) & 0xFFF7);
		b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) & 0xFFF7);
		b43_radio_init2060(dev);
	} else {
		b43_radio_write16(dev, 0x0004, 0x00FF);
		b43_radio_write16(dev, 0x0005, 0x00FB);
		b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) | 0x0008);
		b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) | 0x0008);
	}
}
533
534static int b43_aphy_op_switch_channel(struct b43_wldev *dev,
535 unsigned int new_channel)
536{
537 if (new_channel > 200)
538 return -EINVAL;
539 aphy_channel_switch(dev, new_channel);
540
541 return 0;
542}
543
544static unsigned int b43_aphy_op_get_default_chan(struct b43_wldev *dev)
545{
546 return 36; /* Default to channel 36 */
547}
548
/* Select the RX antenna (or automatic antenna diversity).
 * Firmware antenna-diversity help is disabled while the registers are
 * reprogrammed and re-enabled afterwards. Still marked TODO upstream. */
static void b43_aphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
{//TODO
	struct b43_phy *phy = &dev->phy;
	u64 hf;
	u16 tmp;
	int autodiv = 0;

	if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
		autodiv = 1;

	/* Disable firmware antenna-diversity help during reconfiguration. */
	hf = b43_hf_read(dev);
	hf &= ~B43_HF_ANTDIVHELP;
	b43_hf_write(dev, hf);

	/* Program the antenna selection into the baseband config. */
	tmp = b43_phy_read(dev, B43_PHY_BBANDCFG);
	tmp &= ~B43_PHY_BBANDCFG_RXANT;
	tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
	    << B43_PHY_BBANDCFG_RXANT_SHIFT;
	b43_phy_write(dev, B43_PHY_BBANDCFG, tmp);

	if (autodiv) {
		tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
		if (antenna == B43_ANTENNA_AUTO0)
			tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
		else
			tmp |= B43_PHY_ANTDWELL_AUTODIV1;
		b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
	}
	/* Revision/analog-core dependent dwell and clip-powerdown tuning. */
	if (phy->rev < 3) {
		tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
		tmp = (tmp & 0xFF00) | 0x24;
		b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
	} else {
		tmp = b43_phy_read(dev, B43_PHY_OFDM61);
		tmp |= 0x10;
		b43_phy_write(dev, B43_PHY_OFDM61, tmp);
		if (phy->analog == 3) {
			b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
				      0x1D);
			b43_phy_write(dev, B43_PHY_ADIVRELATED,
				      8);
		} else {
			b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
				      0x3A);
			tmp =
			    b43_phy_read(dev,
					 B43_PHY_ADIVRELATED);
			tmp = (tmp & 0xFF00) | 8;
			b43_phy_write(dev, B43_PHY_ADIVRELATED,
				      tmp);
		}
	}

	/* Re-enable firmware antenna-diversity help. */
	hf |= B43_HF_ANTDIVHELP;
	b43_hf_write(dev, hf);
}
605
/* Apply a previously calculated TX power adjustment.
 * Not implemented yet for the A-PHY. */
static void b43_aphy_op_adjust_txpower(struct b43_wldev *dev)
{//TODO
}

/* Recalculate the desired TX power.
 * Currently a stub that always reports "done" without doing work. */
static enum b43_txpwr_result b43_aphy_op_recalc_txpower(struct b43_wldev *dev,
							bool ignore_tssi)
{//TODO
	return B43_TXPWR_RES_DONE;
}

/* 15-second periodic work hook. Not implemented yet. */
static void b43_aphy_op_pwork_15sec(struct b43_wldev *dev)
{//TODO
}

/* 60-second periodic work hook. Not implemented yet. */
static void b43_aphy_op_pwork_60sec(struct b43_wldev *dev)
{//TODO
}
623
/* A-PHY (802.11a) operations vector; hooked up by b43_phy_allocate()
 * when the device reports PHY type A. */
const struct b43_phy_operations b43_phyops_a = {
	.allocate		= b43_aphy_op_allocate,
	.free			= b43_aphy_op_free,
	.prepare_structs	= b43_aphy_op_prepare_structs,
	.init			= b43_aphy_op_init,
	.phy_read		= b43_aphy_op_read,
	.phy_write		= b43_aphy_op_write,
	.radio_read		= b43_aphy_op_radio_read,
	.radio_write		= b43_aphy_op_radio_write,
	.supports_hwpctl	= b43_aphy_op_supports_hwpctl,
	.software_rfkill	= b43_aphy_op_software_rfkill,
	.switch_analog		= b43_phyop_switch_analog_generic,
	.switch_channel		= b43_aphy_op_switch_channel,
	.get_default_chan	= b43_aphy_op_get_default_chan,
	.set_rx_antenna		= b43_aphy_op_set_rx_antenna,
	.recalc_txpower		= b43_aphy_op_recalc_txpower,
	.adjust_txpower		= b43_aphy_op_adjust_txpower,
	.pwork_15sec		= b43_aphy_op_pwork_15sec,
	.pwork_60sec		= b43_aphy_op_pwork_60sec,
};
diff --git a/drivers/net/wireless/b43/phy_a.h b/drivers/net/wireless/b43/phy_a.h
new file mode 100644
index 000000000000..5cfaab7b16ee
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_a.h
@@ -0,0 +1,130 @@
1#ifndef LINUX_B43_PHY_A_H_
2#define LINUX_B43_PHY_A_H_
3
4#include "phy_common.h"
5
6
7/* OFDM (A) PHY Registers */
8#define B43_PHY_VERSION_OFDM B43_PHY_OFDM(0x00) /* Versioning register for A-PHY */
9#define B43_PHY_BBANDCFG B43_PHY_OFDM(0x01) /* Baseband config */
10#define B43_PHY_BBANDCFG_RXANT 0x180 /* RX Antenna selection */
11#define B43_PHY_BBANDCFG_RXANT_SHIFT 7
12#define B43_PHY_PWRDOWN B43_PHY_OFDM(0x03) /* Powerdown */
13#define B43_PHY_CRSTHRES1_R1 B43_PHY_OFDM(0x06) /* CRS Threshold 1 (phy.rev 1 only) */
14#define B43_PHY_LNAHPFCTL B43_PHY_OFDM(0x1C) /* LNA/HPF control */
15#define B43_PHY_LPFGAINCTL B43_PHY_OFDM(0x20) /* LPF Gain control */
16#define B43_PHY_ADIVRELATED B43_PHY_OFDM(0x27) /* FIXME rename */
17#define B43_PHY_CRS0 B43_PHY_OFDM(0x29)
18#define B43_PHY_CRS0_EN 0x4000
19#define B43_PHY_PEAK_COUNT B43_PHY_OFDM(0x30)
20#define B43_PHY_ANTDWELL B43_PHY_OFDM(0x2B) /* Antenna dwell */
21#define B43_PHY_ANTDWELL_AUTODIV1 0x0100 /* Automatic RX diversity start antenna */
22#define B43_PHY_ENCORE B43_PHY_OFDM(0x49) /* "Encore" (RangeMax / BroadRange) */
23#define B43_PHY_ENCORE_EN 0x0200 /* Encore enable */
24#define B43_PHY_LMS B43_PHY_OFDM(0x55)
25#define B43_PHY_OFDM61 B43_PHY_OFDM(0x61) /* FIXME rename */
26#define B43_PHY_OFDM61_10 0x0010 /* FIXME rename */
27#define B43_PHY_IQBAL B43_PHY_OFDM(0x69) /* I/Q balance */
28#define B43_PHY_BBTXDC_BIAS B43_PHY_OFDM(0x6B) /* Baseband TX DC bias */
29#define B43_PHY_OTABLECTL B43_PHY_OFDM(0x72) /* OFDM table control (see below) */
30#define B43_PHY_OTABLEOFF 0x03FF /* OFDM table offset (see below) */
31#define B43_PHY_OTABLENR 0xFC00 /* OFDM table number (see below) */
32#define B43_PHY_OTABLENR_SHIFT 10
33#define B43_PHY_OTABLEI B43_PHY_OFDM(0x73) /* OFDM table data I */
34#define B43_PHY_OTABLEQ B43_PHY_OFDM(0x74) /* OFDM table data Q */
35#define B43_PHY_HPWR_TSSICTL B43_PHY_OFDM(0x78) /* Hardware power TSSI control */
36#define B43_PHY_ADCCTL B43_PHY_OFDM(0x7A) /* ADC control */
37#define B43_PHY_IDLE_TSSI B43_PHY_OFDM(0x7B)
38#define B43_PHY_A_TEMP_SENSE B43_PHY_OFDM(0x7C) /* A PHY temperature sense */
39#define B43_PHY_NRSSITHRES B43_PHY_OFDM(0x8A) /* NRSSI threshold */
40#define B43_PHY_ANTWRSETT B43_PHY_OFDM(0x8C) /* Antenna WR settle */
41#define B43_PHY_ANTWRSETT_ARXDIV 0x2000 /* Automatic RX diversity enabled */
42#define B43_PHY_CLIPPWRDOWNT B43_PHY_OFDM(0x93) /* Clip powerdown threshold */
43#define B43_PHY_OFDM9B B43_PHY_OFDM(0x9B) /* FIXME rename */
44#define B43_PHY_N1P1GAIN B43_PHY_OFDM(0xA0)
45#define B43_PHY_P1P2GAIN B43_PHY_OFDM(0xA1)
46#define B43_PHY_N1N2GAIN B43_PHY_OFDM(0xA2)
47#define B43_PHY_CLIPTHRES B43_PHY_OFDM(0xA3)
48#define B43_PHY_CLIPN1P2THRES B43_PHY_OFDM(0xA4)
49#define B43_PHY_CCKSHIFTBITS_WA B43_PHY_OFDM(0xA5) /* CCK shiftbits workaround, FIXME rename */
50#define B43_PHY_CCKSHIFTBITS B43_PHY_OFDM(0xA7) /* FIXME rename */
51#define B43_PHY_DIVSRCHIDX B43_PHY_OFDM(0xA8) /* Divider search gain/index */
52#define B43_PHY_CLIPP2THRES B43_PHY_OFDM(0xA9)
53#define B43_PHY_CLIPP3THRES B43_PHY_OFDM(0xAA)
54#define B43_PHY_DIVP1P2GAIN B43_PHY_OFDM(0xAB)
55#define B43_PHY_DIVSRCHGAINBACK B43_PHY_OFDM(0xAD) /* Divider search gain back */
56#define B43_PHY_DIVSRCHGAINCHNG B43_PHY_OFDM(0xAE) /* Divider search gain change */
57#define B43_PHY_CRSTHRES1 B43_PHY_OFDM(0xC0) /* CRS Threshold 1 (phy.rev >= 2 only) */
58#define B43_PHY_CRSTHRES2 B43_PHY_OFDM(0xC1) /* CRS Threshold 2 (phy.rev >= 2 only) */
59#define B43_PHY_TSSIP_LTBASE B43_PHY_OFDM(0x380) /* TSSI power lookup table base */
60#define B43_PHY_DC_LTBASE B43_PHY_OFDM(0x3A0) /* DC lookup table base */
61#define B43_PHY_GAIN_LTBASE B43_PHY_OFDM(0x3C0) /* Gain lookup table base */
62
63/*** OFDM table numbers ***/
64#define B43_OFDMTAB(number, offset) (((number) << B43_PHY_OTABLENR_SHIFT) | (offset))
65#define B43_OFDMTAB_AGC1 B43_OFDMTAB(0x00, 0)
66#define B43_OFDMTAB_GAIN0 B43_OFDMTAB(0x00, 0)
67#define B43_OFDMTAB_GAINX B43_OFDMTAB(0x01, 0) //TODO rename
68#define B43_OFDMTAB_GAIN1 B43_OFDMTAB(0x01, 4)
69#define B43_OFDMTAB_AGC3 B43_OFDMTAB(0x02, 0)
70#define B43_OFDMTAB_GAIN2 B43_OFDMTAB(0x02, 3)
71#define B43_OFDMTAB_LNAHPFGAIN1 B43_OFDMTAB(0x03, 0)
72#define B43_OFDMTAB_WRSSI B43_OFDMTAB(0x04, 0)
73#define B43_OFDMTAB_LNAHPFGAIN2 B43_OFDMTAB(0x04, 0)
74#define B43_OFDMTAB_NOISESCALE B43_OFDMTAB(0x05, 0)
75#define B43_OFDMTAB_AGC2 B43_OFDMTAB(0x06, 0)
76#define B43_OFDMTAB_ROTOR B43_OFDMTAB(0x08, 0)
77#define B43_OFDMTAB_ADVRETARD B43_OFDMTAB(0x09, 0)
78#define B43_OFDMTAB_DAC B43_OFDMTAB(0x0C, 0)
79#define B43_OFDMTAB_DC B43_OFDMTAB(0x0E, 7)
80#define B43_OFDMTAB_PWRDYN2 B43_OFDMTAB(0x0E, 12)
81#define B43_OFDMTAB_LNAGAIN B43_OFDMTAB(0x0E, 13)
82#define B43_OFDMTAB_UNKNOWN_0F B43_OFDMTAB(0x0F, 0) //TODO rename
83#define B43_OFDMTAB_UNKNOWN_APHY B43_OFDMTAB(0x0F, 7) //TODO rename
84#define B43_OFDMTAB_LPFGAIN B43_OFDMTAB(0x0F, 12)
85#define B43_OFDMTAB_RSSI B43_OFDMTAB(0x10, 0)
86#define B43_OFDMTAB_UNKNOWN_11 B43_OFDMTAB(0x11, 4) //TODO rename
87#define B43_OFDMTAB_AGC1_R1 B43_OFDMTAB(0x13, 0)
88#define B43_OFDMTAB_GAINX_R1 B43_OFDMTAB(0x14, 0) //TODO remove!
89#define B43_OFDMTAB_MINSIGSQ B43_OFDMTAB(0x14, 0)
90#define B43_OFDMTAB_AGC3_R1 B43_OFDMTAB(0x15, 0)
91#define B43_OFDMTAB_WRSSI_R1 B43_OFDMTAB(0x15, 4)
92#define B43_OFDMTAB_TSSI B43_OFDMTAB(0x15, 0)
93#define B43_OFDMTAB_DACRFPABB B43_OFDMTAB(0x16, 0)
94#define B43_OFDMTAB_DACOFF B43_OFDMTAB(0x17, 0)
95#define B43_OFDMTAB_DCBIAS B43_OFDMTAB(0x18, 0)
96
97u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset);
98void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
99 u16 offset, u16 value);
100u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset);
101void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
102 u16 offset, u32 value);
103
104
/* A-PHY (802.11a) private state; allocated by b43_phyops_a.allocate
 * and reachable through struct b43_phy. */
struct b43_phy_a {
	/* Pointer to the table used to convert a
	 * TSSI value to dBm-Q5.2 */
	const s8 *tssi2dbm;
	/* Target idle TSSI */
	int tgt_idle_tssi;
	/* Current idle TSSI */
	int cur_idle_tssi;//FIXME value currently not set

	/* A-PHY TX Power control value. */
	u16 txpwr_offset;

	//TODO lots of missing stuff
};
119
120/**
121 * b43_phy_inita - Lowlevel A-PHY init routine.
122 * This is _only_ used by the G-PHY code.
123 */
124void b43_phy_inita(struct b43_wldev *dev);
125
126
127struct b43_phy_operations;
128extern const struct b43_phy_operations b43_phyops_a;
129
130#endif /* LINUX_B43_PHY_A_H_ */
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
new file mode 100644
index 000000000000..af37abccccb3
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -0,0 +1,381 @@
1/*
2
3 Broadcom B43 wireless driver
4 Common PHY routines
5
6 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
7 Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
8 Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
9 Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
10 Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; see the file COPYING. If not, write to
24 the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
25 Boston, MA 02110-1301, USA.
26
27*/
28
29#include "phy_common.h"
30#include "phy_g.h"
31#include "phy_a.h"
32#include "phy_n.h"
33#include "phy_lp.h"
34#include "b43.h"
35#include "main.h"
36
37
38int b43_phy_allocate(struct b43_wldev *dev)
39{
40 struct b43_phy *phy = &(dev->phy);
41 int err;
42
43 phy->ops = NULL;
44
45 switch (phy->type) {
46 case B43_PHYTYPE_A:
47 phy->ops = &b43_phyops_a;
48 break;
49 case B43_PHYTYPE_G:
50 phy->ops = &b43_phyops_g;
51 break;
52 case B43_PHYTYPE_N:
53#ifdef CONFIG_B43_NPHY
54 phy->ops = &b43_phyops_n;
55#endif
56 break;
57 case B43_PHYTYPE_LP:
58#ifdef CONFIG_B43_PHY_LP
59 phy->ops = &b43_phyops_lp;
60#endif
61 break;
62 }
63 if (B43_WARN_ON(!phy->ops))
64 return -ENODEV;
65
66 err = phy->ops->allocate(dev);
67 if (err)
68 phy->ops = NULL;
69
70 return err;
71}
72
73void b43_phy_free(struct b43_wldev *dev)
74{
75 dev->phy.ops->free(dev);
76 dev->phy.ops = NULL;
77}
78
/* Bring the PHY up: unblock the radio, run the type-specific init and
 * switch to the default channel. On any failure the radio is blocked
 * again and the type-specific exit hook (if any) is run.
 * Returns 0 on success or a negative error code. */
int b43_phy_init(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	const struct b43_phy_operations *ops = phy->ops;
	int err;

	phy->channel = ops->get_default_chan(dev);

	ops->software_rfkill(dev, RFKILL_STATE_UNBLOCKED);
	err = ops->init(dev);
	if (err) {
		b43err(dev->wl, "PHY init failed\n");
		goto err_block_rf;
	}
	/* Make sure to switch hardware and firmware (SHM) to
	 * the default channel. */
	err = b43_switch_channel(dev, ops->get_default_chan(dev));
	if (err) {
		b43err(dev->wl, "PHY init: Channel switch to default failed\n");
		goto err_phy_exit;
	}

	return 0;

err_phy_exit:
	if (ops->exit)
		ops->exit(dev);
err_block_rf:
	ops->software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);

	return err;
}
111
112void b43_phy_exit(struct b43_wldev *dev)
113{
114 const struct b43_phy_operations *ops = dev->phy.ops;
115
116 ops->software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);
117 if (ops->exit)
118 ops->exit(dev);
119}
120
121bool b43_has_hardware_pctl(struct b43_wldev *dev)
122{
123 if (!dev->phy.hardware_power_control)
124 return 0;
125 if (!dev->phy.ops->supports_hwpctl)
126 return 0;
127 return dev->phy.ops->supports_hwpctl(dev);
128}
129
/* Acquire the hardware radio-register lock (MACCTL RADIOLOCK bit).
 * Must be balanced with b43_radio_unlock(); nesting is a bug and is
 * caught by the WARN below. */
void b43_radio_lock(struct b43_wldev *dev)
{
	u32 macctl;

	macctl = b43_read32(dev, B43_MMIO_MACCTL);
	B43_WARN_ON(macctl & B43_MACCTL_RADIOLOCK);
	macctl |= B43_MACCTL_RADIOLOCK;
	b43_write32(dev, B43_MMIO_MACCTL, macctl);
	/* Commit the write and wait for the device
	 * to exit any radio register access. */
	b43_read32(dev, B43_MMIO_MACCTL);
	udelay(10);
}

/* Release the hardware radio-register lock taken by b43_radio_lock(). */
void b43_radio_unlock(struct b43_wldev *dev)
{
	u32 macctl;

	/* Commit any write */
	b43_read16(dev, B43_MMIO_PHY_VER);
	/* unlock */
	macctl = b43_read32(dev, B43_MMIO_MACCTL);
	B43_WARN_ON(!(macctl & B43_MACCTL_RADIOLOCK));
	macctl &= ~B43_MACCTL_RADIOLOCK;
	b43_write32(dev, B43_MMIO_MACCTL, macctl);
}
156
/* Acquire the PHY lock: keeps the device awake so PHY registers can be
 * accessed safely. Only meaningful on core revision >= 3 (warned
 * otherwise). Must be balanced with b43_phy_unlock(). */
void b43_phy_lock(struct b43_wldev *dev)
{
#if B43_DEBUG
	B43_WARN_ON(dev->phy.phy_locked);
	dev->phy.phy_locked = 1;
#endif
	B43_WARN_ON(dev->dev->id.revision < 3);

	/* In AP mode the device never sleeps, so no wakeup is needed. */
	if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
		b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
}

/* Release the PHY lock taken by b43_phy_lock() and restore the default
 * power-saving behavior. */
void b43_phy_unlock(struct b43_wldev *dev)
{
#if B43_DEBUG
	B43_WARN_ON(!dev->phy.phy_locked);
	dev->phy.phy_locked = 0;
#endif
	B43_WARN_ON(dev->dev->id.revision < 3);

	if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
		b43_power_saving_ctl_bits(dev, 0);
}
180
181u16 b43_radio_read(struct b43_wldev *dev, u16 reg)
182{
183 return dev->phy.ops->radio_read(dev, reg);
184}
185
186void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
187{
188 dev->phy.ops->radio_write(dev, reg, value);
189}
190
191void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask)
192{
193 b43_radio_write16(dev, offset,
194 b43_radio_read16(dev, offset) & mask);
195}
196
197void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set)
198{
199 b43_radio_write16(dev, offset,
200 b43_radio_read16(dev, offset) | set);
201}
202
203void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
204{
205 b43_radio_write16(dev, offset,
206 (b43_radio_read16(dev, offset) & mask) | set);
207}
208
209u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
210{
211 return dev->phy.ops->phy_read(dev, reg);
212}
213
214void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value)
215{
216 dev->phy.ops->phy_write(dev, reg, value);
217}
218
219void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask)
220{
221 b43_phy_write(dev, offset,
222 b43_phy_read(dev, offset) & mask);
223}
224
225void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set)
226{
227 b43_phy_write(dev, offset,
228 b43_phy_read(dev, offset) | set);
229}
230
231void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
232{
233 b43_phy_write(dev, offset,
234 (b43_phy_read(dev, offset) & mask) | set);
235}
236
237int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
238{
239 struct b43_phy *phy = &(dev->phy);
240 u16 channelcookie, savedcookie;
241 int err;
242
243 if (new_channel == B43_DEFAULT_CHANNEL)
244 new_channel = phy->ops->get_default_chan(dev);
245
246 /* First we set the channel radio code to prevent the
247 * firmware from sending ghost packets.
248 */
249 channelcookie = new_channel;
250 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
251 channelcookie |= 0x100;
252 //FIXME set 40Mhz flag if required
253 savedcookie = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN);
254 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN, channelcookie);
255
256 /* Now try to switch the PHY hardware channel. */
257 err = phy->ops->switch_channel(dev, new_channel);
258 if (err)
259 goto err_restore_cookie;
260
261 dev->phy.channel = new_channel;
262 /* Wait for the radio to tune to the channel and stabilize. */
263 msleep(8);
264
265 return 0;
266
267err_restore_cookie:
268 b43_shm_write16(dev, B43_SHM_SHARED,
269 B43_SHM_SH_CHAN, savedcookie);
270
271 return err;
272}
273
274void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state)
275{
276 struct b43_phy *phy = &dev->phy;
277
278 if (state == RFKILL_STATE_HARD_BLOCKED) {
279 /* We cannot hardware-block the device */
280 state = RFKILL_STATE_SOFT_BLOCKED;
281 }
282
283 phy->ops->software_rfkill(dev, state);
284 phy->radio_on = (state == RFKILL_STATE_UNBLOCKED);
285}
286
287/**
288 * b43_phy_txpower_adjust_work - TX power workqueue.
289 *
290 * Workqueue for updating the TX power parameters in hardware.
291 */
292void b43_phy_txpower_adjust_work(struct work_struct *work)
293{
294 struct b43_wl *wl = container_of(work, struct b43_wl,
295 txpower_adjust_work);
296 struct b43_wldev *dev;
297
298 mutex_lock(&wl->mutex);
299 dev = wl->current_dev;
300
301 if (likely(dev && (b43_status(dev) >= B43_STAT_STARTED)))
302 dev->phy.ops->adjust_txpower(dev);
303
304 mutex_unlock(&wl->mutex);
305}
306
307/* Called with wl->irq_lock locked */
308void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags)
309{
310 struct b43_phy *phy = &dev->phy;
311 unsigned long now = jiffies;
312 enum b43_txpwr_result result;
313
314 if (!(flags & B43_TXPWR_IGNORE_TIME)) {
315 /* Check if it's time for a TXpower check. */
316 if (time_before(now, phy->next_txpwr_check_time))
317 return; /* Not yet */
318 }
319 /* The next check will be needed in two seconds, or later. */
320 phy->next_txpwr_check_time = round_jiffies(now + (HZ * 2));
321
322 if ((dev->dev->bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
323 (dev->dev->bus->boardinfo.type == SSB_BOARD_BU4306))
324 return; /* No software txpower adjustment needed */
325
326 result = phy->ops->recalc_txpower(dev, !!(flags & B43_TXPWR_IGNORE_TSSI));
327 if (result == B43_TXPWR_RES_DONE)
328 return; /* We are done. */
329 B43_WARN_ON(result != B43_TXPWR_RES_NEED_ADJUST);
330 B43_WARN_ON(phy->ops->adjust_txpower == NULL);
331
332 /* We must adjust the transmission power in hardware.
333 * Schedule b43_phy_txpower_adjust_work(). */
334 queue_work(dev->wl->hw->workqueue, &dev->wl->txpower_adjust_work);
335}
336
337int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset)
338{
339 const bool is_ofdm = (shm_offset != B43_SHM_SH_TSSI_CCK);
340 unsigned int a, b, c, d;
341 unsigned int average;
342 u32 tmp;
343
344 tmp = b43_shm_read32(dev, B43_SHM_SHARED, shm_offset);
345 a = tmp & 0xFF;
346 b = (tmp >> 8) & 0xFF;
347 c = (tmp >> 16) & 0xFF;
348 d = (tmp >> 24) & 0xFF;
349 if (a == 0 || a == B43_TSSI_MAX ||
350 b == 0 || b == B43_TSSI_MAX ||
351 c == 0 || c == B43_TSSI_MAX ||
352 d == 0 || d == B43_TSSI_MAX)
353 return -ENOENT;
354 /* The values are OK. Clear them. */
355 tmp = B43_TSSI_MAX | (B43_TSSI_MAX << 8) |
356 (B43_TSSI_MAX << 16) | (B43_TSSI_MAX << 24);
357 b43_shm_write32(dev, B43_SHM_SHARED, shm_offset, tmp);
358
359 if (is_ofdm) {
360 a = (a + 32) & 0x3F;
361 b = (b + 32) & 0x3F;
362 c = (c + 32) & 0x3F;
363 d = (d + 32) & 0x3F;
364 }
365
366 /* Get the average of the values with 0.5 added to each value. */
367 average = (a + b + c + d + 2) / 4;
368 if (is_ofdm) {
369 /* Adjust for CCK-boost */
370 if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO)
371 & B43_HF_CCKBOOST)
372 average = (average >= 13) ? (average - 13) : 0;
373 }
374
375 return average;
376}
377
378void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
379{
380 b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
381}
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
new file mode 100644
index 000000000000..c9f5430d1d7d
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -0,0 +1,413 @@
1#ifndef LINUX_B43_PHY_COMMON_H_
2#define LINUX_B43_PHY_COMMON_H_
3
4#include <linux/rfkill.h>
5
6struct b43_wldev;
7
8
9/* PHY register routing bits */
10#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
11#define B43_PHYROUTE_BASE 0x0000 /* Base registers */
12#define B43_PHYROUTE_OFDM_GPHY 0x0400 /* OFDM register routing for G-PHYs */
13#define B43_PHYROUTE_EXT_GPHY 0x0800 /* Extended G-PHY registers */
14#define B43_PHYROUTE_N_BMODE 0x0C00 /* N-PHY BMODE registers */
15
16/* CCK (B-PHY) registers. */
17#define B43_PHY_CCK(reg) ((reg) | B43_PHYROUTE_BASE)
18/* N-PHY registers. */
19#define B43_PHY_N(reg) ((reg) | B43_PHYROUTE_BASE)
20/* N-PHY BMODE registers. */
21#define B43_PHY_N_BMODE(reg) ((reg) | B43_PHYROUTE_N_BMODE)
22/* OFDM (A-PHY) registers. */
23#define B43_PHY_OFDM(reg) ((reg) | B43_PHYROUTE_OFDM_GPHY)
24/* Extended G-PHY registers. */
25#define B43_PHY_EXTG(reg) ((reg) | B43_PHYROUTE_EXT_GPHY)
26
27
28/* Masks for the PHY versioning registers. */
29#define B43_PHYVER_ANALOG 0xF000
30#define B43_PHYVER_ANALOG_SHIFT 12
31#define B43_PHYVER_TYPE 0x0F00
32#define B43_PHYVER_TYPE_SHIFT 8
33#define B43_PHYVER_VERSION 0x00FF
34
35/**
36 * enum b43_interference_mitigation - Interference Mitigation mode
37 *
38 * @B43_INTERFMODE_NONE: Disabled
39 * @B43_INTERFMODE_NONWLAN: Non-WLAN Interference Mitigation
40 * @B43_INTERFMODE_MANUALWLAN: WLAN Interference Mitigation
41 * @B43_INTERFMODE_AUTOWLAN: Automatic WLAN Interference Mitigation
42 */
43enum b43_interference_mitigation {
44 B43_INTERFMODE_NONE,
45 B43_INTERFMODE_NONWLAN,
46 B43_INTERFMODE_MANUALWLAN,
47 B43_INTERFMODE_AUTOWLAN,
48};
49
50/* Antenna identifiers */
51enum {
52	B43_ANTENNA0,		/* Antenna 0 */
53	B43_ANTENNA1,		/* Antenna 1 */
54	B43_ANTENNA_AUTO1,	/* Automatic, starting with antenna 1 */
55	B43_ANTENNA_AUTO0,	/* Automatic, starting with antenna 0 */
56	B43_ANTENNA2,
57	B43_ANTENNA3 = 8,
58
59	B43_ANTENNA_AUTO = B43_ANTENNA_AUTO0,
60	B43_ANTENNA_DEFAULT = B43_ANTENNA_AUTO,
61};
62
63/**
64 * enum b43_txpwr_result - Return value for the recalc_txpower PHY op.
65 *
66 * @B43_TXPWR_RES_NEED_ADJUST: Values changed. Hardware adjustment is needed.
67 * @B43_TXPWR_RES_DONE: No more work to do. Everything is done.
68 */
69enum b43_txpwr_result {
70 B43_TXPWR_RES_NEED_ADJUST,
71 B43_TXPWR_RES_DONE,
72};
73
74/**
75 * struct b43_phy_operations - Function pointers for PHY ops.
76 *
77 * @allocate: Allocate and initialise the PHY data structures.
78 * Must not be NULL.
79 * @free: Destroy and free the PHY data structures.
80 * Must not be NULL.
81 *
82 * @prepare_structs: Prepare the PHY data structures.
83 * The data structures allocated in @allocate are
84 * initialized here.
85 * Must not be NULL.
86 * @prepare_hardware: Prepare the PHY. This is called before b43_chip_init to
 87 * do some early PHY hardware init.
88 * Can be NULL, if not required.
89 * @init: Initialize the PHY.
90 * Must not be NULL.
91 * @exit: Shutdown the PHY.
92 * Can be NULL, if not required.
93 *
94 * @phy_read: Read from a PHY register.
95 * Must not be NULL.
96 * @phy_write: Write to a PHY register.
97 * Must not be NULL.
98 * @radio_read: Read from a Radio register.
99 * Must not be NULL.
100 * @radio_write: Write to a Radio register.
101 * Must not be NULL.
102 *
103 * @supports_hwpctl: Returns a boolean whether Hardware Power Control
104 * is supported or not.
105 * If NULL, hwpctl is assumed to be never supported.
106 * @software_rfkill: Turn the radio ON or OFF.
107 * Possible state values are
108 * RFKILL_STATE_SOFT_BLOCKED or
109 * RFKILL_STATE_UNBLOCKED
110 * Must not be NULL.
111 * @switch_analog: Turn the Analog on/off.
112 * Must not be NULL.
113 * @switch_channel: Switch the radio to another channel.
114 * Must not be NULL.
115 * @get_default_chan: Just returns the default channel number.
116 * Must not be NULL.
117 * @set_rx_antenna: Set the antenna used for RX.
118 * Can be NULL, if not supported.
119 * @interf_mitigation: Switch the Interference Mitigation mode.
120 * Can be NULL, if not supported.
121 *
122 * @recalc_txpower: Recalculate the transmission power parameters.
123 * This callback has to recalculate the TX power settings,
124 * but does not need to write them to the hardware, yet.
125 * Returns enum b43_txpwr_result to indicate whether the hardware
126 * needs to be adjusted.
 127 * If B43_TXPWR_RES_NEED_ADJUST is returned, @adjust_txpower
128 * will be called later.
129 * If the parameter "ignore_tssi" is true, the TSSI values should
130 * be ignored and a recalculation of the power settings should be
131 * done even if the TSSI values did not change.
132 * This callback is called with wl->irq_lock held and must not sleep.
133 * Must not be NULL.
134 * @adjust_txpower: Write the previously calculated TX power settings
135 * (from @recalc_txpower) to the hardware.
136 * This function may sleep.
137 * Can be NULL, if (and ONLY if) @recalc_txpower _always_
138 * returns B43_TXPWR_RES_DONE.
139 *
140 * @pwork_15sec: Periodic work. Called every 15 seconds.
141 * Can be NULL, if not required.
142 * @pwork_60sec: Periodic work. Called every 60 seconds.
143 * Can be NULL, if not required.
144 */
145struct b43_phy_operations {
146 /* Initialisation */
147 int (*allocate)(struct b43_wldev *dev);
148 void (*free)(struct b43_wldev *dev);
149 void (*prepare_structs)(struct b43_wldev *dev);
150 int (*prepare_hardware)(struct b43_wldev *dev);
151 int (*init)(struct b43_wldev *dev);
152 void (*exit)(struct b43_wldev *dev);
153
154 /* Register access */
155 u16 (*phy_read)(struct b43_wldev *dev, u16 reg);
156 void (*phy_write)(struct b43_wldev *dev, u16 reg, u16 value);
157 u16 (*radio_read)(struct b43_wldev *dev, u16 reg);
158 void (*radio_write)(struct b43_wldev *dev, u16 reg, u16 value);
159
160 /* Radio */
161 bool (*supports_hwpctl)(struct b43_wldev *dev);
162 void (*software_rfkill)(struct b43_wldev *dev, enum rfkill_state state);
163 void (*switch_analog)(struct b43_wldev *dev, bool on);
164 int (*switch_channel)(struct b43_wldev *dev, unsigned int new_channel);
165 unsigned int (*get_default_chan)(struct b43_wldev *dev);
166 void (*set_rx_antenna)(struct b43_wldev *dev, int antenna);
167 int (*interf_mitigation)(struct b43_wldev *dev,
168 enum b43_interference_mitigation new_mode);
169
170 /* Transmission power adjustment */
171 enum b43_txpwr_result (*recalc_txpower)(struct b43_wldev *dev,
172 bool ignore_tssi);
173 void (*adjust_txpower)(struct b43_wldev *dev);
174
175 /* Misc */
176 void (*pwork_15sec)(struct b43_wldev *dev);
177 void (*pwork_60sec)(struct b43_wldev *dev);
178};
179
180struct b43_phy_a;
181struct b43_phy_g;
182struct b43_phy_n;
183struct b43_phy_lp;
184
185struct b43_phy {
186 /* Hardware operation callbacks. */
187 const struct b43_phy_operations *ops;
188
189 /* Most hardware context information is stored in the standard-
190 * specific data structures pointed to by the pointers below.
191 * Only one of them is valid (the currently enabled PHY). */
192#ifdef CONFIG_B43_DEBUG
193 /* No union for debug build to force NULL derefs in buggy code. */
194 struct {
195#else
196 union {
197#endif
198 /* A-PHY specific information */
199 struct b43_phy_a *a;
200 /* G-PHY specific information */
201 struct b43_phy_g *g;
202 /* N-PHY specific information */
203 struct b43_phy_n *n;
204 /* LP-PHY specific information */
205 struct b43_phy_lp *lp;
206 };
207
208 /* Band support flags. */
209 bool supports_2ghz;
210 bool supports_5ghz;
211
212 /* GMODE bit enabled? */
213 bool gmode;
214
215 /* Analog Type */
216 u8 analog;
217 /* B43_PHYTYPE_ */
218 u8 type;
219 /* PHY revision number. */
220 u8 rev;
221
222 /* Radio versioning */
223 u16 radio_manuf; /* Radio manufacturer */
224 u16 radio_ver; /* Radio version */
225 u8 radio_rev; /* Radio revision */
226
227 /* Software state of the radio */
228 bool radio_on;
229
230 /* Desired TX power level (in dBm).
231 * This is set by the user and adjusted in b43_phy_xmitpower(). */
232 int desired_txpower;
233
234 /* Hardware Power Control enabled? */
235 bool hardware_power_control;
236
237 /* The time (in absolute jiffies) when the next TX power output
238 * check is needed. */
239 unsigned long next_txpwr_check_time;
240
241 /* current channel */
242 unsigned int channel;
243
244 /* PHY TX errors counter. */
245 atomic_t txerr_cnt;
246
247#ifdef CONFIG_B43_DEBUG
248 /* PHY registers locked by b43_phy_lock()? */
249 bool phy_locked;
250#endif /* B43_DEBUG */
251};
252
253
254/**
255 * b43_phy_allocate - Allocate PHY structs
256 * Allocate the PHY data structures, based on the current dev->phy.type
257 */
258int b43_phy_allocate(struct b43_wldev *dev);
259
260/**
261 * b43_phy_free - Free PHY structs
262 */
263void b43_phy_free(struct b43_wldev *dev);
264
265/**
266 * b43_phy_init - Initialise the PHY
267 */
268int b43_phy_init(struct b43_wldev *dev);
269
270/**
271 * b43_phy_exit - Cleanup PHY
272 */
273void b43_phy_exit(struct b43_wldev *dev);
274
275/**
276 * b43_has_hardware_pctl - Hardware Power Control supported?
277 * Returns a boolean, whether hardware power control is supported.
278 */
279bool b43_has_hardware_pctl(struct b43_wldev *dev);
280
281/**
282 * b43_phy_read - 16bit PHY register read access
283 */
284u16 b43_phy_read(struct b43_wldev *dev, u16 reg);
285
286/**
287 * b43_phy_write - 16bit PHY register write access
288 */
289void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value);
290
291/**
292 * b43_phy_mask - Mask a PHY register with a mask
293 */
294void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask);
295
296/**
297 * b43_phy_set - OR a PHY register with a bitmap
298 */
299void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set);
300
301/**
302 * b43_phy_maskset - Mask and OR a PHY register with a mask and bitmap
303 */
304void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
305
306/**
307 * b43_radio_read - 16bit Radio register read access
308 */
309u16 b43_radio_read(struct b43_wldev *dev, u16 reg);
310#define b43_radio_read16 b43_radio_read /* DEPRECATED */
311
312/**
313 * b43_radio_write - 16bit Radio register write access
314 */
315void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value);
316#define b43_radio_write16 b43_radio_write /* DEPRECATED */
317
318/**
319 * b43_radio_mask - Mask a 16bit radio register with a mask
320 */
321void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask);
322
323/**
324 * b43_radio_set - OR a 16bit radio register with a bitmap
325 */
326void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
327
328/**
329 * b43_radio_maskset - Mask and OR a radio register with a mask and bitmap
330 */
331void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
332
333/**
334 * b43_radio_lock - Lock firmware radio register access
335 */
336void b43_radio_lock(struct b43_wldev *dev);
337
338/**
339 * b43_radio_unlock - Unlock firmware radio register access
340 */
341void b43_radio_unlock(struct b43_wldev *dev);
342
343/**
344 * b43_phy_lock - Lock firmware PHY register access
345 */
346void b43_phy_lock(struct b43_wldev *dev);
347
348/**
349 * b43_phy_unlock - Unlock firmware PHY register access
350 */
351void b43_phy_unlock(struct b43_wldev *dev);
352
353/**
354 * b43_switch_channel - Switch to another channel
355 */
356int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel);
357/**
358 * B43_DEFAULT_CHANNEL - Switch to the default channel.
359 */
360#define B43_DEFAULT_CHANNEL UINT_MAX
361
362/**
363 * b43_software_rfkill - Turn the radio ON or OFF in software.
364 */
365void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state);
366
367/**
368 * b43_phy_txpower_check - Check TX power output.
369 *
370 * Compare the current TX power output to the desired power emission
371 * and schedule an adjustment in case it mismatches.
372 * Requires wl->irq_lock locked.
373 *
374 * @flags: OR'ed enum b43_phy_txpower_check_flags flags.
375 * See the docs below.
376 */
377void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags);
378/**
379 * enum b43_phy_txpower_check_flags - Flags for b43_phy_txpower_check()
380 *
381 * @B43_TXPWR_IGNORE_TIME: Ignore the schedule time and force-redo
382 * the check now.
383 * @B43_TXPWR_IGNORE_TSSI: Redo the recalculation, even if the average
384 * TSSI did not change.
385 */
386enum b43_phy_txpower_check_flags {
387 B43_TXPWR_IGNORE_TIME = (1 << 0),
388 B43_TXPWR_IGNORE_TSSI = (1 << 1),
389};
390
391struct work_struct;
392void b43_phy_txpower_adjust_work(struct work_struct *work);
393
394/**
395 * b43_phy_shm_tssi_read - Read the average of the last 4 TSSI from SHM.
396 *
397 * @shm_offset: The SHM address to read the values from.
398 *
399 * Returns the average of the 4 TSSI values, or a negative error code.
400 */
401int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
402
403/**
 404 * b43_phyop_switch_analog_generic - Generic PHY operation for switching the Analog.
405 *
406 * It does the switching based on the PHY0 core register.
407 * Do _not_ call this directly. Only use it as a switch_analog callback
408 * for struct b43_phy_operations.
409 */
410void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
411
412
413#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy_g.c
index 305d4cd6fd03..232181f6333c 100644
--- a/drivers/net/wireless/b43/phy.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1,10 +1,11 @@
1/* 1/*
2 2
3 Broadcom B43 wireless driver 3 Broadcom B43 wireless driver
4 IEEE 802.11g PHY driver
4 5
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, 6 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it> 7 Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
7 Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> 8 Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
8 Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org> 9 Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
9 Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch> 10 Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10 11
@@ -25,38 +26,14 @@
25 26
26*/ 27*/
27 28
28#include <linux/delay.h>
29#include <linux/io.h>
30#include <linux/types.h>
31#include <linux/bitrev.h>
32
33#include "b43.h" 29#include "b43.h"
34#include "phy.h" 30#include "phy_g.h"
35#include "nphy.h" 31#include "phy_common.h"
36#include "main.h"
37#include "tables.h"
38#include "lo.h" 32#include "lo.h"
39#include "wa.h" 33#include "main.h"
40 34
41 35#include <linux/bitrev.h>
42static const s8 b43_tssi2dbm_b_table[] = { 36
43 0x4D, 0x4C, 0x4B, 0x4A,
44 0x4A, 0x49, 0x48, 0x47,
45 0x47, 0x46, 0x45, 0x45,
46 0x44, 0x43, 0x42, 0x42,
47 0x41, 0x40, 0x3F, 0x3E,
48 0x3D, 0x3C, 0x3B, 0x3A,
49 0x39, 0x38, 0x37, 0x36,
50 0x35, 0x34, 0x32, 0x31,
51 0x30, 0x2F, 0x2D, 0x2C,
52 0x2B, 0x29, 0x28, 0x26,
53 0x25, 0x23, 0x21, 0x1F,
54 0x1D, 0x1A, 0x17, 0x14,
55 0x10, 0x0C, 0x06, 0x00,
56 -7, -7, -7, -7,
57 -7, -7, -7, -7,
58 -7, -7, -7, -7,
59};
60 37
61static const s8 b43_tssi2dbm_g_table[] = { 38static const s8 b43_tssi2dbm_g_table[] = {
62 77, 77, 77, 76, 39 77, 77, 77, 76,
@@ -84,8 +61,20 @@ const u8 b43_radio_channel_codes_bg[] = {
84 72, 84, 61 72, 84,
85}; 62};
86 63
64
65static void b43_calc_nrssi_threshold(struct b43_wldev *dev);
66
67
87#define bitrev4(tmp) (bitrev8(tmp) >> 4) 68#define bitrev4(tmp) (bitrev8(tmp) >> 4)
88static void b43_phy_initg(struct b43_wldev *dev); 69
70
71/* Get the freq, as it has to be written to the device. */
72static inline u16 channel2freq_bg(u8 channel)
73{
74 B43_WARN_ON(!(channel >= 1 && channel <= 14));
75
76 return b43_radio_channel_codes_bg[channel - 1];
77}
89 78
90static void generate_rfatt_list(struct b43_wldev *dev, 79static void generate_rfatt_list(struct b43_wldev *dev,
91 struct b43_rfatt_list *list) 80 struct b43_rfatt_list *list)
@@ -130,7 +119,7 @@ static void generate_rfatt_list(struct b43_wldev *dev,
130 {.att = 9,.with_padmix = 1,}, 119 {.att = 9,.with_padmix = 1,},
131 }; 120 };
132 121
133 if (!b43_has_hardware_pctl(phy)) { 122 if (!b43_has_hardware_pctl(dev)) {
134 /* Software pctl */ 123 /* Software pctl */
135 list->list = rfatt_0; 124 list->list = rfatt_0;
136 list->len = ARRAY_SIZE(rfatt_0); 125 list->len = ARRAY_SIZE(rfatt_0);
@@ -174,140 +163,55 @@ static void generate_bbatt_list(struct b43_wldev *dev,
174 list->max_val = 8; 163 list->max_val = 8;
175} 164}
176 165
177bool b43_has_hardware_pctl(struct b43_phy *phy)
178{
179 if (!phy->hardware_power_control)
180 return 0;
181 switch (phy->type) {
182 case B43_PHYTYPE_A:
183 if (phy->rev >= 5)
184 return 1;
185 break;
186 case B43_PHYTYPE_G:
187 if (phy->rev >= 6)
188 return 1;
189 break;
190 default:
191 B43_WARN_ON(1);
192 }
193 return 0;
194}
195
196static void b43_shm_clear_tssi(struct b43_wldev *dev) 166static void b43_shm_clear_tssi(struct b43_wldev *dev)
197{ 167{
198 struct b43_phy *phy = &dev->phy; 168 b43_shm_write16(dev, B43_SHM_SHARED, 0x0058, 0x7F7F);
199 169 b43_shm_write16(dev, B43_SHM_SHARED, 0x005a, 0x7F7F);
200 switch (phy->type) { 170 b43_shm_write16(dev, B43_SHM_SHARED, 0x0070, 0x7F7F);
201 case B43_PHYTYPE_A: 171 b43_shm_write16(dev, B43_SHM_SHARED, 0x0072, 0x7F7F);
202 b43_shm_write16(dev, B43_SHM_SHARED, 0x0068, 0x7F7F);
203 b43_shm_write16(dev, B43_SHM_SHARED, 0x006a, 0x7F7F);
204 break;
205 case B43_PHYTYPE_B:
206 case B43_PHYTYPE_G:
207 b43_shm_write16(dev, B43_SHM_SHARED, 0x0058, 0x7F7F);
208 b43_shm_write16(dev, B43_SHM_SHARED, 0x005a, 0x7F7F);
209 b43_shm_write16(dev, B43_SHM_SHARED, 0x0070, 0x7F7F);
210 b43_shm_write16(dev, B43_SHM_SHARED, 0x0072, 0x7F7F);
211 break;
212 }
213}
214
215/* Lock the PHY registers against concurrent access from the microcode.
216 * This lock is nonrecursive. */
217void b43_phy_lock(struct b43_wldev *dev)
218{
219#if B43_DEBUG
220 B43_WARN_ON(dev->phy.phy_locked);
221 dev->phy.phy_locked = 1;
222#endif
223 B43_WARN_ON(dev->dev->id.revision < 3);
224
225 if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP))
226 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
227} 172}
228 173
229void b43_phy_unlock(struct b43_wldev *dev) 174/* Synthetic PU workaround */
175static void b43_synth_pu_workaround(struct b43_wldev *dev, u8 channel)
230{ 176{
231#if B43_DEBUG 177 struct b43_phy *phy = &dev->phy;
232 B43_WARN_ON(!dev->phy.phy_locked);
233 dev->phy.phy_locked = 0;
234#endif
235 B43_WARN_ON(dev->dev->id.revision < 3);
236 178
237 if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) 179 might_sleep();
238 b43_power_saving_ctl_bits(dev, 0);
239}
240 180
241/* Different PHYs require different register routing flags. 181 if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) {
242 * This adjusts (and does sanity checks on) the routing flags. 182 /* We do not need the workaround. */
243 */ 183 return;
244static inline u16 adjust_phyreg_for_phytype(struct b43_phy *phy,
245 u16 offset, struct b43_wldev *dev)
246{
247 if (phy->type == B43_PHYTYPE_A) {
248 /* OFDM registers are base-registers for the A-PHY. */
249 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
250 offset &= ~B43_PHYROUTE;
251 offset |= B43_PHYROUTE_BASE;
252 }
253 } 184 }
254 185
255#if B43_DEBUG 186 if (channel <= 10) {
256 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) { 187 b43_write16(dev, B43_MMIO_CHANNEL,
257 /* Ext-G registers are only available on G-PHYs */ 188 channel2freq_bg(channel + 4));
258 if (phy->type != B43_PHYTYPE_G) { 189 } else {
259 b43err(dev->wl, "Invalid EXT-G PHY access at " 190 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(1));
260 "0x%04X on PHY type %u\n", offset, phy->type);
261 dump_stack();
262 }
263 }
264 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) {
265 /* N-BMODE registers are only available on N-PHYs */
266 if (phy->type != B43_PHYTYPE_N) {
267 b43err(dev->wl, "Invalid N-BMODE PHY access at "
268 "0x%04X on PHY type %u\n", offset, phy->type);
269 dump_stack();
270 }
271 } 191 }
272#endif /* B43_DEBUG */ 192 msleep(1);
273 193 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
274 return offset;
275}
276
277u16 b43_phy_read(struct b43_wldev * dev, u16 offset)
278{
279 struct b43_phy *phy = &dev->phy;
280
281 offset = adjust_phyreg_for_phytype(phy, offset, dev);
282 b43_write16(dev, B43_MMIO_PHY_CONTROL, offset);
283 return b43_read16(dev, B43_MMIO_PHY_DATA);
284} 194}
285 195
286void b43_phy_write(struct b43_wldev *dev, u16 offset, u16 val) 196/* Set the baseband attenuation value on chip. */
197void b43_gphy_set_baseband_attenuation(struct b43_wldev *dev,
198 u16 baseband_attenuation)
287{ 199{
288 struct b43_phy *phy = &dev->phy; 200 struct b43_phy *phy = &dev->phy;
289 201
290 offset = adjust_phyreg_for_phytype(phy, offset, dev); 202 if (phy->analog == 0) {
291 b43_write16(dev, B43_MMIO_PHY_CONTROL, offset); 203 b43_write16(dev, B43_MMIO_PHY0, (b43_read16(dev, B43_MMIO_PHY0)
292 b43_write16(dev, B43_MMIO_PHY_DATA, val); 204 & 0xFFF0) |
293} 205 baseband_attenuation);
294 206 } else if (phy->analog > 1) {
295void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask) 207 b43_phy_write(dev, B43_PHY_DACCTL,
296{ 208 (b43_phy_read(dev, B43_PHY_DACCTL)
297 b43_phy_write(dev, offset, 209 & 0xFFC3) | (baseband_attenuation << 2));
298 b43_phy_read(dev, offset) & mask); 210 } else {
299} 211 b43_phy_write(dev, B43_PHY_DACCTL,
300 212 (b43_phy_read(dev, B43_PHY_DACCTL)
301void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set) 213 & 0xFF87) | (baseband_attenuation << 3));
302{ 214 }
303 b43_phy_write(dev, offset,
304 b43_phy_read(dev, offset) | set);
305}
306
307void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
308{
309 b43_phy_write(dev, offset,
310 (b43_phy_read(dev, offset) & mask) | set);
311} 215}
312 216
313/* Adjust the transmission power output (G-PHY) */ 217/* Adjust the transmission power output (G-PHY) */
@@ -316,7 +220,8 @@ void b43_set_txpower_g(struct b43_wldev *dev,
316 const struct b43_rfatt *rfatt, u8 tx_control) 220 const struct b43_rfatt *rfatt, u8 tx_control)
317{ 221{
318 struct b43_phy *phy = &dev->phy; 222 struct b43_phy *phy = &dev->phy;
319 struct b43_txpower_lo_control *lo = phy->lo_control; 223 struct b43_phy_g *gphy = phy->g;
224 struct b43_txpower_lo_control *lo = gphy->lo_control;
320 u16 bb, rf; 225 u16 bb, rf;
321 u16 tx_bias, tx_magn; 226 u16 tx_bias, tx_magn;
322 227
@@ -327,11 +232,12 @@ void b43_set_txpower_g(struct b43_wldev *dev,
327 if (unlikely(tx_bias == 0xFF)) 232 if (unlikely(tx_bias == 0xFF))
328 tx_bias = 0; 233 tx_bias = 0;
329 234
330 /* Save the values for later */ 235 /* Save the values for later. Use memmove, because it's valid
331 phy->tx_control = tx_control; 236 * to pass &gphy->rfatt as rfatt pointer argument. Same for bbatt. */
332 memcpy(&phy->rfatt, rfatt, sizeof(*rfatt)); 237 gphy->tx_control = tx_control;
333 phy->rfatt.with_padmix = !!(tx_control & B43_TXCTL_TXMIX); 238 memmove(&gphy->rfatt, rfatt, sizeof(*rfatt));
334 memcpy(&phy->bbatt, bbatt, sizeof(*bbatt)); 239 gphy->rfatt.with_padmix = !!(tx_control & B43_TXCTL_TXMIX);
240 memmove(&gphy->bbatt, bbatt, sizeof(*bbatt));
335 241
336 if (b43_debug(dev, B43_DBG_XMITPOWER)) { 242 if (b43_debug(dev, B43_DBG_XMITPOWER)) {
337 b43dbg(dev->wl, "Tuning TX-power to bbatt(%u), " 243 b43dbg(dev->wl, "Tuning TX-power to bbatt(%u), "
@@ -340,7 +246,7 @@ void b43_set_txpower_g(struct b43_wldev *dev,
340 bb, rf, tx_control, tx_bias, tx_magn); 246 bb, rf, tx_control, tx_bias, tx_magn);
341 } 247 }
342 248
343 b43_phy_set_baseband_attenuation(dev, bb); 249 b43_gphy_set_baseband_attenuation(dev, bb);
344 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RFATT, rf); 250 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RFATT, rf);
345 if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { 251 if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) {
346 b43_radio_write16(dev, 0x43, 252 b43_radio_write16(dev, 0x43,
@@ -358,179 +264,23 @@ void b43_set_txpower_g(struct b43_wldev *dev,
358 b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52) 264 b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52)
359 & 0xFFF0) | (tx_bias & 0x000F)); 265 & 0xFFF0) | (tx_bias & 0x000F));
360 } 266 }
361 if (phy->type == B43_PHYTYPE_G) 267 b43_lo_g_adjust(dev);
362 b43_lo_g_adjust(dev);
363}
364
365static void default_baseband_attenuation(struct b43_wldev *dev,
366 struct b43_bbatt *bb)
367{
368 struct b43_phy *phy = &dev->phy;
369
370 if (phy->radio_ver == 0x2050 && phy->radio_rev < 6)
371 bb->att = 0;
372 else
373 bb->att = 2;
374}
375
376static void default_radio_attenuation(struct b43_wldev *dev,
377 struct b43_rfatt *rf)
378{
379 struct ssb_bus *bus = dev->dev->bus;
380 struct b43_phy *phy = &dev->phy;
381
382 rf->with_padmix = 0;
383
384 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
385 bus->boardinfo.type == SSB_BOARD_BCM4309G) {
386 if (bus->boardinfo.rev < 0x43) {
387 rf->att = 2;
388 return;
389 } else if (bus->boardinfo.rev < 0x51) {
390 rf->att = 3;
391 return;
392 }
393 }
394
395 if (phy->type == B43_PHYTYPE_A) {
396 rf->att = 0x60;
397 return;
398 }
399
400 switch (phy->radio_ver) {
401 case 0x2053:
402 switch (phy->radio_rev) {
403 case 1:
404 rf->att = 6;
405 return;
406 }
407 break;
408 case 0x2050:
409 switch (phy->radio_rev) {
410 case 0:
411 rf->att = 5;
412 return;
413 case 1:
414 if (phy->type == B43_PHYTYPE_G) {
415 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
416 && bus->boardinfo.type == SSB_BOARD_BCM4309G
417 && bus->boardinfo.rev >= 30)
418 rf->att = 3;
419 else if (bus->boardinfo.vendor ==
420 SSB_BOARDVENDOR_BCM
421 && bus->boardinfo.type ==
422 SSB_BOARD_BU4306)
423 rf->att = 3;
424 else
425 rf->att = 1;
426 } else {
427 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
428 && bus->boardinfo.type == SSB_BOARD_BCM4309G
429 && bus->boardinfo.rev >= 30)
430 rf->att = 7;
431 else
432 rf->att = 6;
433 }
434 return;
435 case 2:
436 if (phy->type == B43_PHYTYPE_G) {
437 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
438 && bus->boardinfo.type == SSB_BOARD_BCM4309G
439 && bus->boardinfo.rev >= 30)
440 rf->att = 3;
441 else if (bus->boardinfo.vendor ==
442 SSB_BOARDVENDOR_BCM
443 && bus->boardinfo.type ==
444 SSB_BOARD_BU4306)
445 rf->att = 5;
446 else if (bus->chip_id == 0x4320)
447 rf->att = 4;
448 else
449 rf->att = 3;
450 } else
451 rf->att = 6;
452 return;
453 case 3:
454 rf->att = 5;
455 return;
456 case 4:
457 case 5:
458 rf->att = 1;
459 return;
460 case 6:
461 case 7:
462 rf->att = 5;
463 return;
464 case 8:
465 rf->att = 0xA;
466 rf->with_padmix = 1;
467 return;
468 case 9:
469 default:
470 rf->att = 5;
471 return;
472 }
473 }
474 rf->att = 5;
475}
476
477static u16 default_tx_control(struct b43_wldev *dev)
478{
479 struct b43_phy *phy = &dev->phy;
480
481 if (phy->radio_ver != 0x2050)
482 return 0;
483 if (phy->radio_rev == 1)
484 return B43_TXCTL_PA2DB | B43_TXCTL_TXMIX;
485 if (phy->radio_rev < 6)
486 return B43_TXCTL_PA2DB;
487 if (phy->radio_rev == 8)
488 return B43_TXCTL_TXMIX;
489 return 0;
490}
491
492/* This func is called "PHY calibrate" in the specs... */
493void b43_phy_early_init(struct b43_wldev *dev)
494{
495 struct b43_phy *phy = &dev->phy;
496 struct b43_txpower_lo_control *lo = phy->lo_control;
497
498 default_baseband_attenuation(dev, &phy->bbatt);
499 default_radio_attenuation(dev, &phy->rfatt);
500 phy->tx_control = (default_tx_control(dev) << 4);
501
502 /* Commit previous writes */
503 b43_read32(dev, B43_MMIO_MACCTL);
504
505 if (phy->type == B43_PHYTYPE_B || phy->type == B43_PHYTYPE_G) {
506 generate_rfatt_list(dev, &lo->rfatt_list);
507 generate_bbatt_list(dev, &lo->bbatt_list);
508 }
509 if (phy->type == B43_PHYTYPE_G && phy->rev == 1) {
510 /* Workaround: Temporarly disable gmode through the early init
511 * phase, as the gmode stuff is not needed for phy rev 1 */
512 phy->gmode = 0;
513 b43_wireless_core_reset(dev, 0);
514 b43_phy_initg(dev);
515 phy->gmode = 1;
516 b43_wireless_core_reset(dev, B43_TMSLOW_GMODE);
517 }
518} 268}
519 269
520/* GPHY_TSSI_Power_Lookup_Table_Init */ 270/* GPHY_TSSI_Power_Lookup_Table_Init */
521static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev) 271static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev)
522{ 272{
523 struct b43_phy *phy = &dev->phy; 273 struct b43_phy_g *gphy = dev->phy.g;
524 int i; 274 int i;
525 u16 value; 275 u16 value;
526 276
527 for (i = 0; i < 32; i++) 277 for (i = 0; i < 32; i++)
528 b43_ofdmtab_write16(dev, 0x3C20, i, phy->tssi2dbm[i]); 278 b43_ofdmtab_write16(dev, 0x3C20, i, gphy->tssi2dbm[i]);
529 for (i = 32; i < 64; i++) 279 for (i = 32; i < 64; i++)
530 b43_ofdmtab_write16(dev, 0x3C00, i - 32, phy->tssi2dbm[i]); 280 b43_ofdmtab_write16(dev, 0x3C00, i - 32, gphy->tssi2dbm[i]);
531 for (i = 0; i < 64; i += 2) { 281 for (i = 0; i < 64; i += 2) {
532 value = (u16) phy->tssi2dbm[i]; 282 value = (u16) gphy->tssi2dbm[i];
533 value |= ((u16) phy->tssi2dbm[i + 1]) << 8; 283 value |= ((u16) gphy->tssi2dbm[i + 1]) << 8;
534 b43_phy_write(dev, 0x380 + (i / 2), value); 284 b43_phy_write(dev, 0x380 + (i / 2), value);
535 } 285 }
536} 286}
@@ -539,7 +289,8 @@ static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev)
539static void b43_gphy_gain_lt_init(struct b43_wldev *dev) 289static void b43_gphy_gain_lt_init(struct b43_wldev *dev)
540{ 290{
541 struct b43_phy *phy = &dev->phy; 291 struct b43_phy *phy = &dev->phy;
542 struct b43_txpower_lo_control *lo = phy->lo_control; 292 struct b43_phy_g *gphy = phy->g;
293 struct b43_txpower_lo_control *lo = gphy->lo_control;
543 u16 nr_written = 0; 294 u16 nr_written = 0;
544 u16 tmp; 295 u16 tmp;
545 u8 rf, bb; 296 u8 rf, bb;
@@ -561,1509 +312,6 @@ static void b43_gphy_gain_lt_init(struct b43_wldev *dev)
561 } 312 }
562} 313}
563 314
/* Hardware power control init for the A-PHY. Not implemented yet. */
static void hardware_pctl_init_aphy(struct b43_wldev *dev)
{
	//TODO
}
568
/* Hardware power control init for the G-PHY.
 * Programs the idle-TSSI calibration delta, uploads the TSSI and gain
 * lookup tables and primes the DC lookup table. */
static void hardware_pctl_init_gphy(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;

	/* Write the idle-TSSI delta (target minus measured) into the
	 * low bits of PHY regs 0x36 and 0x478. */
	b43_phy_write(dev, 0x0036, (b43_phy_read(dev, 0x0036) & 0xFFC0)
		      | (phy->tgt_idle_tssi - phy->cur_idle_tssi));
	b43_phy_write(dev, 0x0478, (b43_phy_read(dev, 0x0478) & 0xFF00)
		      | (phy->tgt_idle_tssi - phy->cur_idle_tssi));
	b43_gphy_tssi_power_lt_init(dev);
	b43_gphy_gain_lt_init(dev);
	b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) & 0xFFBF);
	b43_phy_write(dev, 0x0014, 0x0000);

	/* This register sequence is only valid for G-PHY rev >= 6. */
	B43_WARN_ON(phy->rev < 6);
	b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
		      | 0x0800);
	b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
		      & 0xFEFF);
	b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801)
		      & 0xFFBF);

	b43_gphy_dc_lt_init(dev, 1);
}
592
593/* HardwarePowerControl init for A and G PHY */
594static void b43_hardware_pctl_init(struct b43_wldev *dev)
595{
596 struct b43_phy *phy = &dev->phy;
597
598 if (!b43_has_hardware_pctl(phy)) {
599 /* No hardware power control */
600 b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_HWPCTL);
601 return;
602 }
603 /* Init the hwpctl related hardware */
604 switch (phy->type) {
605 case B43_PHYTYPE_A:
606 hardware_pctl_init_aphy(dev);
607 break;
608 case B43_PHYTYPE_G:
609 hardware_pctl_init_gphy(dev);
610 break;
611 default:
612 B43_WARN_ON(1);
613 }
614 /* Enable hardware pctl in firmware. */
615 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
616}
617
/* Early hardware power control setup.
 * Register values are magic numbers from the reverse-engineered specs;
 * the 0x2050 rev 8 radio takes a slightly different sequence. */
static void b43_hardware_pctl_early_init(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;

	if (!b43_has_hardware_pctl(phy)) {
		/* No hardware power control: just program the default. */
		b43_phy_write(dev, 0x047A, 0xC111);
		return;
	}

	b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) & 0xFEFF);
	b43_phy_write(dev, 0x002F, 0x0202);
	b43_phy_write(dev, 0x047C, b43_phy_read(dev, 0x047C) | 0x0002);
	b43_phy_write(dev, 0x047A, b43_phy_read(dev, 0x047A) | 0xF000);
	if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) {
		/* Special sequence for the 2050 rev 8 radio. */
		b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
					    & 0xFF0F) | 0x0010);
		b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
			      | 0x8000);
		b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
					    & 0xFFC0) | 0x0010);
		b43_phy_write(dev, 0x002E, 0xC07F);
		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
			      | 0x0400);
	} else {
		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
			      | 0x0200);
		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
			      | 0x0400);
		b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
			      & 0x7FFF);
		b43_phy_write(dev, 0x004F, b43_phy_read(dev, 0x004F)
			      & 0xFFFE);
		b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
					    & 0xFFC0) | 0x0010);
		b43_phy_write(dev, 0x002E, 0xC07F);
		b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
					    & 0xFF0F) | 0x0010);
	}
}
657
/* Initialize B/G PHY power control
 * as described in http://bcm-specs.sipsolutions.net/InitPowerControl
 * Measures the current idle TSSI (if not yet known) by temporarily
 * forcing a known TX power, sending a dummy frame and reading back
 * the idle TSSI register, then restores the previous attenuation and
 * finishes the hardware power control init. */
static void b43_phy_init_pctl(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->dev->bus;
	struct b43_phy *phy = &dev->phy;
	struct b43_rfatt old_rfatt;
	struct b43_bbatt old_bbatt;
	u8 old_tx_control = 0;

	/* The BCM BU4306 bringup board skips power control entirely. */
	if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
	    (bus->boardinfo.type == SSB_BOARD_BU4306))
		return;

	b43_phy_write(dev, 0x0028, 0x8018);

	/* This does something with the Analog... */
	b43_write16(dev, B43_MMIO_PHY0, b43_read16(dev, B43_MMIO_PHY0)
		    & 0xFFDF);

	if (phy->type == B43_PHYTYPE_G && !phy->gmode)
		return;
	b43_hardware_pctl_early_init(dev);
	if (phy->cur_idle_tssi == 0) {
		/* Idle TSSI not measured yet: do it now. */
		if (phy->radio_ver == 0x2050 && phy->analog == 0) {
			b43_radio_write16(dev, 0x0076,
					  (b43_radio_read16(dev, 0x0076)
					   & 0x00F7) | 0x0084);
		} else {
			struct b43_rfatt rfatt;
			struct b43_bbatt bbatt;

			/* Save current attenuation, then force a fixed
			 * TX power for the measurement. Restored below
			 * in the matching else branch. */
			memcpy(&old_rfatt, &phy->rfatt, sizeof(old_rfatt));
			memcpy(&old_bbatt, &phy->bbatt, sizeof(old_bbatt));
			old_tx_control = phy->tx_control;

			bbatt.att = 11;
			if (phy->radio_rev == 8) {
				rfatt.att = 15;
				rfatt.with_padmix = 1;
			} else {
				rfatt.att = 9;
				rfatt.with_padmix = 0;
			}
			b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
		}
		/* Transmit a dummy frame so the hardware latches a TSSI. */
		b43_dummy_transmission(dev);
		phy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_ITSSI);
		if (B43_DEBUG) {
			/* Current-Idle-TSSI sanity check. */
			if (abs(phy->cur_idle_tssi - phy->tgt_idle_tssi) >= 20) {
				b43dbg(dev->wl,
				       "!WARNING! Idle-TSSI phy->cur_idle_tssi "
				       "measuring failed. (cur=%d, tgt=%d). Disabling TX power "
				       "adjustment.\n", phy->cur_idle_tssi,
				       phy->tgt_idle_tssi);
				phy->cur_idle_tssi = 0;
			}
		}
		/* Undo the measurement setup. */
		if (phy->radio_ver == 0x2050 && phy->analog == 0) {
			b43_radio_write16(dev, 0x0076,
					  b43_radio_read16(dev, 0x0076)
					  & 0xFF7B);
		} else {
			b43_set_txpower_g(dev, &old_bbatt,
					  &old_rfatt, old_tx_control);
		}
	}
	b43_hardware_pctl_init(dev);
	b43_shm_clear_tssi(dev);
}
730
731static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable)
732{
733 int i;
734
735 if (dev->phy.rev < 3) {
736 if (enable)
737 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
738 b43_ofdmtab_write16(dev,
739 B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8);
740 b43_ofdmtab_write16(dev,
741 B43_OFDMTAB_WRSSI, i, 0xFFF8);
742 }
743 else
744 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
745 b43_ofdmtab_write16(dev,
746 B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]);
747 b43_ofdmtab_write16(dev,
748 B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]);
749 }
750 } else {
751 if (enable)
752 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++)
753 b43_ofdmtab_write16(dev,
754 B43_OFDMTAB_WRSSI, i, 0x0820);
755 else
756 for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++)
757 b43_ofdmtab_write16(dev,
758 B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]);
759 }
760}
761
/* A-PHY "WW" workaround sequence (magic values from the specs).
 * Temporarily disables CRS, sweeps radio register 0x13 over
 * 0x10..0x20 to find the setting with the smallest residual signal,
 * programs that best value and restores CRS. */
static void b43_phy_ww(struct b43_wldev *dev)
{
	u16 b, curr_s, best_s = 0xFFFF;
	int i;

	/* Disable carrier sense while calibrating. */
	b43_phy_write(dev, B43_PHY_CRS0,
		      b43_phy_read(dev, B43_PHY_CRS0) & ~B43_PHY_CRS0_EN);
	b43_phy_write(dev, B43_PHY_OFDM(0x1B),
		      b43_phy_read(dev, B43_PHY_OFDM(0x1B)) | 0x1000);
	b43_phy_write(dev, B43_PHY_OFDM(0x82),
		      (b43_phy_read(dev, B43_PHY_OFDM(0x82)) & 0xF0FF) | 0x0300);
	b43_radio_write16(dev, 0x0009,
			  b43_radio_read16(dev, 0x0009) | 0x0080);
	b43_radio_write16(dev, 0x0012,
			  (b43_radio_read16(dev, 0x0012) & 0xFFFC) | 0x0002);
	b43_wa_initgains(dev);
	b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5);
	/* Save PWRDOWN; restored after the sweep below. */
	b = b43_phy_read(dev, B43_PHY_PWRDOWN);
	b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005);
	b43_radio_write16(dev, 0x0004,
			  b43_radio_read16(dev, 0x0004) | 0x0004);
	/* Sweep radio reg 0x13 and track the minimum measured value
	 * (values >= 0x80 are treated as negative and mirrored). */
	for (i = 0x10; i <= 0x20; i++) {
		b43_radio_write16(dev, 0x0013, i);
		curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF;
		if (!curr_s) {
			best_s = 0x0000;
			break;
		} else if (curr_s >= 0x0080)
			curr_s = 0x0100 - curr_s;
		if (curr_s < best_s)
			best_s = curr_s;
	}
	b43_phy_write(dev, B43_PHY_PWRDOWN, b);
	b43_radio_write16(dev, 0x0004,
			  b43_radio_read16(dev, 0x0004) & 0xFFFB);
	/* Commit the best setting found by the sweep. */
	b43_radio_write16(dev, 0x0013, best_s);
	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC);
	b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80);
	b43_phy_write(dev, B43_PHY_OFDM(0xB6), 0x1C00);
	b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0);
	b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0);
	b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF);
	b43_phy_write(dev, B43_PHY_OFDM(0xBB),
		      (b43_phy_read(dev, B43_PHY_OFDM(0xBB)) & 0xF000) | 0x0053);
	b43_phy_write(dev, B43_PHY_OFDM61,
		      (b43_phy_read(dev, B43_PHY_OFDM61) & 0xFE1F) | 0x0120);
	b43_phy_write(dev, B43_PHY_OFDM(0x13),
		      (b43_phy_read(dev, B43_PHY_OFDM(0x13)) & 0x0FFF) | 0x3000);
	b43_phy_write(dev, B43_PHY_OFDM(0x14),
		      (b43_phy_read(dev, B43_PHY_OFDM(0x14)) & 0x0FFF) | 0x3000);
	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017);
	for (i = 0; i < 6; i++)
		b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F);
	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E);
	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011);
	b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013);
	b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030);
	/* Re-enable carrier sense. */
	b43_phy_write(dev, B43_PHY_CRS0,
		      b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);
}
822
/* Initialize APHY. This is also called for the GPHY in some cases. */
static void b43_phy_inita(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->dev->bus;
	struct b43_phy *phy = &dev->phy;

	/* Several sub-init routines sleep (e.g. radio init delays). */
	might_sleep();

	if (phy->rev >= 6) {
		if (phy->type == B43_PHYTYPE_A)
			b43_phy_write(dev, B43_PHY_OFDM(0x1B),
				      b43_phy_read(dev, B43_PHY_OFDM(0x1B)) & ~0x1000);
		/* Toggle the "Encore" feature bit depending on whether
		 * the hardware reports it as enabled. */
		if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
			b43_phy_write(dev, B43_PHY_ENCORE,
				      b43_phy_read(dev, B43_PHY_ENCORE) | 0x0010);
		else
			b43_phy_write(dev, B43_PHY_ENCORE,
				      b43_phy_read(dev, B43_PHY_ENCORE) & ~0x1010);
	}

	/* Apply all PHY workarounds. */
	b43_wa_all(dev);

	if (phy->type == B43_PHYTYPE_A) {
		if (phy->gmode && (phy->rev < 3))
			b43_phy_write(dev, 0x0034,
				      b43_phy_read(dev, 0x0034) | 0x0001);
		/* Disable the RSSI AGC override tables. */
		b43_phy_rssiagc(dev, 0);

		b43_phy_write(dev, B43_PHY_CRS0,
			      b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);

		b43_radio_init2060(dev);

		if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
		    ((bus->boardinfo.type == SSB_BOARD_BU4306) ||
		     (bus->boardinfo.type == SSB_BOARD_BU4309))) {
			; //TODO: A PHY LO
		}

		if (phy->rev >= 3)
			b43_phy_ww(dev);

		hardware_pctl_init_aphy(dev);

		//TODO: radar detection
	}

	if ((phy->type == B43_PHYTYPE_G) &&
	    (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) {
		/* Board has a power amplifier control line. */
		b43_phy_write(dev, B43_PHY_OFDM(0x6E),
			      (b43_phy_read(dev, B43_PHY_OFDM(0x6E))
			       & 0xE000) | 0x3CF);
	}
}
877
/* Initialize the B-PHY, rev 5 variant (used by G-PHY rev 1 too,
 * see b43_phy_initg). Magic register values come from the specs. */
static void b43_phy_initb5(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->dev->bus;
	struct b43_phy *phy = &dev->phy;
	u16 offset, value;
	u8 old_channel;

	if (phy->analog == 1) {
		b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A)
				  | 0x0050);
	}
	if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) &&
	    (bus->boardinfo.type != SSB_BOARD_BU4306)) {
		/* Fill PHY regs 0xA8..0xC6 with an incrementing pattern.
		 * NOTE(review): bound is 0x00C7 here but 0x00C8 in initb6;
		 * presumably intentional per the specs — confirm. */
		value = 0x2120;
		for (offset = 0x00A8; offset < 0x00C7; offset++) {
			b43_phy_write(dev, offset, value);
			value += 0x202;
		}
	}
	b43_phy_write(dev, 0x0035, (b43_phy_read(dev, 0x0035) & 0xF0FF)
		      | 0x0700);
	if (phy->radio_ver == 0x2050)
		b43_phy_write(dev, 0x0038, 0x0667);

	if (phy->gmode || phy->rev >= 2) {
		if (phy->radio_ver == 0x2050) {
			b43_radio_write16(dev, 0x007A,
					  b43_radio_read16(dev, 0x007A)
					  | 0x0020);
			b43_radio_write16(dev, 0x0051,
					  b43_radio_read16(dev, 0x0051)
					  | 0x0004);
		}
		b43_write16(dev, B43_MMIO_PHY_RADIO, 0x0000);

		b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
		b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);

		b43_phy_write(dev, 0x001C, 0x186A);

		b43_phy_write(dev, 0x0013,
			      (b43_phy_read(dev, 0x0013) & 0x00FF) | 0x1900);
		b43_phy_write(dev, 0x0035,
			      (b43_phy_read(dev, 0x0035) & 0xFFC0) | 0x0064);
		b43_phy_write(dev, 0x005D,
			      (b43_phy_read(dev, 0x005D) & 0xFF80) | 0x000A);
	}

	if (dev->bad_frames_preempt) {
		b43_phy_write(dev, B43_PHY_RADIO_BITFIELD,
			      b43_phy_read(dev,
					   B43_PHY_RADIO_BITFIELD) | (1 << 11));
	}

	if (phy->analog == 1) {
		b43_phy_write(dev, 0x0026, 0xCE00);
		b43_phy_write(dev, 0x0021, 0x3763);
		b43_phy_write(dev, 0x0022, 0x1BC3);
		b43_phy_write(dev, 0x0023, 0x06F9);
		b43_phy_write(dev, 0x0024, 0x037E);
	} else
		b43_phy_write(dev, 0x0026, 0xCC00);
	b43_phy_write(dev, 0x0030, 0x00C6);
	b43_write16(dev, 0x03EC, 0x3F22);

	if (phy->analog == 1)
		b43_phy_write(dev, 0x0020, 0x3E1C);
	else
		b43_phy_write(dev, 0x0020, 0x301C);

	if (phy->analog == 0)
		b43_write16(dev, 0x03E4, 0x3000);

	old_channel = phy->channel;
	/* Force to channel 7, even if not supported. */
	b43_radio_selectchannel(dev, 7, 0);

	if (phy->radio_ver != 0x2050) {
		b43_radio_write16(dev, 0x0075, 0x0080);
		b43_radio_write16(dev, 0x0079, 0x0081);
	}

	b43_radio_write16(dev, 0x0050, 0x0020);
	b43_radio_write16(dev, 0x0050, 0x0023);

	if (phy->radio_ver == 0x2050) {
		b43_radio_write16(dev, 0x0050, 0x0020);
		b43_radio_write16(dev, 0x005A, 0x0070);
	}

	b43_radio_write16(dev, 0x005B, 0x007B);
	b43_radio_write16(dev, 0x005C, 0x00B0);

	b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0007);

	/* Return to the channel we were on before the forced switch. */
	b43_radio_selectchannel(dev, old_channel, 0);

	b43_phy_write(dev, 0x0014, 0x0080);
	b43_phy_write(dev, 0x0032, 0x00CA);
	b43_phy_write(dev, 0x002A, 0x88A3);

	/* Re-apply the currently configured TX power. */
	b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control);

	if (phy->radio_ver == 0x2050)
		b43_radio_write16(dev, 0x005D, 0x000D);

	b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
}
986
/* Initialize the B-PHY, rev 6 variant (also used for newer G-PHYs,
 * see b43_phy_initg). Magic register values come from the specs. */
static void b43_phy_initb6(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	u16 offset, val;
	u8 old_channel;

	b43_phy_write(dev, 0x003E, 0x817A);
	b43_radio_write16(dev, 0x007A,
			  (b43_radio_read16(dev, 0x007A) | 0x0058));
	if (phy->radio_rev == 4 || phy->radio_rev == 5) {
		/* Radio rev 4/5 specific register bank. */
		b43_radio_write16(dev, 0x51, 0x37);
		b43_radio_write16(dev, 0x52, 0x70);
		b43_radio_write16(dev, 0x53, 0xB3);
		b43_radio_write16(dev, 0x54, 0x9B);
		b43_radio_write16(dev, 0x5A, 0x88);
		b43_radio_write16(dev, 0x5B, 0x88);
		b43_radio_write16(dev, 0x5D, 0x88);
		b43_radio_write16(dev, 0x5E, 0x88);
		b43_radio_write16(dev, 0x7D, 0x88);
		b43_hf_write(dev, b43_hf_read(dev)
			     | B43_HF_TSSIRPSMW);
	}
	B43_WARN_ON(phy->radio_rev == 6 || phy->radio_rev == 7);	/* We had code for these revs here... */
	if (phy->radio_rev == 8) {
		/* Radio rev 8 specific register bank. */
		b43_radio_write16(dev, 0x51, 0);
		b43_radio_write16(dev, 0x52, 0x40);
		b43_radio_write16(dev, 0x53, 0xB7);
		b43_radio_write16(dev, 0x54, 0x98);
		b43_radio_write16(dev, 0x5A, 0x88);
		b43_radio_write16(dev, 0x5B, 0x6B);
		b43_radio_write16(dev, 0x5C, 0x0F);
		if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) {
			/* Board uses alternate I/Q settings. */
			b43_radio_write16(dev, 0x5D, 0xFA);
			b43_radio_write16(dev, 0x5E, 0xD8);
		} else {
			b43_radio_write16(dev, 0x5D, 0xF5);
			b43_radio_write16(dev, 0x5E, 0xB8);
		}
		b43_radio_write16(dev, 0x0073, 0x0003);
		b43_radio_write16(dev, 0x007D, 0x00A8);
		b43_radio_write16(dev, 0x007C, 0x0001);
		b43_radio_write16(dev, 0x007E, 0x0008);
	}
	/* Fill three PHY register ranges with fixed ramp patterns. */
	val = 0x1E1F;
	for (offset = 0x0088; offset < 0x0098; offset++) {
		b43_phy_write(dev, offset, val);
		val -= 0x0202;
	}
	val = 0x3E3F;
	for (offset = 0x0098; offset < 0x00A8; offset++) {
		b43_phy_write(dev, offset, val);
		val -= 0x0202;
	}
	val = 0x2120;
	for (offset = 0x00A8; offset < 0x00C8; offset++) {
		b43_phy_write(dev, offset, (val & 0x3F3F));
		val += 0x0202;
	}
	if (phy->type == B43_PHYTYPE_G) {
		b43_radio_write16(dev, 0x007A,
				  b43_radio_read16(dev, 0x007A) | 0x0020);
		b43_radio_write16(dev, 0x0051,
				  b43_radio_read16(dev, 0x0051) | 0x0004);
		b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
		b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
		b43_phy_write(dev, 0x5B, 0);
		b43_phy_write(dev, 0x5C, 0);
	}

	/* Temporarily hop to a distant channel for calibration. */
	old_channel = phy->channel;
	if (old_channel >= 8)
		b43_radio_selectchannel(dev, 1, 0);
	else
		b43_radio_selectchannel(dev, 13, 0);

	b43_radio_write16(dev, 0x0050, 0x0020);
	b43_radio_write16(dev, 0x0050, 0x0023);
	udelay(40);
	if (phy->radio_rev < 6 || phy->radio_rev == 8) {
		b43_radio_write16(dev, 0x7C, (b43_radio_read16(dev, 0x7C)
					      | 0x0002));
		b43_radio_write16(dev, 0x50, 0x20);
	}
	if (phy->radio_rev <= 2) {
		b43_radio_write16(dev, 0x7C, 0x20);
		b43_radio_write16(dev, 0x5A, 0x70);
		b43_radio_write16(dev, 0x5B, 0x7B);
		b43_radio_write16(dev, 0x5C, 0xB0);
	}
	b43_radio_write16(dev, 0x007A,
			  (b43_radio_read16(dev, 0x007A) & 0x00F8) | 0x0007);

	/* Return to the channel we were on before. */
	b43_radio_selectchannel(dev, old_channel, 0);

	b43_phy_write(dev, 0x0014, 0x0200);
	if (phy->radio_rev >= 6)
		b43_phy_write(dev, 0x2A, 0x88C2);
	else
		b43_phy_write(dev, 0x2A, 0x8AC0);
	b43_phy_write(dev, 0x0038, 0x0668);
	/* Re-apply the currently configured TX power. */
	b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control);
	if (phy->radio_rev <= 5) {
		b43_phy_write(dev, 0x5D, (b43_phy_read(dev, 0x5D)
					  & 0xFF80) | 0x0003);
	}
	if (phy->radio_rev <= 2)
		b43_radio_write16(dev, 0x005D, 0x000D);

	if (phy->analog == 4) {
		b43_write16(dev, 0x3E4, 9);
		b43_phy_write(dev, 0x61, b43_phy_read(dev, 0x61)
			      & 0x0FFF);
	} else {
		b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0)
			      | 0x0004);
	}
	if (phy->type == B43_PHYTYPE_B)
		B43_WARN_ON(1);
	else if (phy->type == B43_PHYTYPE_G)
		b43_write16(dev, 0x03E6, 0x0);
}
1108
/* Measure the loopback gain of the radio/PHY chain.
 * Saves a large set of PHY and radio registers, puts the hardware
 * into a loopback measurement configuration, sweeps gain settings
 * until the LO leakage detector trips, then restores all registers.
 * Results are stored in phy->max_lb_gain and phy->trsw_rx_gain. */
static void b43_calc_loopback_gain(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	u16 backup_phy[16] = { 0 };
	u16 backup_radio[3];
	u16 backup_bband;
	u16 i, j, loop_i_max;
	u16 trsw_rx;
	u16 loop1_outer_done, loop1_inner_done;

	/* --- Save all registers this measurement will clobber. --- */
	backup_phy[0] = b43_phy_read(dev, B43_PHY_CRS0);
	backup_phy[1] = b43_phy_read(dev, B43_PHY_CCKBBANDCFG);
	backup_phy[2] = b43_phy_read(dev, B43_PHY_RFOVER);
	backup_phy[3] = b43_phy_read(dev, B43_PHY_RFOVERVAL);
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		backup_phy[4] = b43_phy_read(dev, B43_PHY_ANALOGOVER);
		backup_phy[5] = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL);
	}
	backup_phy[6] = b43_phy_read(dev, B43_PHY_CCK(0x5A));
	backup_phy[7] = b43_phy_read(dev, B43_PHY_CCK(0x59));
	backup_phy[8] = b43_phy_read(dev, B43_PHY_CCK(0x58));
	backup_phy[9] = b43_phy_read(dev, B43_PHY_CCK(0x0A));
	backup_phy[10] = b43_phy_read(dev, B43_PHY_CCK(0x03));
	backup_phy[11] = b43_phy_read(dev, B43_PHY_LO_MASK);
	backup_phy[12] = b43_phy_read(dev, B43_PHY_LO_CTL);
	backup_phy[13] = b43_phy_read(dev, B43_PHY_CCK(0x2B));
	backup_phy[14] = b43_phy_read(dev, B43_PHY_PGACTL);
	backup_phy[15] = b43_phy_read(dev, B43_PHY_LO_LEAKAGE);
	backup_bband = phy->bbatt.att;
	backup_radio[0] = b43_radio_read16(dev, 0x52);
	backup_radio[1] = b43_radio_read16(dev, 0x43);
	backup_radio[2] = b43_radio_read16(dev, 0x7A);

	/* --- Configure the hardware for the loopback measurement. --- */
	b43_phy_write(dev, B43_PHY_CRS0,
		      b43_phy_read(dev, B43_PHY_CRS0) & 0x3FFF);
	b43_phy_write(dev, B43_PHY_CCKBBANDCFG,
		      b43_phy_read(dev, B43_PHY_CCKBBANDCFG) | 0x8000);
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0002);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFD);
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0001);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFE);
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		b43_phy_write(dev, B43_PHY_ANALOGOVER,
			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0001);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
			      b43_phy_read(dev,
					   B43_PHY_ANALOGOVERVAL) & 0xFFFE);
		b43_phy_write(dev, B43_PHY_ANALOGOVER,
			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0002);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
			      b43_phy_read(dev,
					   B43_PHY_ANALOGOVERVAL) & 0xFFFD);
	}
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x000C);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) | 0x000C);
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0030);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
		       & 0xFFCF) | 0x10);

	b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0780);
	b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810);
	b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D);

	b43_phy_write(dev, B43_PHY_CCK(0x0A),
		      b43_phy_read(dev, B43_PHY_CCK(0x0A)) | 0x2000);
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		b43_phy_write(dev, B43_PHY_ANALOGOVER,
			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0004);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
			      b43_phy_read(dev,
					   B43_PHY_ANALOGOVERVAL) & 0xFFFB);
	}
	b43_phy_write(dev, B43_PHY_CCK(0x03),
		      (b43_phy_read(dev, B43_PHY_CCK(0x03))
		       & 0xFF9F) | 0x40);

	if (phy->radio_rev == 8) {
		b43_radio_write16(dev, 0x43, 0x000F);
	} else {
		b43_radio_write16(dev, 0x52, 0);
		b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43)
					      & 0xFFF0) | 0x9);
	}
	b43_phy_set_baseband_attenuation(dev, 11);

	if (phy->rev >= 3)
		b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020);
	else
		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020);
	b43_phy_write(dev, B43_PHY_LO_CTL, 0);

	b43_phy_write(dev, B43_PHY_CCK(0x2B),
		      (b43_phy_read(dev, B43_PHY_CCK(0x2B))
		       & 0xFFC0) | 0x01);
	b43_phy_write(dev, B43_PHY_CCK(0x2B),
		      (b43_phy_read(dev, B43_PHY_CCK(0x2B))
		       & 0xC0FF) | 0x800);

	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0100);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xCFFF);

	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) {
		/* Board has an external LNA. */
		if (phy->rev >= 7) {
			b43_phy_write(dev, B43_PHY_RFOVER,
				      b43_phy_read(dev, B43_PHY_RFOVER)
				      | 0x0800);
			b43_phy_write(dev, B43_PHY_RFOVERVAL,
				      b43_phy_read(dev, B43_PHY_RFOVERVAL)
				      | 0x8000);
		}
	}
	b43_radio_write16(dev, 0x7A, b43_radio_read16(dev, 0x7A)
			  & 0x00F7);

	/* --- Loop 1: sweep radio gain (i) and override value (j)
	 * until the LO leakage detector reports >= 0xDFC. --- */
	j = 0;
	loop_i_max = (phy->radio_rev == 8) ? 15 : 9;
	for (i = 0; i < loop_i_max; i++) {
		for (j = 0; j < 16; j++) {
			b43_radio_write16(dev, 0x43, i);
			b43_phy_write(dev, B43_PHY_RFOVERVAL,
				      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
				       & 0xF0FF) | (j << 8));
			b43_phy_write(dev, B43_PHY_PGACTL,
				      (b43_phy_read(dev, B43_PHY_PGACTL)
				       & 0x0FFF) | 0xA000);
			b43_phy_write(dev, B43_PHY_PGACTL,
				      b43_phy_read(dev, B43_PHY_PGACTL)
				      | 0xF000);
			udelay(20);
			if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
				goto exit_loop1;
		}
	}
      exit_loop1:
	loop1_outer_done = i;
	loop1_inner_done = j;
	/* --- Loop 2: refine the TR-switch RX gain starting from the
	 * point where loop 1 stopped. --- */
	if (j >= 8) {
		b43_phy_write(dev, B43_PHY_RFOVERVAL,
			      b43_phy_read(dev, B43_PHY_RFOVERVAL)
			      | 0x30);
		trsw_rx = 0x1B;
		for (j = j - 8; j < 16; j++) {
			b43_phy_write(dev, B43_PHY_RFOVERVAL,
				      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
				       & 0xF0FF) | (j << 8));
			b43_phy_write(dev, B43_PHY_PGACTL,
				      (b43_phy_read(dev, B43_PHY_PGACTL)
				       & 0x0FFF) | 0xA000);
			b43_phy_write(dev, B43_PHY_PGACTL,
				      b43_phy_read(dev, B43_PHY_PGACTL)
				      | 0xF000);
			udelay(20);
			trsw_rx -= 3;
			if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
				goto exit_loop2;
		}
	} else
		trsw_rx = 0x18;
      exit_loop2:

	/* --- Restore everything we saved above. --- */
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		b43_phy_write(dev, B43_PHY_ANALOGOVER, backup_phy[4]);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, backup_phy[5]);
	}
	b43_phy_write(dev, B43_PHY_CCK(0x5A), backup_phy[6]);
	b43_phy_write(dev, B43_PHY_CCK(0x59), backup_phy[7]);
	b43_phy_write(dev, B43_PHY_CCK(0x58), backup_phy[8]);
	b43_phy_write(dev, B43_PHY_CCK(0x0A), backup_phy[9]);
	b43_phy_write(dev, B43_PHY_CCK(0x03), backup_phy[10]);
	b43_phy_write(dev, B43_PHY_LO_MASK, backup_phy[11]);
	b43_phy_write(dev, B43_PHY_LO_CTL, backup_phy[12]);
	b43_phy_write(dev, B43_PHY_CCK(0x2B), backup_phy[13]);
	b43_phy_write(dev, B43_PHY_PGACTL, backup_phy[14]);

	b43_phy_set_baseband_attenuation(dev, backup_bband);

	b43_radio_write16(dev, 0x52, backup_radio[0]);
	b43_radio_write16(dev, 0x43, backup_radio[1]);
	b43_radio_write16(dev, 0x7A, backup_radio[2]);

	b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2] | 0x0003);
	udelay(10);
	b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2]);
	b43_phy_write(dev, B43_PHY_RFOVERVAL, backup_phy[3]);
	b43_phy_write(dev, B43_PHY_CRS0, backup_phy[0]);
	b43_phy_write(dev, B43_PHY_CCKBBANDCFG, backup_phy[1]);

	/* Derive the gain results from where the loops stopped. */
	phy->max_lb_gain =
	    ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11;
	phy->trsw_rx_gain = trsw_rx * 2;
}
1310
/* Initialize the G-PHY.
 * Runs the appropriate B-PHY init first (rev 1 uses initb5, newer
 * revs initb6), optionally the A-PHY init, then applies G-PHY
 * specific setup: loopback gain, radio/LO init, NRSSI calibration
 * and power control init. */
static void b43_phy_initg(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	u16 tmp;

	if (phy->rev == 1)
		b43_phy_initb5(dev);
	else
		b43_phy_initb6(dev);

	if (phy->rev >= 2 || phy->gmode)
		b43_phy_inita(dev);

	if (phy->rev >= 2) {
		b43_phy_write(dev, B43_PHY_ANALOGOVER, 0);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, 0);
	}
	if (phy->rev == 2) {
		b43_phy_write(dev, B43_PHY_RFOVER, 0);
		b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
	}
	if (phy->rev > 5) {
		b43_phy_write(dev, B43_PHY_RFOVER, 0x400);
		b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
	}
	if (phy->gmode || phy->rev >= 2) {
		/* Extra setup for specific OFDM PHY core versions. */
		tmp = b43_phy_read(dev, B43_PHY_VERSION_OFDM);
		tmp &= B43_PHYVER_VERSION;
		if (tmp == 3 || tmp == 5) {
			b43_phy_write(dev, B43_PHY_OFDM(0xC2), 0x1816);
			b43_phy_write(dev, B43_PHY_OFDM(0xC3), 0x8006);
		}
		if (tmp == 5) {
			b43_phy_write(dev, B43_PHY_OFDM(0xCC),
				      (b43_phy_read(dev, B43_PHY_OFDM(0xCC))
				       & 0x00FF) | 0x1F00);
		}
	}
	if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2)
		b43_phy_write(dev, B43_PHY_OFDM(0x7E), 0x78);
	if (phy->radio_rev == 8) {
		b43_phy_write(dev, B43_PHY_EXTG(0x01),
			      b43_phy_read(dev, B43_PHY_EXTG(0x01))
			      | 0x80);
		b43_phy_write(dev, B43_PHY_OFDM(0x3E),
			      b43_phy_read(dev, B43_PHY_OFDM(0x3E))
			      | 0x4);
	}
	if (has_loopback_gain(phy))
		b43_calc_loopback_gain(dev);

	if (phy->radio_rev != 8) {
		/* Cache the 2050 radio init value across re-inits. */
		if (phy->initval == 0xFFFF)
			phy->initval = b43_radio_init2050(dev);
		else
			b43_radio_write16(dev, 0x0078, phy->initval);
	}
	b43_lo_g_init(dev);
	if (has_tx_magnification(phy)) {
		b43_radio_write16(dev, 0x52,
				  (b43_radio_read16(dev, 0x52) & 0xFF00)
				  | phy->lo_control->tx_bias | phy->
				  lo_control->tx_magn);
	} else {
		b43_radio_write16(dev, 0x52,
				  (b43_radio_read16(dev, 0x52) & 0xFFF0)
				  | phy->lo_control->tx_bias);
	}
	if (phy->rev >= 6) {
		b43_phy_write(dev, B43_PHY_CCK(0x36),
			      (b43_phy_read(dev, B43_PHY_CCK(0x36))
			       & 0x0FFF) | (phy->lo_control->
					    tx_bias << 12));
	}
	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
	else
		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
	if (phy->rev < 2)
		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101);
	else
		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202);
	if (phy->gmode || phy->rev >= 2) {
		b43_lo_g_adjust(dev);
		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
	}

	if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
		/* The specs state to update the NRSSI LT with
		 * the value 0x7FFFFFFF here. I think that is some weird
		 * compiler optimization in the original driver.
		 * Essentially, what we do here is resetting all NRSSI LT
		 * entries to -32 (see the clamp_val() in nrssi_hw_update())
		 */
		b43_nrssi_hw_update(dev, 0xFFFF);	//FIXME?
		b43_calc_nrssi_threshold(dev);
	} else if (phy->gmode || phy->rev >= 2) {
		/* nrssi[] == -1000 marks "not yet measured". */
		if (phy->nrssi[0] == -1000) {
			B43_WARN_ON(phy->nrssi[1] != -1000);
			b43_calc_nrssi_slope(dev);
		} else
			b43_calc_nrssi_threshold(dev);
	}
	if (phy->radio_rev == 8)
		b43_phy_write(dev, B43_PHY_EXTG(0x05), 0x3230);
	b43_phy_init_pctl(dev);
	/* FIXME: The spec says in the following if, the 0 should be replaced
	   'if OFDM may not be used in the current locale'
	   but OFDM is legal everywhere */
	if ((dev->dev->bus->chip_id == 0x4306
	     && dev->dev->bus->chip_package == 2) || 0) {
		b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0)
			      & 0xBFFF);
		b43_phy_write(dev, B43_PHY_OFDM(0xC3),
			      b43_phy_read(dev, B43_PHY_OFDM(0xC3))
			      & 0x7FFF);
	}
}
1429
1430/* Set the baseband attenuation value on chip. */
1431void b43_phy_set_baseband_attenuation(struct b43_wldev *dev,
1432 u16 baseband_attenuation)
1433{
1434 struct b43_phy *phy = &dev->phy;
1435
1436 if (phy->analog == 0) {
1437 b43_write16(dev, B43_MMIO_PHY0, (b43_read16(dev, B43_MMIO_PHY0)
1438 & 0xFFF0) |
1439 baseband_attenuation);
1440 } else if (phy->analog > 1) {
1441 b43_phy_write(dev, B43_PHY_DACCTL,
1442 (b43_phy_read(dev, B43_PHY_DACCTL)
1443 & 0xFFC3) | (baseband_attenuation << 2));
1444 } else {
1445 b43_phy_write(dev, B43_PHY_DACCTL,
1446 (b43_phy_read(dev, B43_PHY_DACCTL)
1447 & 0xFF87) | (baseband_attenuation << 3));
1448 }
1449}
1450
1451/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
1452 * This function converts a TSSI value to dBm in Q5.2
1453 */
1454static s8 b43_phy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
1455{
1456 struct b43_phy *phy = &dev->phy;
1457 s8 dbm = 0;
1458 s32 tmp;
1459
1460 tmp = (phy->tgt_idle_tssi - phy->cur_idle_tssi + tssi);
1461
1462 switch (phy->type) {
1463 case B43_PHYTYPE_A:
1464 tmp += 0x80;
1465 tmp = clamp_val(tmp, 0x00, 0xFF);
1466 dbm = phy->tssi2dbm[tmp];
1467 //TODO: There's a FIXME on the specs
1468 break;
1469 case B43_PHYTYPE_B:
1470 case B43_PHYTYPE_G:
1471 tmp = clamp_val(tmp, 0x00, 0x3F);
1472 dbm = phy->tssi2dbm[tmp];
1473 break;
1474 default:
1475 B43_WARN_ON(1);
1476 }
1477
1478 return dbm;
1479}
1480
1481void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
1482 int *_bbatt, int *_rfatt)
1483{
1484 int rfatt = *_rfatt;
1485 int bbatt = *_bbatt;
1486 struct b43_txpower_lo_control *lo = dev->phy.lo_control;
1487
1488 /* Get baseband and radio attenuation values into their permitted ranges.
1489 * Radio attenuation affects power level 4 times as much as baseband. */
1490
1491 /* Range constants */
1492 const int rf_min = lo->rfatt_list.min_val;
1493 const int rf_max = lo->rfatt_list.max_val;
1494 const int bb_min = lo->bbatt_list.min_val;
1495 const int bb_max = lo->bbatt_list.max_val;
1496
1497 while (1) {
1498 if (rfatt > rf_max && bbatt > bb_max - 4)
1499 break; /* Can not get it into ranges */
1500 if (rfatt < rf_min && bbatt < bb_min + 4)
1501 break; /* Can not get it into ranges */
1502 if (bbatt > bb_max && rfatt > rf_max - 1)
1503 break; /* Can not get it into ranges */
1504 if (bbatt < bb_min && rfatt < rf_min + 1)
1505 break; /* Can not get it into ranges */
1506
1507 if (bbatt > bb_max) {
1508 bbatt -= 4;
1509 rfatt += 1;
1510 continue;
1511 }
1512 if (bbatt < bb_min) {
1513 bbatt += 4;
1514 rfatt -= 1;
1515 continue;
1516 }
1517 if (rfatt > rf_max) {
1518 rfatt -= 1;
1519 bbatt += 4;
1520 continue;
1521 }
1522 if (rfatt < rf_min) {
1523 rfatt += 1;
1524 bbatt -= 4;
1525 continue;
1526 }
1527 break;
1528 }
1529
1530 *_rfatt = clamp_val(rfatt, rf_min, rf_max);
1531 *_bbatt = clamp_val(bbatt, bb_min, bb_max);
1532}
1533
/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */
void b43_phy_xmitpower(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->dev->bus;
	struct b43_phy *phy = &dev->phy;

	/* Nothing to do until the idle TSSI has been measured. */
	if (phy->cur_idle_tssi == 0)
		return;
	/* No power control on the Broadcom BU4306 reference board. */
	if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
	    (bus->boardinfo.type == SSB_BOARD_BU4306))
		return;
#ifdef CONFIG_B43_DEBUG
	/* Debug builds may override automatic power control. */
	if (phy->manual_txpower_control)
		return;
#endif

	switch (phy->type) {
	case B43_PHYTYPE_A:{

			//TODO: Nothing for A PHYs yet :-/

			break;
		}
	case B43_PHYTYPE_B:
	case B43_PHYTYPE_G:{
			u16 tmp;
			s8 v0, v1, v2, v3;
			s8 average;
			int max_pwr;
			int desired_pwr, estimated_pwr, pwr_adjust;
			int rfatt_delta, bbatt_delta;
			int rfatt, bbatt;
			u8 tx_control;

			/* Fetch the four TSSI samples the firmware stored
			 * in shared memory. 0x7F marks an invalid sample. */
			tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x0058);
			v0 = (s8) (tmp & 0x00FF);
			v1 = (s8) ((tmp & 0xFF00) >> 8);
			tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x005A);
			v2 = (s8) (tmp & 0x00FF);
			v3 = (s8) ((tmp & 0xFF00) >> 8);
			tmp = 0;

			if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F
			    || v3 == 0x7F) {
				/* First sample set invalid; try the
				 * alternate shared-memory slots. */
				tmp =
				    b43_shm_read16(dev, B43_SHM_SHARED, 0x0070);
				v0 = (s8) (tmp & 0x00FF);
				v1 = (s8) ((tmp & 0xFF00) >> 8);
				tmp =
				    b43_shm_read16(dev, B43_SHM_SHARED, 0x0072);
				v2 = (s8) (tmp & 0x00FF);
				v3 = (s8) ((tmp & 0xFF00) >> 8);
				/* No valid samples at all: give up for now. */
				if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F
				    || v3 == 0x7F)
					return;
				v0 = (v0 + 0x20) & 0x3F;
				v1 = (v1 + 0x20) & 0x3F;
				v2 = (v2 + 0x20) & 0x3F;
				v3 = (v3 + 0x20) & 0x3F;
				/* Remember we used the alternate slots. */
				tmp = 1;
			}
			b43_shm_clear_tssi(dev);

			/* Rounded average of the four samples. */
			average = (v0 + v1 + v2 + v3 + 2) / 4;

			/* Extra correction when the alternate slots were
			 * used and SHM offset 0x5E has bit 3 set. */
			if (tmp
			    && (b43_shm_read16(dev, B43_SHM_SHARED, 0x005E) &
				0x8))
				average -= 13;

			estimated_pwr =
			    b43_phy_estimate_power_out(dev, average);

			/* SPROM maximum power (Q5.2). */
			max_pwr = dev->dev->bus->sprom.maxpwr_bg;
			if ((dev->dev->bus->sprom.boardflags_lo
			     & B43_BFL_PACTRL) && (phy->type == B43_PHYTYPE_G))
				max_pwr -= 0x3;
			if (unlikely(max_pwr <= 0)) {
				b43warn(dev->wl,
					"Invalid max-TX-power value in SPROM.\n");
				max_pwr = 60;	/* fake it */
				dev->dev->bus->sprom.maxpwr_bg = max_pwr;
			}

			/*TODO:
			   max_pwr = min(REG - dev->dev->bus->sprom.antennagain_bgphy - 0x6, max_pwr)
			   where REG is the max power as per the regulatory domain
			 */

			/* Get desired power (in Q5.2) */
			desired_pwr = INT_TO_Q52(phy->power_level);
			/* And limit it. max_pwr already is Q5.2 */
			desired_pwr = clamp_val(desired_pwr, 0, max_pwr);
			if (b43_debug(dev, B43_DBG_XMITPOWER)) {
				b43dbg(dev->wl,
				       "Current TX power output: " Q52_FMT
				       " dBm, " "Desired TX power output: "
				       Q52_FMT " dBm\n", Q52_ARG(estimated_pwr),
				       Q52_ARG(desired_pwr));
			}

			/* Calculate the adjustment delta. */
			pwr_adjust = desired_pwr - estimated_pwr;

			/* RF attenuation delta. */
			rfatt_delta = ((pwr_adjust + 7) / 8);
			/* Lower attenuation => Bigger power output. Negate it. */
			rfatt_delta = -rfatt_delta;

			/* Baseband attenuation delta. */
			bbatt_delta = pwr_adjust / 2;
			/* Lower attenuation => Bigger power output. Negate it. */
			bbatt_delta = -bbatt_delta;
			/* RF att affects power level 4 times as much as
			 * Baseband attennuation. Subtract it. */
			bbatt_delta -= 4 * rfatt_delta;

			/* So do we finally need to adjust something? */
			if ((rfatt_delta == 0) && (bbatt_delta == 0))
				return;

			/* Calculate the new attenuation values. */
			bbatt = phy->bbatt.att;
			bbatt += bbatt_delta;
			rfatt = phy->rfatt.att;
			rfatt += rfatt_delta;

			b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
			tx_control = phy->tx_control;
			/* Radio 0x2050 rev 2 needs special TX-control
			 * handling near the attenuation limits. */
			if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) {
				if (rfatt <= 1) {
					if (tx_control == 0) {
						tx_control =
						    B43_TXCTL_PA2DB |
						    B43_TXCTL_TXMIX;
						rfatt += 2;
						bbatt += 2;
					} else if (dev->dev->bus->sprom.
						   boardflags_lo &
						   B43_BFL_PACTRL) {
						bbatt += 4 * (rfatt - 2);
						rfatt = 2;
					}
				} else if (rfatt > 4 && tx_control) {
					tx_control = 0;
					if (bbatt < 3) {
						rfatt -= 3;
						bbatt += 2;
					} else {
						rfatt -= 2;
						bbatt -= 2;
					}
				}
			}
			/* Save the control values */
			phy->tx_control = tx_control;
			b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
			phy->rfatt.att = rfatt;
			phy->bbatt.att = bbatt;

			/* Adjust the hardware */
			b43_phy_lock(dev);
			b43_radio_lock(dev);
			b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt,
					  phy->tx_control);
			b43_radio_unlock(dev);
			b43_phy_unlock(dev);
			break;
		}
	case B43_PHYTYPE_N:
		b43_nphy_xmitpower(dev);
		break;
	default:
		B43_WARN_ON(1);
	}
}
1710
1711static inline s32 b43_tssi2dbm_ad(s32 num, s32 den)
1712{
1713 if (num < 0)
1714 return num / den;
1715 else
1716 return (num + den / 2) / den;
1717}
1718
1719static inline
1720 s8 b43_tssi2dbm_entry(s8 entry[], u8 index, s16 pab0, s16 pab1, s16 pab2)
1721{
1722 s32 m1, m2, f = 256, q, delta;
1723 s8 i = 0;
1724
1725 m1 = b43_tssi2dbm_ad(16 * pab0 + index * pab1, 32);
1726 m2 = max(b43_tssi2dbm_ad(32768 + index * pab2, 256), 1);
1727 do {
1728 if (i > 15)
1729 return -EINVAL;
1730 q = b43_tssi2dbm_ad(f * 4096 -
1731 b43_tssi2dbm_ad(m2 * f, 16) * f, 2048);
1732 delta = abs(q - f);
1733 f = q;
1734 i++;
1735 } while (delta >= 2);
1736 entry[index] = clamp_val(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128);
1737 return 0;
1738}
1739
1740/* http://bcm-specs.sipsolutions.net/TSSI_to_DBM_Table */
1741int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev)
1742{
1743 struct b43_phy *phy = &dev->phy;
1744 s16 pab0, pab1, pab2;
1745 u8 idx;
1746 s8 *dyn_tssi2dbm;
1747
1748 if (phy->type == B43_PHYTYPE_A) {
1749 pab0 = (s16) (dev->dev->bus->sprom.pa1b0);
1750 pab1 = (s16) (dev->dev->bus->sprom.pa1b1);
1751 pab2 = (s16) (dev->dev->bus->sprom.pa1b2);
1752 } else {
1753 pab0 = (s16) (dev->dev->bus->sprom.pa0b0);
1754 pab1 = (s16) (dev->dev->bus->sprom.pa0b1);
1755 pab2 = (s16) (dev->dev->bus->sprom.pa0b2);
1756 }
1757
1758 if ((dev->dev->bus->chip_id == 0x4301) && (phy->radio_ver != 0x2050)) {
1759 phy->tgt_idle_tssi = 0x34;
1760 phy->tssi2dbm = b43_tssi2dbm_b_table;
1761 return 0;
1762 }
1763
1764 if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
1765 pab0 != -1 && pab1 != -1 && pab2 != -1) {
1766 /* The pabX values are set in SPROM. Use them. */
1767 if (phy->type == B43_PHYTYPE_A) {
1768 if ((s8) dev->dev->bus->sprom.itssi_a != 0 &&
1769 (s8) dev->dev->bus->sprom.itssi_a != -1)
1770 phy->tgt_idle_tssi =
1771 (s8) (dev->dev->bus->sprom.itssi_a);
1772 else
1773 phy->tgt_idle_tssi = 62;
1774 } else {
1775 if ((s8) dev->dev->bus->sprom.itssi_bg != 0 &&
1776 (s8) dev->dev->bus->sprom.itssi_bg != -1)
1777 phy->tgt_idle_tssi =
1778 (s8) (dev->dev->bus->sprom.itssi_bg);
1779 else
1780 phy->tgt_idle_tssi = 62;
1781 }
1782 dyn_tssi2dbm = kmalloc(64, GFP_KERNEL);
1783 if (dyn_tssi2dbm == NULL) {
1784 b43err(dev->wl, "Could not allocate memory "
1785 "for tssi2dbm table\n");
1786 return -ENOMEM;
1787 }
1788 for (idx = 0; idx < 64; idx++)
1789 if (b43_tssi2dbm_entry
1790 (dyn_tssi2dbm, idx, pab0, pab1, pab2)) {
1791 phy->tssi2dbm = NULL;
1792 b43err(dev->wl, "Could not generate "
1793 "tssi2dBm table\n");
1794 kfree(dyn_tssi2dbm);
1795 return -ENODEV;
1796 }
1797 phy->tssi2dbm = dyn_tssi2dbm;
1798 phy->dyn_tssi_tbl = 1;
1799 } else {
1800 /* pabX values not set in SPROM. */
1801 switch (phy->type) {
1802 case B43_PHYTYPE_A:
1803 /* APHY needs a generated table. */
1804 phy->tssi2dbm = NULL;
1805 b43err(dev->wl, "Could not generate tssi2dBm "
1806 "table (wrong SPROM info)!\n");
1807 return -ENODEV;
1808 case B43_PHYTYPE_B:
1809 phy->tgt_idle_tssi = 0x34;
1810 phy->tssi2dbm = b43_tssi2dbm_b_table;
1811 break;
1812 case B43_PHYTYPE_G:
1813 phy->tgt_idle_tssi = 0x34;
1814 phy->tssi2dbm = b43_tssi2dbm_g_table;
1815 break;
1816 }
1817 }
1818
1819 return 0;
1820}
1821
1822int b43_phy_init(struct b43_wldev *dev)
1823{
1824 struct b43_phy *phy = &dev->phy;
1825 bool unsupported = 0;
1826 int err = 0;
1827
1828 switch (phy->type) {
1829 case B43_PHYTYPE_A:
1830 if (phy->rev == 2 || phy->rev == 3)
1831 b43_phy_inita(dev);
1832 else
1833 unsupported = 1;
1834 break;
1835 case B43_PHYTYPE_G:
1836 b43_phy_initg(dev);
1837 break;
1838 case B43_PHYTYPE_N:
1839 err = b43_phy_initn(dev);
1840 break;
1841 default:
1842 unsupported = 1;
1843 }
1844 if (unsupported)
1845 b43err(dev->wl, "Unknown PHYTYPE found\n");
1846
1847 return err;
1848}
1849
/* Program the RX antenna selection (fixed antenna or automatic
 * diversity) for the current PHY type. */
void b43_set_rx_antenna(struct b43_wldev *dev, int antenna)
{
	struct b43_phy *phy = &dev->phy;
	u64 hf;
	u16 tmp;
	int autodiv = 0;

	if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
		autodiv = 1;

	/* Disable the firmware's antenna diversity help while we
	 * reprogram the antenna registers; re-enabled at the end. */
	hf = b43_hf_read(dev);
	hf &= ~B43_HF_ANTDIVHELP;
	b43_hf_write(dev, hf);

	switch (phy->type) {
	case B43_PHYTYPE_A:
	case B43_PHYTYPE_G:
		/* Select the antenna in the baseband config register. */
		tmp = b43_phy_read(dev, B43_PHY_BBANDCFG);
		tmp &= ~B43_PHY_BBANDCFG_RXANT;
		tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
		    << B43_PHY_BBANDCFG_RXANT_SHIFT;
		b43_phy_write(dev, B43_PHY_BBANDCFG, tmp);

		if (autodiv) {
			tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
			if (antenna == B43_ANTENNA_AUTO0)
				tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
			else
				tmp |= B43_PHY_ANTDWELL_AUTODIV1;
			b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
		}
		if (phy->type == B43_PHYTYPE_G) {
			tmp = b43_phy_read(dev, B43_PHY_ANTWRSETT);
			if (autodiv)
				tmp |= B43_PHY_ANTWRSETT_ARXDIV;
			else
				tmp &= ~B43_PHY_ANTWRSETT_ARXDIV;
			b43_phy_write(dev, B43_PHY_ANTWRSETT, tmp);
			/* Revision-specific OFDM tweaks. */
			if (phy->rev >= 2) {
				tmp = b43_phy_read(dev, B43_PHY_OFDM61);
				tmp |= B43_PHY_OFDM61_10;
				b43_phy_write(dev, B43_PHY_OFDM61, tmp);

				tmp =
				    b43_phy_read(dev, B43_PHY_DIVSRCHGAINBACK);
				tmp = (tmp & 0xFF00) | 0x15;
				b43_phy_write(dev, B43_PHY_DIVSRCHGAINBACK,
					      tmp);

				if (phy->rev == 2) {
					b43_phy_write(dev, B43_PHY_ADIVRELATED,
						      8);
				} else {
					tmp =
					    b43_phy_read(dev,
							 B43_PHY_ADIVRELATED);
					tmp = (tmp & 0xFF00) | 8;
					b43_phy_write(dev, B43_PHY_ADIVRELATED,
						      tmp);
				}
			}
			if (phy->rev >= 6)
				b43_phy_write(dev, B43_PHY_OFDM9B, 0xDC);
		} else {
			/* A-PHY (non-G branch of the A/G case). */
			if (phy->rev < 3) {
				tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
				tmp = (tmp & 0xFF00) | 0x24;
				b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
			} else {
				tmp = b43_phy_read(dev, B43_PHY_OFDM61);
				tmp |= 0x10;
				b43_phy_write(dev, B43_PHY_OFDM61, tmp);
				if (phy->analog == 3) {
					b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
						      0x1D);
					b43_phy_write(dev, B43_PHY_ADIVRELATED,
						      8);
				} else {
					b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
						      0x3A);
					tmp =
					    b43_phy_read(dev,
							 B43_PHY_ADIVRELATED);
					tmp = (tmp & 0xFF00) | 8;
					b43_phy_write(dev, B43_PHY_ADIVRELATED,
						      tmp);
				}
			}
		}
		break;
	case B43_PHYTYPE_B:
		/* B-PHY uses the CCK baseband config register. */
		tmp = b43_phy_read(dev, B43_PHY_CCKBBANDCFG);
		tmp &= ~B43_PHY_BBANDCFG_RXANT;
		tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
		    << B43_PHY_BBANDCFG_RXANT_SHIFT;
		b43_phy_write(dev, B43_PHY_CCKBBANDCFG, tmp);
		break;
	case B43_PHYTYPE_N:
		b43_nphy_set_rxantenna(dev, antenna);
		break;
	default:
		B43_WARN_ON(1);
	}

	/* Re-enable the firmware's antenna diversity help. */
	hf |= B43_HF_ANTDIVHELP;
	b43_hf_write(dev, hf);
}
1957
1958/* Get the freq, as it has to be written to the device. */
1959static inline u16 channel2freq_bg(u8 channel)
1960{
1961 B43_WARN_ON(!(channel >= 1 && channel <= 14));
1962
1963 return b43_radio_channel_codes_bg[channel - 1];
1964}
1965
1966/* Get the freq, as it has to be written to the device. */
1967static inline u16 channel2freq_a(u8 channel)
1968{
1969 B43_WARN_ON(channel > 200);
1970
1971 return (5000 + 5 * channel);
1972}
1973
/* Acquire the hardware radio lock by setting the RADIOLOCK bit in the
 * MAC control register. Must not be called while the lock is held
 * (asserted below). Pairs with b43_radio_unlock(). */
void b43_radio_lock(struct b43_wldev *dev)
{
	u32 macctl;

	macctl = b43_read32(dev, B43_MMIO_MACCTL);
	B43_WARN_ON(macctl & B43_MACCTL_RADIOLOCK);
	macctl |= B43_MACCTL_RADIOLOCK;
	b43_write32(dev, B43_MMIO_MACCTL, macctl);
	/* Commit the write and wait for the device
	 * to exit any radio register access. */
	b43_read32(dev, B43_MMIO_MACCTL);
	udelay(10);
}
1987
/* Release the hardware radio lock taken by b43_radio_lock().
 * The lock must currently be held (asserted below). */
void b43_radio_unlock(struct b43_wldev *dev)
{
	u32 macctl;

	/* Commit any write */
	b43_read16(dev, B43_MMIO_PHY_VER);
	/* unlock */
	macctl = b43_read32(dev, B43_MMIO_MACCTL);
	B43_WARN_ON(!(macctl & B43_MACCTL_RADIOLOCK));
	macctl &= ~B43_MACCTL_RADIOLOCK;
	b43_write32(dev, B43_MMIO_MACCTL, macctl);
}
2000
/* Read a 16-bit radio register. The logical offset is first translated
 * into the address range used by this PHY/radio combination, then the
 * access is performed through the RADIO_CONTROL/RADIO_DATA_LOW pair. */
u16 b43_radio_read16(struct b43_wldev *dev, u16 offset)
{
	struct b43_phy *phy = &dev->phy;

	/* Offset 1 is a 32-bit register. */
	B43_WARN_ON(offset == 1);

	switch (phy->type) {
	case B43_PHYTYPE_A:
		offset |= 0x40;
		break;
	case B43_PHYTYPE_B:
		if (phy->radio_ver == 0x2053) {
			/* 2053 radio: 0x00-0x6F map to +0x80,
			 * 0x70-0x7F map to +0x70. */
			if (offset < 0x70)
				offset += 0x80;
			else if (offset < 0x80)
				offset += 0x70;
		} else if (phy->radio_ver == 0x2050) {
			offset |= 0x80;
		} else
			B43_WARN_ON(1);
		break;
	case B43_PHYTYPE_G:
		offset |= 0x80;
		break;
	case B43_PHYTYPE_N:
		offset |= 0x100;
		break;
	case B43_PHYTYPE_LP:
		/* No adjustment required. */
		break;
	default:
		B43_WARN_ON(1);
	}

	/* Select the register, then read the data. */
	b43_write16(dev, B43_MMIO_RADIO_CONTROL, offset);
	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
}
2039
/* Write a 16-bit radio register: select the register through
 * RADIO_CONTROL, then write the value to RADIO_DATA_LOW.
 * NOTE(review): unlike b43_radio_read16(), no PHY-type offset
 * translation is applied here — presumably callers pass the final
 * offset; confirm against the read path. */
void b43_radio_write16(struct b43_wldev *dev, u16 offset, u16 val)
{
	/* Offset 1 is a 32-bit register. */
	B43_WARN_ON(offset == 1);

	b43_write16(dev, B43_MMIO_RADIO_CONTROL, offset);
	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, val);
}
2048
2049void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask)
2050{
2051 b43_radio_write16(dev, offset,
2052 b43_radio_read16(dev, offset) & mask);
2053}
2054
2055void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set)
2056{
2057 b43_radio_write16(dev, offset,
2058 b43_radio_read16(dev, offset) | set);
2059}
2060
2061void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
2062{
2063 b43_radio_write16(dev, offset,
2064 (b43_radio_read16(dev, offset) & mask) | set);
2065}
2066
2067static void b43_set_all_gains(struct b43_wldev *dev, 315static void b43_set_all_gains(struct b43_wldev *dev,
2068 s16 first, s16 second, s16 third) 316 s16 first, s16 second, s16 third)
2069{ 317{
@@ -2134,108 +382,10 @@ static void b43_set_original_gains(struct b43_wldev *dev)
2134 b43_dummy_transmission(dev); 382 b43_dummy_transmission(dev);
2135} 383}
2136 384
2137/* Synthetic PU workaround */
2138static void b43_synth_pu_workaround(struct b43_wldev *dev, u8 channel)
2139{
2140 struct b43_phy *phy = &dev->phy;
2141
2142 might_sleep();
2143
2144 if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) {
2145 /* We do not need the workaround. */
2146 return;
2147 }
2148
2149 if (channel <= 10) {
2150 b43_write16(dev, B43_MMIO_CHANNEL,
2151 channel2freq_bg(channel + 4));
2152 } else {
2153 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(1));
2154 }
2155 msleep(1);
2156 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
2157}
2158
/* Detect adjacent-channel interference on the given channel.
 * Returns 1 if interference was detected, 0 otherwise. */
u8 b43_radio_aci_detect(struct b43_wldev *dev, u8 channel)
{
	struct b43_phy *phy = &dev->phy;
	u8 ret = 0;
	u16 saved, rssi, temp;
	int i, j = 0;

	/* Tune to the channel and set the RSSI threshold field to 5. */
	saved = b43_phy_read(dev, 0x0403);
	b43_radio_selectchannel(dev, channel, 0);
	b43_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5);
	if (phy->aci_hw_rssi)
		rssi = b43_phy_read(dev, 0x048A) & 0x3F;
	else
		rssi = saved & 0x3F;
	/* clamp temp to signed 5bit */
	/* NOTE(review): rssi and temp are u16, so the "-= 64" below wraps
	 * to a large unsigned value rather than going negative, and the
	 * "temp < rssi" compare is unsigned. Confirm whether a signed
	 * type was intended here. */
	if (rssi > 32)
		rssi -= 64;
	/* Sample the RSSI 100 times; report interference once 20
	 * samples fall below the reference value. */
	for (i = 0; i < 100; i++) {
		temp = (b43_phy_read(dev, 0x047F) >> 8) & 0x3F;
		if (temp > 32)
			temp -= 64;
		if (temp < rssi)
			j++;
		if (j >= 20)
			ret = 1;
	}
	b43_phy_write(dev, 0x0403, saved);

	return ret;
}
2189
2190u8 b43_radio_aci_scan(struct b43_wldev * dev)
2191{
2192 struct b43_phy *phy = &dev->phy;
2193 u8 ret[13];
2194 unsigned int channel = phy->channel;
2195 unsigned int i, j, start, end;
2196
2197 if (!((phy->type == B43_PHYTYPE_G) && (phy->rev > 0)))
2198 return 0;
2199
2200 b43_phy_lock(dev);
2201 b43_radio_lock(dev);
2202 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC);
2203 b43_phy_write(dev, B43_PHY_G_CRS,
2204 b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF);
2205 b43_set_all_gains(dev, 3, 8, 1);
2206
2207 start = (channel - 5 > 0) ? channel - 5 : 1;
2208 end = (channel + 5 < 14) ? channel + 5 : 13;
2209
2210 for (i = start; i <= end; i++) {
2211 if (abs(channel - i) > 2)
2212 ret[i - 1] = b43_radio_aci_detect(dev, i);
2213 }
2214 b43_radio_selectchannel(dev, channel, 0);
2215 b43_phy_write(dev, 0x0802,
2216 (b43_phy_read(dev, 0x0802) & 0xFFFC) | 0x0003);
2217 b43_phy_write(dev, 0x0403, b43_phy_read(dev, 0x0403) & 0xFFF8);
2218 b43_phy_write(dev, B43_PHY_G_CRS,
2219 b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000);
2220 b43_set_original_gains(dev);
2221 for (i = 0; i < 13; i++) {
2222 if (!ret[i])
2223 continue;
2224 end = (i + 5 < 13) ? i + 5 : 13;
2225 for (j = i; j < end; j++)
2226 ret[j] = 1;
2227 }
2228 b43_radio_unlock(dev);
2229 b43_phy_unlock(dev);
2230
2231 return ret[channel - 1];
2232}
2233
2234/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 385/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
2235void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val) 386void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val)
2236{ 387{
2237 b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); 388 b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset);
2238 mmiowb();
2239 b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val); 389 b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val);
2240} 390}
2241 391
@@ -2267,17 +417,17 @@ void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val)
2267/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 417/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
2268void b43_nrssi_mem_update(struct b43_wldev *dev) 418void b43_nrssi_mem_update(struct b43_wldev *dev)
2269{ 419{
2270 struct b43_phy *phy = &dev->phy; 420 struct b43_phy_g *gphy = dev->phy.g;
2271 s16 i, delta; 421 s16 i, delta;
2272 s32 tmp; 422 s32 tmp;
2273 423
2274 delta = 0x1F - phy->nrssi[0]; 424 delta = 0x1F - gphy->nrssi[0];
2275 for (i = 0; i < 64; i++) { 425 for (i = 0; i < 64; i++) {
2276 tmp = (i - delta) * phy->nrssislope; 426 tmp = (i - delta) * gphy->nrssislope;
2277 tmp /= 0x10000; 427 tmp /= 0x10000;
2278 tmp += 0x3A; 428 tmp += 0x3A;
2279 tmp = clamp_val(tmp, 0, 0x3F); 429 tmp = clamp_val(tmp, 0, 0x3F);
2280 phy->nrssi_lt[i] = tmp; 430 gphy->nrssi_lt[i] = tmp;
2281 } 431 }
2282} 432}
2283 433
@@ -2442,347 +592,230 @@ static void b43_calc_nrssi_offset(struct b43_wldev *dev)
2442void b43_calc_nrssi_slope(struct b43_wldev *dev) 592void b43_calc_nrssi_slope(struct b43_wldev *dev)
2443{ 593{
2444 struct b43_phy *phy = &dev->phy; 594 struct b43_phy *phy = &dev->phy;
595 struct b43_phy_g *gphy = phy->g;
2445 u16 backup[18] = { 0 }; 596 u16 backup[18] = { 0 };
2446 u16 tmp; 597 u16 tmp;
2447 s16 nrssi0, nrssi1; 598 s16 nrssi0, nrssi1;
2448 599
2449 switch (phy->type) { 600 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
2450 case B43_PHYTYPE_B:
2451 backup[0] = b43_radio_read16(dev, 0x007A);
2452 backup[1] = b43_radio_read16(dev, 0x0052);
2453 backup[2] = b43_radio_read16(dev, 0x0043);
2454 backup[3] = b43_phy_read(dev, 0x0030);
2455 backup[4] = b43_phy_read(dev, 0x0026);
2456 backup[5] = b43_phy_read(dev, 0x0015);
2457 backup[6] = b43_phy_read(dev, 0x002A);
2458 backup[7] = b43_phy_read(dev, 0x0020);
2459 backup[8] = b43_phy_read(dev, 0x005A);
2460 backup[9] = b43_phy_read(dev, 0x0059);
2461 backup[10] = b43_phy_read(dev, 0x0058);
2462 backup[11] = b43_read16(dev, 0x03E2);
2463 backup[12] = b43_read16(dev, 0x03E6);
2464 backup[13] = b43_read16(dev, B43_MMIO_CHANNEL_EXT);
2465
2466 tmp = b43_radio_read16(dev, 0x007A);
2467 tmp &= (phy->rev >= 5) ? 0x007F : 0x000F;
2468 b43_radio_write16(dev, 0x007A, tmp);
2469 b43_phy_write(dev, 0x0030, 0x00FF);
2470 b43_write16(dev, 0x03EC, 0x7F7F);
2471 b43_phy_write(dev, 0x0026, 0x0000);
2472 b43_phy_write(dev, 0x0015, b43_phy_read(dev, 0x0015) | 0x0020);
2473 b43_phy_write(dev, 0x002A, 0x08A3);
2474 b43_radio_write16(dev, 0x007A,
2475 b43_radio_read16(dev, 0x007A) | 0x0080);
2476 601
2477 nrssi0 = (s16) b43_phy_read(dev, 0x0027); 602 if (phy->radio_rev >= 9)
2478 b43_radio_write16(dev, 0x007A, 603 return;
2479 b43_radio_read16(dev, 0x007A) & 0x007F); 604 if (phy->radio_rev == 8)
2480 if (phy->rev >= 2) { 605 b43_calc_nrssi_offset(dev);
2481 b43_write16(dev, 0x03E6, 0x0040);
2482 } else if (phy->rev == 0) {
2483 b43_write16(dev, 0x03E6, 0x0122);
2484 } else {
2485 b43_write16(dev, B43_MMIO_CHANNEL_EXT,
2486 b43_read16(dev,
2487 B43_MMIO_CHANNEL_EXT) & 0x2000);
2488 }
2489 b43_phy_write(dev, 0x0020, 0x3F3F);
2490 b43_phy_write(dev, 0x0015, 0xF330);
2491 b43_radio_write16(dev, 0x005A, 0x0060);
2492 b43_radio_write16(dev, 0x0043,
2493 b43_radio_read16(dev, 0x0043) & 0x00F0);
2494 b43_phy_write(dev, 0x005A, 0x0480);
2495 b43_phy_write(dev, 0x0059, 0x0810);
2496 b43_phy_write(dev, 0x0058, 0x000D);
2497 udelay(20);
2498
2499 nrssi1 = (s16) b43_phy_read(dev, 0x0027);
2500 b43_phy_write(dev, 0x0030, backup[3]);
2501 b43_radio_write16(dev, 0x007A, backup[0]);
2502 b43_write16(dev, 0x03E2, backup[11]);
2503 b43_phy_write(dev, 0x0026, backup[4]);
2504 b43_phy_write(dev, 0x0015, backup[5]);
2505 b43_phy_write(dev, 0x002A, backup[6]);
2506 b43_synth_pu_workaround(dev, phy->channel);
2507 if (phy->rev != 0)
2508 b43_write16(dev, 0x03F4, backup[13]);
2509
2510 b43_phy_write(dev, 0x0020, backup[7]);
2511 b43_phy_write(dev, 0x005A, backup[8]);
2512 b43_phy_write(dev, 0x0059, backup[9]);
2513 b43_phy_write(dev, 0x0058, backup[10]);
2514 b43_radio_write16(dev, 0x0052, backup[1]);
2515 b43_radio_write16(dev, 0x0043, backup[2]);
2516
2517 if (nrssi0 == nrssi1)
2518 phy->nrssislope = 0x00010000;
2519 else
2520 phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
2521
2522 if (nrssi0 <= -4) {
2523 phy->nrssi[0] = nrssi0;
2524 phy->nrssi[1] = nrssi1;
2525 }
2526 break;
2527 case B43_PHYTYPE_G:
2528 if (phy->radio_rev >= 9)
2529 return;
2530 if (phy->radio_rev == 8)
2531 b43_calc_nrssi_offset(dev);
2532 606
2533 b43_phy_write(dev, B43_PHY_G_CRS, 607 b43_phy_write(dev, B43_PHY_G_CRS,
2534 b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF); 608 b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF);
2535 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC); 609 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC);
2536 backup[7] = b43_read16(dev, 0x03E2); 610 backup[7] = b43_read16(dev, 0x03E2);
2537 b43_write16(dev, 0x03E2, b43_read16(dev, 0x03E2) | 0x8000); 611 b43_write16(dev, 0x03E2, b43_read16(dev, 0x03E2) | 0x8000);
2538 backup[0] = b43_radio_read16(dev, 0x007A); 612 backup[0] = b43_radio_read16(dev, 0x007A);
2539 backup[1] = b43_radio_read16(dev, 0x0052); 613 backup[1] = b43_radio_read16(dev, 0x0052);
2540 backup[2] = b43_radio_read16(dev, 0x0043); 614 backup[2] = b43_radio_read16(dev, 0x0043);
2541 backup[3] = b43_phy_read(dev, 0x0015); 615 backup[3] = b43_phy_read(dev, 0x0015);
2542 backup[4] = b43_phy_read(dev, 0x005A); 616 backup[4] = b43_phy_read(dev, 0x005A);
2543 backup[5] = b43_phy_read(dev, 0x0059); 617 backup[5] = b43_phy_read(dev, 0x0059);
2544 backup[6] = b43_phy_read(dev, 0x0058); 618 backup[6] = b43_phy_read(dev, 0x0058);
2545 backup[8] = b43_read16(dev, 0x03E6); 619 backup[8] = b43_read16(dev, 0x03E6);
2546 backup[9] = b43_read16(dev, B43_MMIO_CHANNEL_EXT); 620 backup[9] = b43_read16(dev, B43_MMIO_CHANNEL_EXT);
2547 if (phy->rev >= 3) { 621 if (phy->rev >= 3) {
2548 backup[10] = b43_phy_read(dev, 0x002E); 622 backup[10] = b43_phy_read(dev, 0x002E);
2549 backup[11] = b43_phy_read(dev, 0x002F); 623 backup[11] = b43_phy_read(dev, 0x002F);
2550 backup[12] = b43_phy_read(dev, 0x080F); 624 backup[12] = b43_phy_read(dev, 0x080F);
2551 backup[13] = b43_phy_read(dev, B43_PHY_G_LO_CONTROL); 625 backup[13] = b43_phy_read(dev, B43_PHY_G_LO_CONTROL);
2552 backup[14] = b43_phy_read(dev, 0x0801); 626 backup[14] = b43_phy_read(dev, 0x0801);
2553 backup[15] = b43_phy_read(dev, 0x0060); 627 backup[15] = b43_phy_read(dev, 0x0060);
2554 backup[16] = b43_phy_read(dev, 0x0014); 628 backup[16] = b43_phy_read(dev, 0x0014);
2555 backup[17] = b43_phy_read(dev, 0x0478); 629 backup[17] = b43_phy_read(dev, 0x0478);
2556 b43_phy_write(dev, 0x002E, 0); 630 b43_phy_write(dev, 0x002E, 0);
2557 b43_phy_write(dev, B43_PHY_G_LO_CONTROL, 0); 631 b43_phy_write(dev, B43_PHY_G_LO_CONTROL, 0);
2558 switch (phy->rev) { 632 switch (phy->rev) {
2559 case 4: 633 case 4:
2560 case 6: 634 case 6:
2561 case 7: 635 case 7:
2562 b43_phy_write(dev, 0x0478, 636 b43_phy_write(dev, 0x0478,
2563 b43_phy_read(dev, 0x0478) 637 b43_phy_read(dev, 0x0478)
2564 | 0x0100); 638 | 0x0100);
2565 b43_phy_write(dev, 0x0801, 639 b43_phy_write(dev, 0x0801,
2566 b43_phy_read(dev, 0x0801) 640 b43_phy_read(dev, 0x0801)
2567 | 0x0040);
2568 break;
2569 case 3:
2570 case 5:
2571 b43_phy_write(dev, 0x0801,
2572 b43_phy_read(dev, 0x0801)
2573 & 0xFFBF);
2574 break;
2575 }
2576 b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060)
2577 | 0x0040); 641 | 0x0040);
2578 b43_phy_write(dev, 0x0014, b43_phy_read(dev, 0x0014) 642 break;
2579 | 0x0200); 643 case 3:
2580 } 644 case 5:
2581 b43_radio_write16(dev, 0x007A, 645 b43_phy_write(dev, 0x0801,
2582 b43_radio_read16(dev, 0x007A) | 0x0070); 646 b43_phy_read(dev, 0x0801)
2583 b43_set_all_gains(dev, 0, 8, 0); 647 & 0xFFBF);
2584 b43_radio_write16(dev, 0x007A, 648 break;
2585 b43_radio_read16(dev, 0x007A) & 0x00F7);
2586 if (phy->rev >= 2) {
2587 b43_phy_write(dev, 0x0811,
2588 (b43_phy_read(dev, 0x0811) & 0xFFCF) |
2589 0x0030);
2590 b43_phy_write(dev, 0x0812,
2591 (b43_phy_read(dev, 0x0812) & 0xFFCF) |
2592 0x0010);
2593 } 649 }
2594 b43_radio_write16(dev, 0x007A, 650 b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060)
2595 b43_radio_read16(dev, 0x007A) | 0x0080); 651 | 0x0040);
2596 udelay(20); 652 b43_phy_write(dev, 0x0014, b43_phy_read(dev, 0x0014)
653 | 0x0200);
654 }
655 b43_radio_write16(dev, 0x007A,
656 b43_radio_read16(dev, 0x007A) | 0x0070);
657 b43_set_all_gains(dev, 0, 8, 0);
658 b43_radio_write16(dev, 0x007A,
659 b43_radio_read16(dev, 0x007A) & 0x00F7);
660 if (phy->rev >= 2) {
661 b43_phy_write(dev, 0x0811,
662 (b43_phy_read(dev, 0x0811) & 0xFFCF) |
663 0x0030);
664 b43_phy_write(dev, 0x0812,
665 (b43_phy_read(dev, 0x0812) & 0xFFCF) |
666 0x0010);
667 }
668 b43_radio_write16(dev, 0x007A,
669 b43_radio_read16(dev, 0x007A) | 0x0080);
670 udelay(20);
2597 671
2598 nrssi0 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); 672 nrssi0 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F);
2599 if (nrssi0 >= 0x0020) 673 if (nrssi0 >= 0x0020)
2600 nrssi0 -= 0x0040; 674 nrssi0 -= 0x0040;
2601 675
2602 b43_radio_write16(dev, 0x007A, 676 b43_radio_write16(dev, 0x007A,
2603 b43_radio_read16(dev, 0x007A) & 0x007F); 677 b43_radio_read16(dev, 0x007A) & 0x007F);
2604 if (phy->rev >= 2) { 678 if (phy->rev >= 2) {
2605 b43_phy_write(dev, 0x0003, (b43_phy_read(dev, 0x0003) 679 b43_phy_write(dev, 0x0003, (b43_phy_read(dev, 0x0003)
2606 & 0xFF9F) | 0x0040); 680 & 0xFF9F) | 0x0040);
2607 } 681 }
2608
2609 b43_write16(dev, B43_MMIO_CHANNEL_EXT,
2610 b43_read16(dev, B43_MMIO_CHANNEL_EXT)
2611 | 0x2000);
2612 b43_radio_write16(dev, 0x007A,
2613 b43_radio_read16(dev, 0x007A) | 0x000F);
2614 b43_phy_write(dev, 0x0015, 0xF330);
2615 if (phy->rev >= 2) {
2616 b43_phy_write(dev, 0x0812,
2617 (b43_phy_read(dev, 0x0812) & 0xFFCF) |
2618 0x0020);
2619 b43_phy_write(dev, 0x0811,
2620 (b43_phy_read(dev, 0x0811) & 0xFFCF) |
2621 0x0020);
2622 }
2623 682
2624 b43_set_all_gains(dev, 3, 0, 1); 683 b43_write16(dev, B43_MMIO_CHANNEL_EXT,
2625 if (phy->radio_rev == 8) { 684 b43_read16(dev, B43_MMIO_CHANNEL_EXT)
2626 b43_radio_write16(dev, 0x0043, 0x001F); 685 | 0x2000);
2627 } else { 686 b43_radio_write16(dev, 0x007A,
2628 tmp = b43_radio_read16(dev, 0x0052) & 0xFF0F; 687 b43_radio_read16(dev, 0x007A) | 0x000F);
2629 b43_radio_write16(dev, 0x0052, tmp | 0x0060); 688 b43_phy_write(dev, 0x0015, 0xF330);
2630 tmp = b43_radio_read16(dev, 0x0043) & 0xFFF0; 689 if (phy->rev >= 2) {
2631 b43_radio_write16(dev, 0x0043, tmp | 0x0009); 690 b43_phy_write(dev, 0x0812,
2632 } 691 (b43_phy_read(dev, 0x0812) & 0xFFCF) |
2633 b43_phy_write(dev, 0x005A, 0x0480); 692 0x0020);
2634 b43_phy_write(dev, 0x0059, 0x0810); 693 b43_phy_write(dev, 0x0811,
2635 b43_phy_write(dev, 0x0058, 0x000D); 694 (b43_phy_read(dev, 0x0811) & 0xFFCF) |
2636 udelay(20); 695 0x0020);
2637 nrssi1 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); 696 }
2638 if (nrssi1 >= 0x0020)
2639 nrssi1 -= 0x0040;
2640 if (nrssi0 == nrssi1)
2641 phy->nrssislope = 0x00010000;
2642 else
2643 phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
2644 if (nrssi0 >= -4) {
2645 phy->nrssi[0] = nrssi1;
2646 phy->nrssi[1] = nrssi0;
2647 }
2648 if (phy->rev >= 3) {
2649 b43_phy_write(dev, 0x002E, backup[10]);
2650 b43_phy_write(dev, 0x002F, backup[11]);
2651 b43_phy_write(dev, 0x080F, backup[12]);
2652 b43_phy_write(dev, B43_PHY_G_LO_CONTROL, backup[13]);
2653 }
2654 if (phy->rev >= 2) {
2655 b43_phy_write(dev, 0x0812,
2656 b43_phy_read(dev, 0x0812) & 0xFFCF);
2657 b43_phy_write(dev, 0x0811,
2658 b43_phy_read(dev, 0x0811) & 0xFFCF);
2659 }
2660 697
2661 b43_radio_write16(dev, 0x007A, backup[0]); 698 b43_set_all_gains(dev, 3, 0, 1);
2662 b43_radio_write16(dev, 0x0052, backup[1]); 699 if (phy->radio_rev == 8) {
2663 b43_radio_write16(dev, 0x0043, backup[2]); 700 b43_radio_write16(dev, 0x0043, 0x001F);
2664 b43_write16(dev, 0x03E2, backup[7]); 701 } else {
2665 b43_write16(dev, 0x03E6, backup[8]); 702 tmp = b43_radio_read16(dev, 0x0052) & 0xFF0F;
2666 b43_write16(dev, B43_MMIO_CHANNEL_EXT, backup[9]); 703 b43_radio_write16(dev, 0x0052, tmp | 0x0060);
2667 b43_phy_write(dev, 0x0015, backup[3]); 704 tmp = b43_radio_read16(dev, 0x0043) & 0xFFF0;
2668 b43_phy_write(dev, 0x005A, backup[4]); 705 b43_radio_write16(dev, 0x0043, tmp | 0x0009);
2669 b43_phy_write(dev, 0x0059, backup[5]); 706 }
2670 b43_phy_write(dev, 0x0058, backup[6]); 707 b43_phy_write(dev, 0x005A, 0x0480);
2671 b43_synth_pu_workaround(dev, phy->channel); 708 b43_phy_write(dev, 0x0059, 0x0810);
2672 b43_phy_write(dev, 0x0802, 709 b43_phy_write(dev, 0x0058, 0x000D);
2673 b43_phy_read(dev, 0x0802) | (0x0001 | 0x0002)); 710 udelay(20);
2674 b43_set_original_gains(dev); 711 nrssi1 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F);
2675 b43_phy_write(dev, B43_PHY_G_CRS, 712 if (nrssi1 >= 0x0020)
2676 b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000); 713 nrssi1 -= 0x0040;
2677 if (phy->rev >= 3) { 714 if (nrssi0 == nrssi1)
2678 b43_phy_write(dev, 0x0801, backup[14]); 715 gphy->nrssislope = 0x00010000;
2679 b43_phy_write(dev, 0x0060, backup[15]); 716 else
2680 b43_phy_write(dev, 0x0014, backup[16]); 717 gphy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
2681 b43_phy_write(dev, 0x0478, backup[17]); 718 if (nrssi0 >= -4) {
2682 } 719 gphy->nrssi[0] = nrssi1;
2683 b43_nrssi_mem_update(dev); 720 gphy->nrssi[1] = nrssi0;
2684 b43_calc_nrssi_threshold(dev); 721 }
2685 break; 722 if (phy->rev >= 3) {
2686 default: 723 b43_phy_write(dev, 0x002E, backup[10]);
2687 B43_WARN_ON(1); 724 b43_phy_write(dev, 0x002F, backup[11]);
725 b43_phy_write(dev, 0x080F, backup[12]);
726 b43_phy_write(dev, B43_PHY_G_LO_CONTROL, backup[13]);
2688 } 727 }
728 if (phy->rev >= 2) {
729 b43_phy_write(dev, 0x0812,
730 b43_phy_read(dev, 0x0812) & 0xFFCF);
731 b43_phy_write(dev, 0x0811,
732 b43_phy_read(dev, 0x0811) & 0xFFCF);
733 }
734
735 b43_radio_write16(dev, 0x007A, backup[0]);
736 b43_radio_write16(dev, 0x0052, backup[1]);
737 b43_radio_write16(dev, 0x0043, backup[2]);
738 b43_write16(dev, 0x03E2, backup[7]);
739 b43_write16(dev, 0x03E6, backup[8]);
740 b43_write16(dev, B43_MMIO_CHANNEL_EXT, backup[9]);
741 b43_phy_write(dev, 0x0015, backup[3]);
742 b43_phy_write(dev, 0x005A, backup[4]);
743 b43_phy_write(dev, 0x0059, backup[5]);
744 b43_phy_write(dev, 0x0058, backup[6]);
745 b43_synth_pu_workaround(dev, phy->channel);
746 b43_phy_write(dev, 0x0802,
747 b43_phy_read(dev, 0x0802) | (0x0001 | 0x0002));
748 b43_set_original_gains(dev);
749 b43_phy_write(dev, B43_PHY_G_CRS,
750 b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000);
751 if (phy->rev >= 3) {
752 b43_phy_write(dev, 0x0801, backup[14]);
753 b43_phy_write(dev, 0x0060, backup[15]);
754 b43_phy_write(dev, 0x0014, backup[16]);
755 b43_phy_write(dev, 0x0478, backup[17]);
756 }
757 b43_nrssi_mem_update(dev);
758 b43_calc_nrssi_threshold(dev);
2689} 759}
2690 760
2691void b43_calc_nrssi_threshold(struct b43_wldev *dev) 761static void b43_calc_nrssi_threshold(struct b43_wldev *dev)
2692{ 762{
2693 struct b43_phy *phy = &dev->phy; 763 struct b43_phy *phy = &dev->phy;
2694 s32 threshold; 764 struct b43_phy_g *gphy = phy->g;
2695 s32 a, b; 765 s32 a, b;
2696 s16 tmp16; 766 s16 tmp16;
2697 u16 tmp_u16; 767 u16 tmp_u16;
2698 768
2699 switch (phy->type) { 769 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
2700 case B43_PHYTYPE_B:{ 770
2701 if (phy->radio_ver != 0x2050) 771 if (!phy->gmode ||
2702 return; 772 !(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
2703 if (! 773 tmp16 = b43_nrssi_hw_read(dev, 0x20);
2704 (dev->dev->bus->sprom. 774 if (tmp16 >= 0x20)
2705 boardflags_lo & B43_BFL_RSSI)) 775 tmp16 -= 0x40;
2706 return; 776 if (tmp16 < 3) {
2707 777 b43_phy_write(dev, 0x048A,
2708 if (phy->radio_rev >= 6) { 778 (b43_phy_read(dev, 0x048A)
2709 threshold = 779 & 0xF000) | 0x09EB);
2710 (phy->nrssi[1] - phy->nrssi[0]) * 32; 780 } else {
2711 threshold += 20 * (phy->nrssi[0] + 1); 781 b43_phy_write(dev, 0x048A,
2712 threshold /= 40; 782 (b43_phy_read(dev, 0x048A)
2713 } else 783 & 0xF000) | 0x0AED);
2714 threshold = phy->nrssi[1] - 5;
2715
2716 threshold = clamp_val(threshold, 0, 0x3E);
2717 b43_phy_read(dev, 0x0020); /* dummy read */
2718 b43_phy_write(dev, 0x0020,
2719 (((u16) threshold) << 8) | 0x001C);
2720
2721 if (phy->radio_rev >= 6) {
2722 b43_phy_write(dev, 0x0087, 0x0E0D);
2723 b43_phy_write(dev, 0x0086, 0x0C0B);
2724 b43_phy_write(dev, 0x0085, 0x0A09);
2725 b43_phy_write(dev, 0x0084, 0x0808);
2726 b43_phy_write(dev, 0x0083, 0x0808);
2727 b43_phy_write(dev, 0x0082, 0x0604);
2728 b43_phy_write(dev, 0x0081, 0x0302);
2729 b43_phy_write(dev, 0x0080, 0x0100);
2730 }
2731 break;
2732 } 784 }
2733 case B43_PHYTYPE_G: 785 } else {
2734 if (!phy->gmode || 786 if (gphy->interfmode == B43_INTERFMODE_NONWLAN) {
2735 !(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) { 787 a = 0xE;
2736 tmp16 = b43_nrssi_hw_read(dev, 0x20); 788 b = 0xA;
2737 if (tmp16 >= 0x20) 789 } else if (!gphy->aci_wlan_automatic && gphy->aci_enable) {
2738 tmp16 -= 0x40; 790 a = 0x13;
2739 if (tmp16 < 3) { 791 b = 0x12;
2740 b43_phy_write(dev, 0x048A,
2741 (b43_phy_read(dev, 0x048A)
2742 & 0xF000) | 0x09EB);
2743 } else {
2744 b43_phy_write(dev, 0x048A,
2745 (b43_phy_read(dev, 0x048A)
2746 & 0xF000) | 0x0AED);
2747 }
2748 } else { 792 } else {
2749 if (phy->interfmode == B43_INTERFMODE_NONWLAN) { 793 a = 0xE;
2750 a = 0xE; 794 b = 0x11;
2751 b = 0xA;
2752 } else if (!phy->aci_wlan_automatic && phy->aci_enable) {
2753 a = 0x13;
2754 b = 0x12;
2755 } else {
2756 a = 0xE;
2757 b = 0x11;
2758 }
2759
2760 a = a * (phy->nrssi[1] - phy->nrssi[0]);
2761 a += (phy->nrssi[0] << 6);
2762 if (a < 32)
2763 a += 31;
2764 else
2765 a += 32;
2766 a = a >> 6;
2767 a = clamp_val(a, -31, 31);
2768
2769 b = b * (phy->nrssi[1] - phy->nrssi[0]);
2770 b += (phy->nrssi[0] << 6);
2771 if (b < 32)
2772 b += 31;
2773 else
2774 b += 32;
2775 b = b >> 6;
2776 b = clamp_val(b, -31, 31);
2777
2778 tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000;
2779 tmp_u16 |= ((u32) b & 0x0000003F);
2780 tmp_u16 |= (((u32) a & 0x0000003F) << 6);
2781 b43_phy_write(dev, 0x048A, tmp_u16);
2782 } 795 }
2783 break; 796
2784 default: 797 a = a * (gphy->nrssi[1] - gphy->nrssi[0]);
2785 B43_WARN_ON(1); 798 a += (gphy->nrssi[0] << 6);
799 if (a < 32)
800 a += 31;
801 else
802 a += 32;
803 a = a >> 6;
804 a = clamp_val(a, -31, 31);
805
806 b = b * (gphy->nrssi[1] - gphy->nrssi[0]);
807 b += (gphy->nrssi[0] << 6);
808 if (b < 32)
809 b += 31;
810 else
811 b += 32;
812 b = b >> 6;
813 b = clamp_val(b, -31, 31);
814
815 tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000;
816 tmp_u16 |= ((u32) b & 0x0000003F);
817 tmp_u16 |= (((u32) a & 0x0000003F) << 6);
818 b43_phy_write(dev, 0x048A, tmp_u16);
2786 } 819 }
2787} 820}
2788 821
@@ -2860,9 +893,10 @@ static void
2860b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode) 893b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
2861{ 894{
2862 struct b43_phy *phy = &dev->phy; 895 struct b43_phy *phy = &dev->phy;
896 struct b43_phy_g *gphy = phy->g;
2863 u16 tmp, flipped; 897 u16 tmp, flipped;
2864 size_t stackidx = 0; 898 size_t stackidx = 0;
2865 u32 *stack = phy->interfstack; 899 u32 *stack = gphy->interfstack;
2866 900
2867 switch (mode) { 901 switch (mode) {
2868 case B43_INTERFMODE_NONWLAN: 902 case B43_INTERFMODE_NONWLAN:
@@ -2928,7 +962,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
2928 if (b43_phy_read(dev, 0x0033) & 0x0800) 962 if (b43_phy_read(dev, 0x0033) & 0x0800)
2929 break; 963 break;
2930 964
2931 phy->aci_enable = 1; 965 gphy->aci_enable = 1;
2932 966
2933 phy_stacksave(B43_PHY_RADIO_BITFIELD); 967 phy_stacksave(B43_PHY_RADIO_BITFIELD);
2934 phy_stacksave(B43_PHY_G_CRS); 968 phy_stacksave(B43_PHY_G_CRS);
@@ -3064,7 +1098,8 @@ static void
3064b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode) 1098b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
3065{ 1099{
3066 struct b43_phy *phy = &dev->phy; 1100 struct b43_phy *phy = &dev->phy;
3067 u32 *stack = phy->interfstack; 1101 struct b43_phy_g *gphy = phy->g;
1102 u32 *stack = gphy->interfstack;
3068 1103
3069 switch (mode) { 1104 switch (mode) {
3070 case B43_INTERFMODE_NONWLAN: 1105 case B43_INTERFMODE_NONWLAN:
@@ -3103,7 +1138,7 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
3103 if (!(b43_phy_read(dev, 0x0033) & 0x0800)) 1138 if (!(b43_phy_read(dev, 0x0033) & 0x0800))
3104 break; 1139 break;
3105 1140
3106 phy->aci_enable = 0; 1141 gphy->aci_enable = 0;
3107 1142
3108 phy_stackrestore(B43_PHY_RADIO_BITFIELD); 1143 phy_stackrestore(B43_PHY_RADIO_BITFIELD);
3109 phy_stackrestore(B43_PHY_G_CRS); 1144 phy_stackrestore(B43_PHY_G_CRS);
@@ -3153,47 +1188,6 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
3153#undef ofdmtab_stacksave 1188#undef ofdmtab_stacksave
3154#undef ofdmtab_stackrestore 1189#undef ofdmtab_stackrestore
3155 1190
3156int b43_radio_set_interference_mitigation(struct b43_wldev *dev, int mode)
3157{
3158 struct b43_phy *phy = &dev->phy;
3159 int currentmode;
3160
3161 if ((phy->type != B43_PHYTYPE_G) || (phy->rev == 0) || (!phy->gmode))
3162 return -ENODEV;
3163
3164 phy->aci_wlan_automatic = 0;
3165 switch (mode) {
3166 case B43_INTERFMODE_AUTOWLAN:
3167 phy->aci_wlan_automatic = 1;
3168 if (phy->aci_enable)
3169 mode = B43_INTERFMODE_MANUALWLAN;
3170 else
3171 mode = B43_INTERFMODE_NONE;
3172 break;
3173 case B43_INTERFMODE_NONE:
3174 case B43_INTERFMODE_NONWLAN:
3175 case B43_INTERFMODE_MANUALWLAN:
3176 break;
3177 default:
3178 return -EINVAL;
3179 }
3180
3181 currentmode = phy->interfmode;
3182 if (currentmode == mode)
3183 return 0;
3184 if (currentmode != B43_INTERFMODE_NONE)
3185 b43_radio_interference_mitigation_disable(dev, currentmode);
3186
3187 if (mode == B43_INTERFMODE_NONE) {
3188 phy->aci_enable = 0;
3189 phy->aci_hw_rssi = 0;
3190 } else
3191 b43_radio_interference_mitigation_enable(dev, mode);
3192 phy->interfmode = mode;
3193
3194 return 0;
3195}
3196
3197static u16 b43_radio_core_calibration_value(struct b43_wldev *dev) 1191static u16 b43_radio_core_calibration_value(struct b43_wldev *dev)
3198{ 1192{
3199 u16 reg, index, ret; 1193 u16 reg, index, ret;
@@ -3219,13 +1213,14 @@ static u16 radio2050_rfover_val(struct b43_wldev *dev,
3219 u16 phy_register, unsigned int lpd) 1213 u16 phy_register, unsigned int lpd)
3220{ 1214{
3221 struct b43_phy *phy = &dev->phy; 1215 struct b43_phy *phy = &dev->phy;
1216 struct b43_phy_g *gphy = phy->g;
3222 struct ssb_sprom *sprom = &(dev->dev->bus->sprom); 1217 struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
3223 1218
3224 if (!phy->gmode) 1219 if (!phy->gmode)
3225 return 0; 1220 return 0;
3226 1221
3227 if (has_loopback_gain(phy)) { 1222 if (has_loopback_gain(phy)) {
3228 int max_lb_gain = phy->max_lb_gain; 1223 int max_lb_gain = gphy->max_lb_gain;
3229 u16 extlna; 1224 u16 extlna;
3230 u16 i; 1225 u16 i;
3231 1226
@@ -3606,301 +1601,1682 @@ u16 b43_radio_init2050(struct b43_wldev *dev)
3606 return ret; 1601 return ret;
3607} 1602}
3608 1603
3609void b43_radio_init2060(struct b43_wldev *dev) 1604static void b43_phy_initb5(struct b43_wldev *dev)
3610{ 1605{
3611 int err; 1606 struct ssb_bus *bus = dev->dev->bus;
1607 struct b43_phy *phy = &dev->phy;
1608 struct b43_phy_g *gphy = phy->g;
1609 u16 offset, value;
1610 u8 old_channel;
3612 1611
3613 b43_radio_write16(dev, 0x0004, 0x00C0); 1612 if (phy->analog == 1) {
3614 b43_radio_write16(dev, 0x0005, 0x0008); 1613 b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A)
3615 b43_radio_write16(dev, 0x0009, 0x0040); 1614 | 0x0050);
3616 b43_radio_write16(dev, 0x0005, 0x00AA); 1615 }
3617 b43_radio_write16(dev, 0x0032, 0x008F); 1616 if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) &&
3618 b43_radio_write16(dev, 0x0006, 0x008F); 1617 (bus->boardinfo.type != SSB_BOARD_BU4306)) {
3619 b43_radio_write16(dev, 0x0034, 0x008F); 1618 value = 0x2120;
3620 b43_radio_write16(dev, 0x002C, 0x0007); 1619 for (offset = 0x00A8; offset < 0x00C7; offset++) {
3621 b43_radio_write16(dev, 0x0082, 0x0080); 1620 b43_phy_write(dev, offset, value);
3622 b43_radio_write16(dev, 0x0080, 0x0000); 1621 value += 0x202;
3623 b43_radio_write16(dev, 0x003F, 0x00DA); 1622 }
3624 b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008); 1623 }
3625 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0010); 1624 b43_phy_write(dev, 0x0035, (b43_phy_read(dev, 0x0035) & 0xF0FF)
3626 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020); 1625 | 0x0700);
3627 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020); 1626 if (phy->radio_ver == 0x2050)
3628 msleep(1); /* delay 400usec */ 1627 b43_phy_write(dev, 0x0038, 0x0667);
3629
3630 b43_radio_write16(dev, 0x0081,
3631 (b43_radio_read16(dev, 0x0081) & ~0x0020) | 0x0010);
3632 msleep(1); /* delay 400usec */
3633
3634 b43_radio_write16(dev, 0x0005,
3635 (b43_radio_read16(dev, 0x0005) & ~0x0008) | 0x0008);
3636 b43_radio_write16(dev, 0x0085, b43_radio_read16(dev, 0x0085) & ~0x0010);
3637 b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008);
3638 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0040);
3639 b43_radio_write16(dev, 0x0081,
3640 (b43_radio_read16(dev, 0x0081) & ~0x0040) | 0x0040);
3641 b43_radio_write16(dev, 0x0005,
3642 (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008);
3643 b43_phy_write(dev, 0x0063, 0xDDC6);
3644 b43_phy_write(dev, 0x0069, 0x07BE);
3645 b43_phy_write(dev, 0x006A, 0x0000);
3646
3647 err = b43_radio_selectchannel(dev, B43_DEFAULT_CHANNEL_A, 0);
3648 B43_WARN_ON(err);
3649 1628
3650 msleep(1); 1629 if (phy->gmode || phy->rev >= 2) {
1630 if (phy->radio_ver == 0x2050) {
1631 b43_radio_write16(dev, 0x007A,
1632 b43_radio_read16(dev, 0x007A)
1633 | 0x0020);
1634 b43_radio_write16(dev, 0x0051,
1635 b43_radio_read16(dev, 0x0051)
1636 | 0x0004);
1637 }
1638 b43_write16(dev, B43_MMIO_PHY_RADIO, 0x0000);
1639
1640 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
1641 b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
1642
1643 b43_phy_write(dev, 0x001C, 0x186A);
1644
1645 b43_phy_write(dev, 0x0013,
1646 (b43_phy_read(dev, 0x0013) & 0x00FF) | 0x1900);
1647 b43_phy_write(dev, 0x0035,
1648 (b43_phy_read(dev, 0x0035) & 0xFFC0) | 0x0064);
1649 b43_phy_write(dev, 0x005D,
1650 (b43_phy_read(dev, 0x005D) & 0xFF80) | 0x000A);
1651 }
1652
1653 if (dev->bad_frames_preempt) {
1654 b43_phy_write(dev, B43_PHY_RADIO_BITFIELD,
1655 b43_phy_read(dev,
1656 B43_PHY_RADIO_BITFIELD) | (1 << 11));
1657 }
1658
1659 if (phy->analog == 1) {
1660 b43_phy_write(dev, 0x0026, 0xCE00);
1661 b43_phy_write(dev, 0x0021, 0x3763);
1662 b43_phy_write(dev, 0x0022, 0x1BC3);
1663 b43_phy_write(dev, 0x0023, 0x06F9);
1664 b43_phy_write(dev, 0x0024, 0x037E);
1665 } else
1666 b43_phy_write(dev, 0x0026, 0xCC00);
1667 b43_phy_write(dev, 0x0030, 0x00C6);
1668 b43_write16(dev, 0x03EC, 0x3F22);
1669
1670 if (phy->analog == 1)
1671 b43_phy_write(dev, 0x0020, 0x3E1C);
1672 else
1673 b43_phy_write(dev, 0x0020, 0x301C);
1674
1675 if (phy->analog == 0)
1676 b43_write16(dev, 0x03E4, 0x3000);
1677
1678 old_channel = phy->channel;
1679 /* Force to channel 7, even if not supported. */
1680 b43_gphy_channel_switch(dev, 7, 0);
1681
1682 if (phy->radio_ver != 0x2050) {
1683 b43_radio_write16(dev, 0x0075, 0x0080);
1684 b43_radio_write16(dev, 0x0079, 0x0081);
1685 }
1686
1687 b43_radio_write16(dev, 0x0050, 0x0020);
1688 b43_radio_write16(dev, 0x0050, 0x0023);
1689
1690 if (phy->radio_ver == 0x2050) {
1691 b43_radio_write16(dev, 0x0050, 0x0020);
1692 b43_radio_write16(dev, 0x005A, 0x0070);
1693 }
1694
1695 b43_radio_write16(dev, 0x005B, 0x007B);
1696 b43_radio_write16(dev, 0x005C, 0x00B0);
1697
1698 b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0007);
1699
1700 b43_gphy_channel_switch(dev, old_channel, 0);
1701
1702 b43_phy_write(dev, 0x0014, 0x0080);
1703 b43_phy_write(dev, 0x0032, 0x00CA);
1704 b43_phy_write(dev, 0x002A, 0x88A3);
1705
1706 b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
1707
1708 if (phy->radio_ver == 0x2050)
1709 b43_radio_write16(dev, 0x005D, 0x000D);
1710
1711 b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
3651} 1712}
3652 1713
3653static inline u16 freq_r3A_value(u16 frequency) 1714static void b43_phy_initb6(struct b43_wldev *dev)
3654{ 1715{
3655 u16 value; 1716 struct b43_phy *phy = &dev->phy;
1717 struct b43_phy_g *gphy = phy->g;
1718 u16 offset, val;
1719 u8 old_channel;
1720
1721 b43_phy_write(dev, 0x003E, 0x817A);
1722 b43_radio_write16(dev, 0x007A,
1723 (b43_radio_read16(dev, 0x007A) | 0x0058));
1724 if (phy->radio_rev == 4 || phy->radio_rev == 5) {
1725 b43_radio_write16(dev, 0x51, 0x37);
1726 b43_radio_write16(dev, 0x52, 0x70);
1727 b43_radio_write16(dev, 0x53, 0xB3);
1728 b43_radio_write16(dev, 0x54, 0x9B);
1729 b43_radio_write16(dev, 0x5A, 0x88);
1730 b43_radio_write16(dev, 0x5B, 0x88);
1731 b43_radio_write16(dev, 0x5D, 0x88);
1732 b43_radio_write16(dev, 0x5E, 0x88);
1733 b43_radio_write16(dev, 0x7D, 0x88);
1734 b43_hf_write(dev, b43_hf_read(dev)
1735 | B43_HF_TSSIRPSMW);
1736 }
1737 B43_WARN_ON(phy->radio_rev == 6 || phy->radio_rev == 7); /* We had code for these revs here... */
1738 if (phy->radio_rev == 8) {
1739 b43_radio_write16(dev, 0x51, 0);
1740 b43_radio_write16(dev, 0x52, 0x40);
1741 b43_radio_write16(dev, 0x53, 0xB7);
1742 b43_radio_write16(dev, 0x54, 0x98);
1743 b43_radio_write16(dev, 0x5A, 0x88);
1744 b43_radio_write16(dev, 0x5B, 0x6B);
1745 b43_radio_write16(dev, 0x5C, 0x0F);
1746 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) {
1747 b43_radio_write16(dev, 0x5D, 0xFA);
1748 b43_radio_write16(dev, 0x5E, 0xD8);
1749 } else {
1750 b43_radio_write16(dev, 0x5D, 0xF5);
1751 b43_radio_write16(dev, 0x5E, 0xB8);
1752 }
1753 b43_radio_write16(dev, 0x0073, 0x0003);
1754 b43_radio_write16(dev, 0x007D, 0x00A8);
1755 b43_radio_write16(dev, 0x007C, 0x0001);
1756 b43_radio_write16(dev, 0x007E, 0x0008);
1757 }
1758 val = 0x1E1F;
1759 for (offset = 0x0088; offset < 0x0098; offset++) {
1760 b43_phy_write(dev, offset, val);
1761 val -= 0x0202;
1762 }
1763 val = 0x3E3F;
1764 for (offset = 0x0098; offset < 0x00A8; offset++) {
1765 b43_phy_write(dev, offset, val);
1766 val -= 0x0202;
1767 }
1768 val = 0x2120;
1769 for (offset = 0x00A8; offset < 0x00C8; offset++) {
1770 b43_phy_write(dev, offset, (val & 0x3F3F));
1771 val += 0x0202;
1772 }
1773 if (phy->type == B43_PHYTYPE_G) {
1774 b43_radio_write16(dev, 0x007A,
1775 b43_radio_read16(dev, 0x007A) | 0x0020);
1776 b43_radio_write16(dev, 0x0051,
1777 b43_radio_read16(dev, 0x0051) | 0x0004);
1778 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
1779 b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
1780 b43_phy_write(dev, 0x5B, 0);
1781 b43_phy_write(dev, 0x5C, 0);
1782 }
1783
1784 old_channel = phy->channel;
1785 if (old_channel >= 8)
1786 b43_gphy_channel_switch(dev, 1, 0);
1787 else
1788 b43_gphy_channel_switch(dev, 13, 0);
1789
1790 b43_radio_write16(dev, 0x0050, 0x0020);
1791 b43_radio_write16(dev, 0x0050, 0x0023);
1792 udelay(40);
1793 if (phy->radio_rev < 6 || phy->radio_rev == 8) {
1794 b43_radio_write16(dev, 0x7C, (b43_radio_read16(dev, 0x7C)
1795 | 0x0002));
1796 b43_radio_write16(dev, 0x50, 0x20);
1797 }
1798 if (phy->radio_rev <= 2) {
1799 b43_radio_write16(dev, 0x7C, 0x20);
1800 b43_radio_write16(dev, 0x5A, 0x70);
1801 b43_radio_write16(dev, 0x5B, 0x7B);
1802 b43_radio_write16(dev, 0x5C, 0xB0);
1803 }
1804 b43_radio_write16(dev, 0x007A,
1805 (b43_radio_read16(dev, 0x007A) & 0x00F8) | 0x0007);
1806
1807 b43_gphy_channel_switch(dev, old_channel, 0);
3656 1808
3657 if (frequency < 5091) 1809 b43_phy_write(dev, 0x0014, 0x0200);
3658 value = 0x0040; 1810 if (phy->radio_rev >= 6)
3659 else if (frequency < 5321) 1811 b43_phy_write(dev, 0x2A, 0x88C2);
3660 value = 0x0000;
3661 else if (frequency < 5806)
3662 value = 0x0080;
3663 else 1812 else
3664 value = 0x0040; 1813 b43_phy_write(dev, 0x2A, 0x8AC0);
1814 b43_phy_write(dev, 0x0038, 0x0668);
1815 b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
1816 if (phy->radio_rev <= 5) {
1817 b43_phy_write(dev, 0x5D, (b43_phy_read(dev, 0x5D)
1818 & 0xFF80) | 0x0003);
1819 }
1820 if (phy->radio_rev <= 2)
1821 b43_radio_write16(dev, 0x005D, 0x000D);
3665 1822
3666 return value; 1823 if (phy->analog == 4) {
1824 b43_write16(dev, 0x3E4, 9);
1825 b43_phy_write(dev, 0x61, b43_phy_read(dev, 0x61)
1826 & 0x0FFF);
1827 } else {
1828 b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0)
1829 | 0x0004);
1830 }
1831 if (phy->type == B43_PHYTYPE_B)
1832 B43_WARN_ON(1);
1833 else if (phy->type == B43_PHYTYPE_G)
1834 b43_write16(dev, 0x03E6, 0x0);
3667} 1835}
3668 1836
3669void b43_radio_set_tx_iq(struct b43_wldev *dev) 1837static void b43_calc_loopback_gain(struct b43_wldev *dev)
3670{ 1838{
3671 static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 }; 1839 struct b43_phy *phy = &dev->phy;
3672 static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A }; 1840 struct b43_phy_g *gphy = phy->g;
3673 u16 tmp = b43_radio_read16(dev, 0x001E); 1841 u16 backup_phy[16] = { 0 };
3674 int i, j; 1842 u16 backup_radio[3];
1843 u16 backup_bband;
1844 u16 i, j, loop_i_max;
1845 u16 trsw_rx;
1846 u16 loop1_outer_done, loop1_inner_done;
3675 1847
3676 for (i = 0; i < 5; i++) { 1848 backup_phy[0] = b43_phy_read(dev, B43_PHY_CRS0);
3677 for (j = 0; j < 5; j++) { 1849 backup_phy[1] = b43_phy_read(dev, B43_PHY_CCKBBANDCFG);
3678 if (tmp == (data_high[i] << 4 | data_low[j])) { 1850 backup_phy[2] = b43_phy_read(dev, B43_PHY_RFOVER);
3679 b43_phy_write(dev, 0x0069, 1851 backup_phy[3] = b43_phy_read(dev, B43_PHY_RFOVERVAL);
3680 (i - j) << 8 | 0x00C0); 1852 if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */
3681 return; 1853 backup_phy[4] = b43_phy_read(dev, B43_PHY_ANALOGOVER);
1854 backup_phy[5] = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL);
1855 }
1856 backup_phy[6] = b43_phy_read(dev, B43_PHY_CCK(0x5A));
1857 backup_phy[7] = b43_phy_read(dev, B43_PHY_CCK(0x59));
1858 backup_phy[8] = b43_phy_read(dev, B43_PHY_CCK(0x58));
1859 backup_phy[9] = b43_phy_read(dev, B43_PHY_CCK(0x0A));
1860 backup_phy[10] = b43_phy_read(dev, B43_PHY_CCK(0x03));
1861 backup_phy[11] = b43_phy_read(dev, B43_PHY_LO_MASK);
1862 backup_phy[12] = b43_phy_read(dev, B43_PHY_LO_CTL);
1863 backup_phy[13] = b43_phy_read(dev, B43_PHY_CCK(0x2B));
1864 backup_phy[14] = b43_phy_read(dev, B43_PHY_PGACTL);
1865 backup_phy[15] = b43_phy_read(dev, B43_PHY_LO_LEAKAGE);
1866 backup_bband = gphy->bbatt.att;
1867 backup_radio[0] = b43_radio_read16(dev, 0x52);
1868 backup_radio[1] = b43_radio_read16(dev, 0x43);
1869 backup_radio[2] = b43_radio_read16(dev, 0x7A);
1870
1871 b43_phy_write(dev, B43_PHY_CRS0,
1872 b43_phy_read(dev, B43_PHY_CRS0) & 0x3FFF);
1873 b43_phy_write(dev, B43_PHY_CCKBBANDCFG,
1874 b43_phy_read(dev, B43_PHY_CCKBBANDCFG) | 0x8000);
1875 b43_phy_write(dev, B43_PHY_RFOVER,
1876 b43_phy_read(dev, B43_PHY_RFOVER) | 0x0002);
1877 b43_phy_write(dev, B43_PHY_RFOVERVAL,
1878 b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFD);
1879 b43_phy_write(dev, B43_PHY_RFOVER,
1880 b43_phy_read(dev, B43_PHY_RFOVER) | 0x0001);
1881 b43_phy_write(dev, B43_PHY_RFOVERVAL,
1882 b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFE);
1883 if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */
1884 b43_phy_write(dev, B43_PHY_ANALOGOVER,
1885 b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0001);
1886 b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
1887 b43_phy_read(dev,
1888 B43_PHY_ANALOGOVERVAL) & 0xFFFE);
1889 b43_phy_write(dev, B43_PHY_ANALOGOVER,
1890 b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0002);
1891 b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
1892 b43_phy_read(dev,
1893 B43_PHY_ANALOGOVERVAL) & 0xFFFD);
1894 }
1895 b43_phy_write(dev, B43_PHY_RFOVER,
1896 b43_phy_read(dev, B43_PHY_RFOVER) | 0x000C);
1897 b43_phy_write(dev, B43_PHY_RFOVERVAL,
1898 b43_phy_read(dev, B43_PHY_RFOVERVAL) | 0x000C);
1899 b43_phy_write(dev, B43_PHY_RFOVER,
1900 b43_phy_read(dev, B43_PHY_RFOVER) | 0x0030);
1901 b43_phy_write(dev, B43_PHY_RFOVERVAL,
1902 (b43_phy_read(dev, B43_PHY_RFOVERVAL)
1903 & 0xFFCF) | 0x10);
1904
1905 b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0780);
1906 b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810);
1907 b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D);
1908
1909 b43_phy_write(dev, B43_PHY_CCK(0x0A),
1910 b43_phy_read(dev, B43_PHY_CCK(0x0A)) | 0x2000);
1911 if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */
1912 b43_phy_write(dev, B43_PHY_ANALOGOVER,
1913 b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0004);
1914 b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
1915 b43_phy_read(dev,
1916 B43_PHY_ANALOGOVERVAL) & 0xFFFB);
1917 }
1918 b43_phy_write(dev, B43_PHY_CCK(0x03),
1919 (b43_phy_read(dev, B43_PHY_CCK(0x03))
1920 & 0xFF9F) | 0x40);
1921
1922 if (phy->radio_rev == 8) {
1923 b43_radio_write16(dev, 0x43, 0x000F);
1924 } else {
1925 b43_radio_write16(dev, 0x52, 0);
1926 b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43)
1927 & 0xFFF0) | 0x9);
1928 }
1929 b43_gphy_set_baseband_attenuation(dev, 11);
1930
1931 if (phy->rev >= 3)
1932 b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020);
1933 else
1934 b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020);
1935 b43_phy_write(dev, B43_PHY_LO_CTL, 0);
1936
1937 b43_phy_write(dev, B43_PHY_CCK(0x2B),
1938 (b43_phy_read(dev, B43_PHY_CCK(0x2B))
1939 & 0xFFC0) | 0x01);
1940 b43_phy_write(dev, B43_PHY_CCK(0x2B),
1941 (b43_phy_read(dev, B43_PHY_CCK(0x2B))
1942 & 0xC0FF) | 0x800);
1943
1944 b43_phy_write(dev, B43_PHY_RFOVER,
1945 b43_phy_read(dev, B43_PHY_RFOVER) | 0x0100);
1946 b43_phy_write(dev, B43_PHY_RFOVERVAL,
1947 b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xCFFF);
1948
1949 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) {
1950 if (phy->rev >= 7) {
1951 b43_phy_write(dev, B43_PHY_RFOVER,
1952 b43_phy_read(dev, B43_PHY_RFOVER)
1953 | 0x0800);
1954 b43_phy_write(dev, B43_PHY_RFOVERVAL,
1955 b43_phy_read(dev, B43_PHY_RFOVERVAL)
1956 | 0x8000);
1957 }
1958 }
1959 b43_radio_write16(dev, 0x7A, b43_radio_read16(dev, 0x7A)
1960 & 0x00F7);
1961
1962 j = 0;
1963 loop_i_max = (phy->radio_rev == 8) ? 15 : 9;
1964 for (i = 0; i < loop_i_max; i++) {
1965 for (j = 0; j < 16; j++) {
1966 b43_radio_write16(dev, 0x43, i);
1967 b43_phy_write(dev, B43_PHY_RFOVERVAL,
1968 (b43_phy_read(dev, B43_PHY_RFOVERVAL)
1969 & 0xF0FF) | (j << 8));
1970 b43_phy_write(dev, B43_PHY_PGACTL,
1971 (b43_phy_read(dev, B43_PHY_PGACTL)
1972 & 0x0FFF) | 0xA000);
1973 b43_phy_write(dev, B43_PHY_PGACTL,
1974 b43_phy_read(dev, B43_PHY_PGACTL)
1975 | 0xF000);
1976 udelay(20);
1977 if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
1978 goto exit_loop1;
1979 }
1980 }
1981 exit_loop1:
1982 loop1_outer_done = i;
1983 loop1_inner_done = j;
1984 if (j >= 8) {
1985 b43_phy_write(dev, B43_PHY_RFOVERVAL,
1986 b43_phy_read(dev, B43_PHY_RFOVERVAL)
1987 | 0x30);
1988 trsw_rx = 0x1B;
1989 for (j = j - 8; j < 16; j++) {
1990 b43_phy_write(dev, B43_PHY_RFOVERVAL,
1991 (b43_phy_read(dev, B43_PHY_RFOVERVAL)
1992 & 0xF0FF) | (j << 8));
1993 b43_phy_write(dev, B43_PHY_PGACTL,
1994 (b43_phy_read(dev, B43_PHY_PGACTL)
1995 & 0x0FFF) | 0xA000);
1996 b43_phy_write(dev, B43_PHY_PGACTL,
1997 b43_phy_read(dev, B43_PHY_PGACTL)
1998 | 0xF000);
1999 udelay(20);
2000 trsw_rx -= 3;
2001 if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
2002 goto exit_loop2;
2003 }
2004 } else
2005 trsw_rx = 0x18;
2006 exit_loop2:
2007
2008 if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */
2009 b43_phy_write(dev, B43_PHY_ANALOGOVER, backup_phy[4]);
2010 b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, backup_phy[5]);
2011 }
2012 b43_phy_write(dev, B43_PHY_CCK(0x5A), backup_phy[6]);
2013 b43_phy_write(dev, B43_PHY_CCK(0x59), backup_phy[7]);
2014 b43_phy_write(dev, B43_PHY_CCK(0x58), backup_phy[8]);
2015 b43_phy_write(dev, B43_PHY_CCK(0x0A), backup_phy[9]);
2016 b43_phy_write(dev, B43_PHY_CCK(0x03), backup_phy[10]);
2017 b43_phy_write(dev, B43_PHY_LO_MASK, backup_phy[11]);
2018 b43_phy_write(dev, B43_PHY_LO_CTL, backup_phy[12]);
2019 b43_phy_write(dev, B43_PHY_CCK(0x2B), backup_phy[13]);
2020 b43_phy_write(dev, B43_PHY_PGACTL, backup_phy[14]);
2021
2022 b43_gphy_set_baseband_attenuation(dev, backup_bband);
2023
2024 b43_radio_write16(dev, 0x52, backup_radio[0]);
2025 b43_radio_write16(dev, 0x43, backup_radio[1]);
2026 b43_radio_write16(dev, 0x7A, backup_radio[2]);
2027
2028 b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2] | 0x0003);
2029 udelay(10);
2030 b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2]);
2031 b43_phy_write(dev, B43_PHY_RFOVERVAL, backup_phy[3]);
2032 b43_phy_write(dev, B43_PHY_CRS0, backup_phy[0]);
2033 b43_phy_write(dev, B43_PHY_CCKBBANDCFG, backup_phy[1]);
2034
2035 gphy->max_lb_gain =
2036 ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11;
2037 gphy->trsw_rx_gain = trsw_rx * 2;
2038}
2039
2040static void b43_hardware_pctl_early_init(struct b43_wldev *dev)
2041{
2042 struct b43_phy *phy = &dev->phy;
2043
2044 if (!b43_has_hardware_pctl(dev)) {
2045 b43_phy_write(dev, 0x047A, 0xC111);
2046 return;
2047 }
2048
2049 b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) & 0xFEFF);
2050 b43_phy_write(dev, 0x002F, 0x0202);
2051 b43_phy_write(dev, 0x047C, b43_phy_read(dev, 0x047C) | 0x0002);
2052 b43_phy_write(dev, 0x047A, b43_phy_read(dev, 0x047A) | 0xF000);
2053 if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) {
2054 b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
2055 & 0xFF0F) | 0x0010);
2056 b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
2057 | 0x8000);
2058 b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
2059 & 0xFFC0) | 0x0010);
2060 b43_phy_write(dev, 0x002E, 0xC07F);
2061 b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
2062 | 0x0400);
2063 } else {
2064 b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
2065 | 0x0200);
2066 b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
2067 | 0x0400);
2068 b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
2069 & 0x7FFF);
2070 b43_phy_write(dev, 0x004F, b43_phy_read(dev, 0x004F)
2071 & 0xFFFE);
2072 b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
2073 & 0xFFC0) | 0x0010);
2074 b43_phy_write(dev, 0x002E, 0xC07F);
2075 b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
2076 & 0xFF0F) | 0x0010);
2077 }
2078}
2079
2080/* Hardware power control for G-PHY */
2081static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev)
2082{
2083 struct b43_phy *phy = &dev->phy;
2084 struct b43_phy_g *gphy = phy->g;
2085
2086 if (!b43_has_hardware_pctl(dev)) {
2087 /* No hardware power control */
2088 b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_HWPCTL);
2089 return;
2090 }
2091
2092 b43_phy_write(dev, 0x0036, (b43_phy_read(dev, 0x0036) & 0xFFC0)
2093 | (gphy->tgt_idle_tssi - gphy->cur_idle_tssi));
2094 b43_phy_write(dev, 0x0478, (b43_phy_read(dev, 0x0478) & 0xFF00)
2095 | (gphy->tgt_idle_tssi - gphy->cur_idle_tssi));
2096 b43_gphy_tssi_power_lt_init(dev);
2097 b43_gphy_gain_lt_init(dev);
2098 b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) & 0xFFBF);
2099 b43_phy_write(dev, 0x0014, 0x0000);
2100
2101 B43_WARN_ON(phy->rev < 6);
2102 b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
2103 | 0x0800);
2104 b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
2105 & 0xFEFF);
2106 b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801)
2107 & 0xFFBF);
2108
2109 b43_gphy_dc_lt_init(dev, 1);
2110
2111 /* Enable hardware pctl in firmware. */
2112 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
2113}
2114
2115/* Intialize B/G PHY power control */
2116static void b43_phy_init_pctl(struct b43_wldev *dev)
2117{
2118 struct ssb_bus *bus = dev->dev->bus;
2119 struct b43_phy *phy = &dev->phy;
2120 struct b43_phy_g *gphy = phy->g;
2121 struct b43_rfatt old_rfatt;
2122 struct b43_bbatt old_bbatt;
2123 u8 old_tx_control = 0;
2124
2125 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
2126
2127 if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
2128 (bus->boardinfo.type == SSB_BOARD_BU4306))
2129 return;
2130
2131 b43_phy_write(dev, 0x0028, 0x8018);
2132
2133 /* This does something with the Analog... */
2134 b43_write16(dev, B43_MMIO_PHY0, b43_read16(dev, B43_MMIO_PHY0)
2135 & 0xFFDF);
2136
2137 if (!phy->gmode)
2138 return;
2139 b43_hardware_pctl_early_init(dev);
2140 if (gphy->cur_idle_tssi == 0) {
2141 if (phy->radio_ver == 0x2050 && phy->analog == 0) {
2142 b43_radio_write16(dev, 0x0076,
2143 (b43_radio_read16(dev, 0x0076)
2144 & 0x00F7) | 0x0084);
2145 } else {
2146 struct b43_rfatt rfatt;
2147 struct b43_bbatt bbatt;
2148
2149 memcpy(&old_rfatt, &gphy->rfatt, sizeof(old_rfatt));
2150 memcpy(&old_bbatt, &gphy->bbatt, sizeof(old_bbatt));
2151 old_tx_control = gphy->tx_control;
2152
2153 bbatt.att = 11;
2154 if (phy->radio_rev == 8) {
2155 rfatt.att = 15;
2156 rfatt.with_padmix = 1;
2157 } else {
2158 rfatt.att = 9;
2159 rfatt.with_padmix = 0;
3682 } 2160 }
2161 b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
2162 }
2163 b43_dummy_transmission(dev);
2164 gphy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_ITSSI);
2165 if (B43_DEBUG) {
2166 /* Current-Idle-TSSI sanity check. */
2167 if (abs(gphy->cur_idle_tssi - gphy->tgt_idle_tssi) >= 20) {
2168 b43dbg(dev->wl,
2169 "!WARNING! Idle-TSSI phy->cur_idle_tssi "
2170 "measuring failed. (cur=%d, tgt=%d). Disabling TX power "
2171 "adjustment.\n", gphy->cur_idle_tssi,
2172 gphy->tgt_idle_tssi);
2173 gphy->cur_idle_tssi = 0;
2174 }
2175 }
2176 if (phy->radio_ver == 0x2050 && phy->analog == 0) {
2177 b43_radio_write16(dev, 0x0076,
2178 b43_radio_read16(dev, 0x0076)
2179 & 0xFF7B);
2180 } else {
2181 b43_set_txpower_g(dev, &old_bbatt,
2182 &old_rfatt, old_tx_control);
3683 } 2183 }
3684 } 2184 }
2185 b43_hardware_pctl_init_gphy(dev);
2186 b43_shm_clear_tssi(dev);
3685} 2187}
3686 2188
3687int b43_radio_selectchannel(struct b43_wldev *dev, 2189static void b43_phy_initg(struct b43_wldev *dev)
3688 u8 channel, int synthetic_pu_workaround)
3689{ 2190{
3690 struct b43_phy *phy = &dev->phy; 2191 struct b43_phy *phy = &dev->phy;
3691 u16 r8, tmp; 2192 struct b43_phy_g *gphy = phy->g;
3692 u16 freq; 2193 u16 tmp;
3693 u16 channelcookie, savedcookie; 2194
3694 int err = 0; 2195 if (phy->rev == 1)
3695 2196 b43_phy_initb5(dev);
3696 if (channel == 0xFF) { 2197 else
3697 switch (phy->type) { 2198 b43_phy_initb6(dev);
3698 case B43_PHYTYPE_A: 2199
3699 channel = B43_DEFAULT_CHANNEL_A; 2200 if (phy->rev >= 2 || phy->gmode)
3700 break; 2201 b43_phy_inita(dev);
3701 case B43_PHYTYPE_B: 2202
3702 case B43_PHYTYPE_G: 2203 if (phy->rev >= 2) {
3703 channel = B43_DEFAULT_CHANNEL_BG; 2204 b43_phy_write(dev, B43_PHY_ANALOGOVER, 0);
3704 break; 2205 b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, 0);
3705 case B43_PHYTYPE_N: 2206 }
3706 //FIXME check if we are on 2.4GHz or 5GHz and set a default channel. 2207 if (phy->rev == 2) {
3707 channel = 1; 2208 b43_phy_write(dev, B43_PHY_RFOVER, 0);
3708 break; 2209 b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
3709 default: 2210 }
3710 B43_WARN_ON(1); 2211 if (phy->rev > 5) {
2212 b43_phy_write(dev, B43_PHY_RFOVER, 0x400);
2213 b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
2214 }
2215 if (phy->gmode || phy->rev >= 2) {
2216 tmp = b43_phy_read(dev, B43_PHY_VERSION_OFDM);
2217 tmp &= B43_PHYVER_VERSION;
2218 if (tmp == 3 || tmp == 5) {
2219 b43_phy_write(dev, B43_PHY_OFDM(0xC2), 0x1816);
2220 b43_phy_write(dev, B43_PHY_OFDM(0xC3), 0x8006);
3711 } 2221 }
2222 if (tmp == 5) {
2223 b43_phy_write(dev, B43_PHY_OFDM(0xCC),
2224 (b43_phy_read(dev, B43_PHY_OFDM(0xCC))
2225 & 0x00FF) | 0x1F00);
2226 }
2227 }
2228 if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2)
2229 b43_phy_write(dev, B43_PHY_OFDM(0x7E), 0x78);
2230 if (phy->radio_rev == 8) {
2231 b43_phy_write(dev, B43_PHY_EXTG(0x01),
2232 b43_phy_read(dev, B43_PHY_EXTG(0x01))
2233 | 0x80);
2234 b43_phy_write(dev, B43_PHY_OFDM(0x3E),
2235 b43_phy_read(dev, B43_PHY_OFDM(0x3E))
2236 | 0x4);
2237 }
2238 if (has_loopback_gain(phy))
2239 b43_calc_loopback_gain(dev);
2240
2241 if (phy->radio_rev != 8) {
2242 if (gphy->initval == 0xFFFF)
2243 gphy->initval = b43_radio_init2050(dev);
2244 else
2245 b43_radio_write16(dev, 0x0078, gphy->initval);
2246 }
2247 b43_lo_g_init(dev);
2248 if (has_tx_magnification(phy)) {
2249 b43_radio_write16(dev, 0x52,
2250 (b43_radio_read16(dev, 0x52) & 0xFF00)
2251 | gphy->lo_control->tx_bias | gphy->
2252 lo_control->tx_magn);
2253 } else {
2254 b43_radio_write16(dev, 0x52,
2255 (b43_radio_read16(dev, 0x52) & 0xFFF0)
2256 | gphy->lo_control->tx_bias);
2257 }
2258 if (phy->rev >= 6) {
2259 b43_phy_write(dev, B43_PHY_CCK(0x36),
2260 (b43_phy_read(dev, B43_PHY_CCK(0x36))
2261 & 0x0FFF) | (gphy->lo_control->
2262 tx_bias << 12));
2263 }
2264 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
2265 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
2266 else
2267 b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
2268 if (phy->rev < 2)
2269 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101);
2270 else
2271 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202);
2272 if (phy->gmode || phy->rev >= 2) {
2273 b43_lo_g_adjust(dev);
2274 b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
3712 } 2275 }
3713 2276
3714 /* First we set the channel radio code to prevent the 2277 if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
3715 * firmware from sending ghost packets. 2278 /* The specs state to update the NRSSI LT with
3716 */ 2279 * the value 0x7FFFFFFF here. I think that is some weird
3717 channelcookie = channel; 2280 * compiler optimization in the original driver.
3718 if (0 /*FIXME on 5Ghz */) 2281 * Essentially, what we do here is resetting all NRSSI LT
3719 channelcookie |= 0x100; 2282 * entries to -32 (see the clamp_val() in nrssi_hw_update())
3720 //FIXME set 40Mhz flag if required 2283 */
3721 savedcookie = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN); 2284 b43_nrssi_hw_update(dev, 0xFFFF); //FIXME?
3722 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN, channelcookie); 2285 b43_calc_nrssi_threshold(dev);
3723 2286 } else if (phy->gmode || phy->rev >= 2) {
3724 switch (phy->type) { 2287 if (gphy->nrssi[0] == -1000) {
3725 case B43_PHYTYPE_A: 2288 B43_WARN_ON(gphy->nrssi[1] != -1000);
3726 if (channel > 200) { 2289 b43_calc_nrssi_slope(dev);
3727 err = -EINVAL; 2290 } else
3728 goto out; 2291 b43_calc_nrssi_threshold(dev);
2292 }
2293 if (phy->radio_rev == 8)
2294 b43_phy_write(dev, B43_PHY_EXTG(0x05), 0x3230);
2295 b43_phy_init_pctl(dev);
2296 /* FIXME: The spec says in the following if, the 0 should be replaced
2297 'if OFDM may not be used in the current locale'
2298 but OFDM is legal everywhere */
2299 if ((dev->dev->bus->chip_id == 0x4306
2300 && dev->dev->bus->chip_package == 2) || 0) {
2301 b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0)
2302 & 0xBFFF);
2303 b43_phy_write(dev, B43_PHY_OFDM(0xC3),
2304 b43_phy_read(dev, B43_PHY_OFDM(0xC3))
2305 & 0x7FFF);
2306 }
2307}
2308
2309void b43_gphy_channel_switch(struct b43_wldev *dev,
2310 unsigned int channel,
2311 bool synthetic_pu_workaround)
2312{
2313 if (synthetic_pu_workaround)
2314 b43_synth_pu_workaround(dev, channel);
2315
2316 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
2317
2318 if (channel == 14) {
2319 if (dev->dev->bus->sprom.country_code ==
2320 SSB_SPROM1CCODE_JAPAN)
2321 b43_hf_write(dev,
2322 b43_hf_read(dev) & ~B43_HF_ACPR);
2323 else
2324 b43_hf_write(dev,
2325 b43_hf_read(dev) | B43_HF_ACPR);
2326 b43_write16(dev, B43_MMIO_CHANNEL_EXT,
2327 b43_read16(dev, B43_MMIO_CHANNEL_EXT)
2328 | (1 << 11));
2329 } else {
2330 b43_write16(dev, B43_MMIO_CHANNEL_EXT,
2331 b43_read16(dev, B43_MMIO_CHANNEL_EXT)
2332 & 0xF7BF);
2333 }
2334}
2335
2336static void default_baseband_attenuation(struct b43_wldev *dev,
2337 struct b43_bbatt *bb)
2338{
2339 struct b43_phy *phy = &dev->phy;
2340
2341 if (phy->radio_ver == 0x2050 && phy->radio_rev < 6)
2342 bb->att = 0;
2343 else
2344 bb->att = 2;
2345}
2346
2347static void default_radio_attenuation(struct b43_wldev *dev,
2348 struct b43_rfatt *rf)
2349{
2350 struct ssb_bus *bus = dev->dev->bus;
2351 struct b43_phy *phy = &dev->phy;
2352
2353 rf->with_padmix = 0;
2354
2355 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
2356 bus->boardinfo.type == SSB_BOARD_BCM4309G) {
2357 if (bus->boardinfo.rev < 0x43) {
2358 rf->att = 2;
2359 return;
2360 } else if (bus->boardinfo.rev < 0x51) {
2361 rf->att = 3;
2362 return;
3729 } 2363 }
3730 freq = channel2freq_a(channel); 2364 }
3731 2365
3732 r8 = b43_radio_read16(dev, 0x0008); 2366 if (phy->type == B43_PHYTYPE_A) {
3733 b43_write16(dev, 0x03F0, freq); 2367 rf->att = 0x60;
3734 b43_radio_write16(dev, 0x0008, r8); 2368 return;
3735 2369 }
3736 //TODO: write max channel TX power? to Radio 0x2D 2370
3737 tmp = b43_radio_read16(dev, 0x002E); 2371 switch (phy->radio_ver) {
3738 tmp &= 0x0080; 2372 case 0x2053:
3739 //TODO: OR tmp with the Power out estimation for this channel? 2373 switch (phy->radio_rev) {
3740 b43_radio_write16(dev, 0x002E, tmp); 2374 case 1:
3741 2375 rf->att = 6;
3742 if (freq >= 4920 && freq <= 5500) { 2376 return;
3743 /*
3744 * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F;
3745 * = (freq * 0.025862069
3746 */
3747 r8 = 3 * freq / 116; /* is equal to r8 = freq * 0.025862 */
3748 } 2377 }
3749 b43_radio_write16(dev, 0x0007, (r8 << 4) | r8);
3750 b43_radio_write16(dev, 0x0020, (r8 << 4) | r8);
3751 b43_radio_write16(dev, 0x0021, (r8 << 4) | r8);
3752 b43_radio_write16(dev, 0x0022, (b43_radio_read16(dev, 0x0022)
3753 & 0x000F) | (r8 << 4));
3754 b43_radio_write16(dev, 0x002A, (r8 << 4));
3755 b43_radio_write16(dev, 0x002B, (r8 << 4));
3756 b43_radio_write16(dev, 0x0008, (b43_radio_read16(dev, 0x0008)
3757 & 0x00F0) | (r8 << 4));
3758 b43_radio_write16(dev, 0x0029, (b43_radio_read16(dev, 0x0029)
3759 & 0xFF0F) | 0x00B0);
3760 b43_radio_write16(dev, 0x0035, 0x00AA);
3761 b43_radio_write16(dev, 0x0036, 0x0085);
3762 b43_radio_write16(dev, 0x003A, (b43_radio_read16(dev, 0x003A)
3763 & 0xFF20) |
3764 freq_r3A_value(freq));
3765 b43_radio_write16(dev, 0x003D,
3766 b43_radio_read16(dev, 0x003D) & 0x00FF);
3767 b43_radio_write16(dev, 0x0081, (b43_radio_read16(dev, 0x0081)
3768 & 0xFF7F) | 0x0080);
3769 b43_radio_write16(dev, 0x0035,
3770 b43_radio_read16(dev, 0x0035) & 0xFFEF);
3771 b43_radio_write16(dev, 0x0035, (b43_radio_read16(dev, 0x0035)
3772 & 0xFFEF) | 0x0010);
3773 b43_radio_set_tx_iq(dev);
3774 //TODO: TSSI2dbm workaround
3775 b43_phy_xmitpower(dev); //FIXME correct?
3776 break; 2378 break;
3777 case B43_PHYTYPE_G: 2379 case 0x2050:
3778 if ((channel < 1) || (channel > 14)) { 2380 switch (phy->radio_rev) {
3779 err = -EINVAL; 2381 case 0:
3780 goto out; 2382 rf->att = 5;
2383 return;
2384 case 1:
2385 if (phy->type == B43_PHYTYPE_G) {
2386 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
2387 && bus->boardinfo.type == SSB_BOARD_BCM4309G
2388 && bus->boardinfo.rev >= 30)
2389 rf->att = 3;
2390 else if (bus->boardinfo.vendor ==
2391 SSB_BOARDVENDOR_BCM
2392 && bus->boardinfo.type ==
2393 SSB_BOARD_BU4306)
2394 rf->att = 3;
2395 else
2396 rf->att = 1;
2397 } else {
2398 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
2399 && bus->boardinfo.type == SSB_BOARD_BCM4309G
2400 && bus->boardinfo.rev >= 30)
2401 rf->att = 7;
2402 else
2403 rf->att = 6;
2404 }
2405 return;
2406 case 2:
2407 if (phy->type == B43_PHYTYPE_G) {
2408 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
2409 && bus->boardinfo.type == SSB_BOARD_BCM4309G
2410 && bus->boardinfo.rev >= 30)
2411 rf->att = 3;
2412 else if (bus->boardinfo.vendor ==
2413 SSB_BOARDVENDOR_BCM
2414 && bus->boardinfo.type ==
2415 SSB_BOARD_BU4306)
2416 rf->att = 5;
2417 else if (bus->chip_id == 0x4320)
2418 rf->att = 4;
2419 else
2420 rf->att = 3;
2421 } else
2422 rf->att = 6;
2423 return;
2424 case 3:
2425 rf->att = 5;
2426 return;
2427 case 4:
2428 case 5:
2429 rf->att = 1;
2430 return;
2431 case 6:
2432 case 7:
2433 rf->att = 5;
2434 return;
2435 case 8:
2436 rf->att = 0xA;
2437 rf->with_padmix = 1;
2438 return;
2439 case 9:
2440 default:
2441 rf->att = 5;
2442 return;
3781 } 2443 }
2444 }
2445 rf->att = 5;
2446}
3782 2447
3783 if (synthetic_pu_workaround) 2448static u16 default_tx_control(struct b43_wldev *dev)
3784 b43_synth_pu_workaround(dev, channel); 2449{
2450 struct b43_phy *phy = &dev->phy;
3785 2451
3786 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); 2452 if (phy->radio_ver != 0x2050)
2453 return 0;
2454 if (phy->radio_rev == 1)
2455 return B43_TXCTL_PA2DB | B43_TXCTL_TXMIX;
2456 if (phy->radio_rev < 6)
2457 return B43_TXCTL_PA2DB;
2458 if (phy->radio_rev == 8)
2459 return B43_TXCTL_TXMIX;
2460 return 0;
2461}
3787 2462
3788 if (channel == 14) { 2463static u8 b43_gphy_aci_detect(struct b43_wldev *dev, u8 channel)
3789 if (dev->dev->bus->sprom.country_code == 2464{
3790 SSB_SPROM1CCODE_JAPAN) 2465 struct b43_phy *phy = &dev->phy;
3791 b43_hf_write(dev, 2466 struct b43_phy_g *gphy = phy->g;
3792 b43_hf_read(dev) & ~B43_HF_ACPR); 2467 u8 ret = 0;
3793 else 2468 u16 saved, rssi, temp;
3794 b43_hf_write(dev, 2469 int i, j = 0;
3795 b43_hf_read(dev) | B43_HF_ACPR); 2470
3796 b43_write16(dev, B43_MMIO_CHANNEL_EXT, 2471 saved = b43_phy_read(dev, 0x0403);
3797 b43_read16(dev, B43_MMIO_CHANNEL_EXT) 2472 b43_switch_channel(dev, channel);
3798 | (1 << 11)); 2473 b43_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5);
3799 } else { 2474 if (gphy->aci_hw_rssi)
3800 b43_write16(dev, B43_MMIO_CHANNEL_EXT, 2475 rssi = b43_phy_read(dev, 0x048A) & 0x3F;
3801 b43_read16(dev, B43_MMIO_CHANNEL_EXT) 2476 else
3802 & 0xF7BF); 2477 rssi = saved & 0x3F;
2478 /* clamp temp to signed 5bit */
2479 if (rssi > 32)
2480 rssi -= 64;
2481 for (i = 0; i < 100; i++) {
2482 temp = (b43_phy_read(dev, 0x047F) >> 8) & 0x3F;
2483 if (temp > 32)
2484 temp -= 64;
2485 if (temp < rssi)
2486 j++;
2487 if (j >= 20)
2488 ret = 1;
2489 }
2490 b43_phy_write(dev, 0x0403, saved);
2491
2492 return ret;
2493}
2494
2495static u8 b43_gphy_aci_scan(struct b43_wldev *dev)
2496{
2497 struct b43_phy *phy = &dev->phy;
2498 u8 ret[13];
2499 unsigned int channel = phy->channel;
2500 unsigned int i, j, start, end;
2501
2502 if (!((phy->type == B43_PHYTYPE_G) && (phy->rev > 0)))
2503 return 0;
2504
2505 b43_phy_lock(dev);
2506 b43_radio_lock(dev);
2507 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC);
2508 b43_phy_write(dev, B43_PHY_G_CRS,
2509 b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF);
2510 b43_set_all_gains(dev, 3, 8, 1);
2511
2512 start = (channel - 5 > 0) ? channel - 5 : 1;
2513 end = (channel + 5 < 14) ? channel + 5 : 13;
2514
2515 for (i = start; i <= end; i++) {
2516 if (abs(channel - i) > 2)
2517 ret[i - 1] = b43_gphy_aci_detect(dev, i);
2518 }
2519 b43_switch_channel(dev, channel);
2520 b43_phy_write(dev, 0x0802,
2521 (b43_phy_read(dev, 0x0802) & 0xFFFC) | 0x0003);
2522 b43_phy_write(dev, 0x0403, b43_phy_read(dev, 0x0403) & 0xFFF8);
2523 b43_phy_write(dev, B43_PHY_G_CRS,
2524 b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000);
2525 b43_set_original_gains(dev);
2526 for (i = 0; i < 13; i++) {
2527 if (!ret[i])
2528 continue;
2529 end = (i + 5 < 13) ? i + 5 : 13;
2530 for (j = i; j < end; j++)
2531 ret[j] = 1;
2532 }
2533 b43_radio_unlock(dev);
2534 b43_phy_unlock(dev);
2535
2536 return ret[channel - 1];
2537}
2538
2539static s32 b43_tssi2dbm_ad(s32 num, s32 den)
2540{
2541 if (num < 0)
2542 return num / den;
2543 else
2544 return (num + den / 2) / den;
2545}
2546
/* Compute one entry of the TSSI-to-dBm conversion table.
 * @entry: the table being filled.
 * @index: which entry (== the TSSI value) to compute.
 * @pab0, @pab1, @pab2: PA coefficients from the SPROM.
 * Returns 0 on success, or -EINVAL if the iteration below fails
 * to converge within 16 steps.
 * NOTE(review): this looks like a fixed-point iterative solve of the
 * vendor power formula (see bcm-specs "Estimate Power Out"); the
 * exact math is taken on faith from the reverse-engineered spec. */
static s8 b43_tssi2dbm_entry(s8 entry[], u8 index,
			     s16 pab0, s16 pab1, s16 pab2)
{
	s32 m1, m2, f = 256, q, delta;
	s8 i = 0;

	/* Linear coefficients for this index, in fixed point
	 * (rounding via b43_tssi2dbm_ad). m2 is clamped to >= 1 so the
	 * iteration below never multiplies by zero. */
	m1 = b43_tssi2dbm_ad(16 * pab0 + index * pab1, 32);
	m2 = max(b43_tssi2dbm_ad(32768 + index * pab2, 256), 1);
	do {
		/* Bail out if we do not converge within 16 iterations. */
		if (i > 15)
			return -EINVAL;
		q = b43_tssi2dbm_ad(f * 4096 -
				    b43_tssi2dbm_ad(m2 * f, 16) * f, 2048);
		delta = abs(q - f);
		f = q;
		i++;
	} while (delta >= 2);	/* converged: successive values differ by < 2 */
	/* Scale the result down and clamp it into the s8 dBm range. */
	entry[index] = clamp_val(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128);
	return 0;
}
2567
2568u8 * b43_generate_dyn_tssi2dbm_tab(struct b43_wldev *dev,
2569 s16 pab0, s16 pab1, s16 pab2)
2570{
2571 unsigned int i;
2572 u8 *tab;
2573 int err;
2574
2575 tab = kmalloc(64, GFP_KERNEL);
2576 if (!tab) {
2577 b43err(dev->wl, "Could not allocate memory "
2578 "for tssi2dbm table\n");
2579 return NULL;
2580 }
2581 for (i = 0; i < 64; i++) {
2582 err = b43_tssi2dbm_entry(tab, i, pab0, pab1, pab2);
2583 if (err) {
2584 b43err(dev->wl, "Could not generate "
2585 "tssi2dBm table\n");
2586 kfree(tab);
2587 return NULL;
3803 } 2588 }
3804 break;
3805 case B43_PHYTYPE_N:
3806 err = b43_nphy_selectchannel(dev, channel);
3807 if (err)
3808 goto out;
3809 break;
3810 default:
3811 B43_WARN_ON(1);
3812 } 2589 }
3813 2590
3814 phy->channel = channel; 2591 return tab;
3815 /* Wait for the radio to tune to the channel and stabilize. */ 2592}
3816 msleep(8); 2593
3817out: 2594/* Initialise the TSSI->dBm lookup table */
3818 if (err) { 2595static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
3819 b43_shm_write16(dev, B43_SHM_SHARED, 2596{
3820 B43_SHM_SH_CHAN, savedcookie); 2597 struct b43_phy *phy = &dev->phy;
2598 struct b43_phy_g *gphy = phy->g;
2599 s16 pab0, pab1, pab2;
2600
2601 pab0 = (s16) (dev->dev->bus->sprom.pa0b0);
2602 pab1 = (s16) (dev->dev->bus->sprom.pa0b1);
2603 pab2 = (s16) (dev->dev->bus->sprom.pa0b2);
2604
2605 B43_WARN_ON((dev->dev->bus->chip_id == 0x4301) &&
2606 (phy->radio_ver != 0x2050)); /* Not supported anymore */
2607
2608 gphy->dyn_tssi_tbl = 0;
2609
2610 if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
2611 pab0 != -1 && pab1 != -1 && pab2 != -1) {
2612 /* The pabX values are set in SPROM. Use them. */
2613 if ((s8) dev->dev->bus->sprom.itssi_bg != 0 &&
2614 (s8) dev->dev->bus->sprom.itssi_bg != -1) {
2615 gphy->tgt_idle_tssi =
2616 (s8) (dev->dev->bus->sprom.itssi_bg);
2617 } else
2618 gphy->tgt_idle_tssi = 62;
2619 gphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
2620 pab1, pab2);
2621 if (!gphy->tssi2dbm)
2622 return -ENOMEM;
2623 gphy->dyn_tssi_tbl = 1;
2624 } else {
2625 /* pabX values not set in SPROM. */
2626 gphy->tgt_idle_tssi = 52;
2627 gphy->tssi2dbm = b43_tssi2dbm_g_table;
3821 } 2628 }
2629
2630 return 0;
2631}
2632
2633static int b43_gphy_op_allocate(struct b43_wldev *dev)
2634{
2635 struct b43_phy_g *gphy;
2636 struct b43_txpower_lo_control *lo;
2637 int err;
2638
2639 gphy = kzalloc(sizeof(*gphy), GFP_KERNEL);
2640 if (!gphy) {
2641 err = -ENOMEM;
2642 goto error;
2643 }
2644 dev->phy.g = gphy;
2645
2646 lo = kzalloc(sizeof(*lo), GFP_KERNEL);
2647 if (!lo) {
2648 err = -ENOMEM;
2649 goto err_free_gphy;
2650 }
2651 gphy->lo_control = lo;
2652
2653 err = b43_gphy_init_tssi2dbm_table(dev);
2654 if (err)
2655 goto err_free_lo;
2656
2657 return 0;
2658
2659err_free_lo:
2660 kfree(lo);
2661err_free_gphy:
2662 kfree(gphy);
2663error:
3822 return err; 2664 return err;
3823} 2665}
3824 2666
3825void b43_radio_turn_on(struct b43_wldev *dev) 2667static void b43_gphy_op_prepare_structs(struct b43_wldev *dev)
3826{ 2668{
3827 struct b43_phy *phy = &dev->phy; 2669 struct b43_phy *phy = &dev->phy;
3828 int err; 2670 struct b43_phy_g *gphy = phy->g;
3829 u8 channel; 2671 const void *tssi2dbm;
2672 int tgt_idle_tssi;
2673 struct b43_txpower_lo_control *lo;
2674 unsigned int i;
2675
2676 /* tssi2dbm table is constant, so it is initialized at alloc time.
2677 * Save a copy of the pointer. */
2678 tssi2dbm = gphy->tssi2dbm;
2679 tgt_idle_tssi = gphy->tgt_idle_tssi;
2680 /* Save the LO pointer. */
2681 lo = gphy->lo_control;
2682
2683 /* Zero out the whole PHY structure. */
2684 memset(gphy, 0, sizeof(*gphy));
2685
2686 /* Restore pointers. */
2687 gphy->tssi2dbm = tssi2dbm;
2688 gphy->tgt_idle_tssi = tgt_idle_tssi;
2689 gphy->lo_control = lo;
2690
2691 memset(gphy->minlowsig, 0xFF, sizeof(gphy->minlowsig));
2692
2693 /* NRSSI */
2694 for (i = 0; i < ARRAY_SIZE(gphy->nrssi); i++)
2695 gphy->nrssi[i] = -1000;
2696 for (i = 0; i < ARRAY_SIZE(gphy->nrssi_lt); i++)
2697 gphy->nrssi_lt[i] = i;
2698
2699 gphy->lofcal = 0xFFFF;
2700 gphy->initval = 0xFFFF;
2701
2702 gphy->interfmode = B43_INTERFMODE_NONE;
2703
2704 /* OFDM-table address caching. */
2705 gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_UNKNOWN;
2706
2707 gphy->average_tssi = 0xFF;
2708
	/* Local Oscillator structure */
2710 lo->tx_bias = 0xFF;
2711 INIT_LIST_HEAD(&lo->calib_list);
2712}
2713
2714static void b43_gphy_op_free(struct b43_wldev *dev)
2715{
2716 struct b43_phy *phy = &dev->phy;
2717 struct b43_phy_g *gphy = phy->g;
2718
2719 kfree(gphy->lo_control);
2720
2721 if (gphy->dyn_tssi_tbl)
2722 kfree(gphy->tssi2dbm);
2723 gphy->dyn_tssi_tbl = 0;
2724 gphy->tssi2dbm = NULL;
2725
2726 kfree(gphy);
2727 dev->phy.g = NULL;
2728}
2729
/* Prepare the G-PHY hardware for initialization: pick default
 * attenuation and TX-control values, build the LO calibration
 * attenuation lists, and apply the phy-rev-1 gmode workaround.
 * Returns 0 (cannot currently fail). */
static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	struct b43_txpower_lo_control *lo = gphy->lo_control;

	B43_WARN_ON(phy->type != B43_PHYTYPE_G);

	/* Seed the power-control state with board defaults. */
	default_baseband_attenuation(dev, &gphy->bbatt);
	default_radio_attenuation(dev, &gphy->rfatt);
	gphy->tx_control = (default_tx_control(dev) << 4);
	generate_rfatt_list(dev, &lo->rfatt_list);
	generate_bbatt_list(dev, &lo->bbatt_list);

	/* Commit previous writes (the read flushes posted MMIO writes). */
	b43_read32(dev, B43_MMIO_MACCTL);

	if (phy->rev == 1) {
		/* Workaround: Temporarily disable gmode through the early init
		 * phase, as the gmode stuff is not needed for phy rev 1 */
		phy->gmode = 0;
		b43_wireless_core_reset(dev, 0);
		b43_phy_initg(dev);
		phy->gmode = 1;
		b43_wireless_core_reset(dev, B43_TMSLOW_GMODE);
	}

	return 0;
}
2759
/* PHY-ops init hook: run the full G-PHY initialization sequence.
 * Always returns 0. */
static int b43_gphy_op_init(struct b43_wldev *dev)
{
	b43_phy_initg(dev);
	return 0;
}
2766
/* PHY-ops exit hook: tear down the local-oscillator calibration state. */
static void b43_gphy_op_exit(struct b43_wldev *dev)
{
	b43_lo_g_cleanup(dev);
}
2771
2772static u16 b43_gphy_op_read(struct b43_wldev *dev, u16 reg)
2773{
2774 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
2775 return b43_read16(dev, B43_MMIO_PHY_DATA);
2776}
2777
2778static void b43_gphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
2779{
2780 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
2781 b43_write16(dev, B43_MMIO_PHY_DATA, value);
2782}
2783
2784static u16 b43_gphy_op_radio_read(struct b43_wldev *dev, u16 reg)
2785{
2786 /* Register 1 is a 32-bit register. */
2787 B43_WARN_ON(reg == 1);
2788 /* G-PHY needs 0x80 for read access. */
2789 reg |= 0x80;
2790
2791 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
2792 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
2793}
2794
2795static void b43_gphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
2796{
2797 /* Register 1 is a 32-bit register. */
2798 B43_WARN_ON(reg == 1);
2799
2800 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
2801 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
2802}
2803
2804static bool b43_gphy_op_supports_hwpctl(struct b43_wldev *dev)
2805{
2806 return (dev->phy.rev >= 6);
2807}
2808
2809static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
2810 enum rfkill_state state)
2811{
2812 struct b43_phy *phy = &dev->phy;
2813 struct b43_phy_g *gphy = phy->g;
2814 unsigned int channel;
3830 2815
3831 might_sleep(); 2816 might_sleep();
3832 2817
3833 if (phy->radio_on) 2818 if (state == RFKILL_STATE_UNBLOCKED) {
3834 return; 2819 /* Turn radio ON */
2820 if (phy->radio_on)
2821 return;
3835 2822
3836 switch (phy->type) {
3837 case B43_PHYTYPE_A:
3838 b43_radio_write16(dev, 0x0004, 0x00C0);
3839 b43_radio_write16(dev, 0x0005, 0x0008);
3840 b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) & 0xFFF7);
3841 b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) & 0xFFF7);
3842 b43_radio_init2060(dev);
3843 break;
3844 case B43_PHYTYPE_B:
3845 case B43_PHYTYPE_G:
3846 b43_phy_write(dev, 0x0015, 0x8000); 2823 b43_phy_write(dev, 0x0015, 0x8000);
3847 b43_phy_write(dev, 0x0015, 0xCC00); 2824 b43_phy_write(dev, 0x0015, 0xCC00);
3848 b43_phy_write(dev, 0x0015, (phy->gmode ? 0x00C0 : 0x0000)); 2825 b43_phy_write(dev, 0x0015, (phy->gmode ? 0x00C0 : 0x0000));
3849 if (phy->radio_off_context.valid) { 2826 if (gphy->radio_off_context.valid) {
3850 /* Restore the RFover values. */ 2827 /* Restore the RFover values. */
3851 b43_phy_write(dev, B43_PHY_RFOVER, 2828 b43_phy_write(dev, B43_PHY_RFOVER,
3852 phy->radio_off_context.rfover); 2829 gphy->radio_off_context.rfover);
3853 b43_phy_write(dev, B43_PHY_RFOVERVAL, 2830 b43_phy_write(dev, B43_PHY_RFOVERVAL,
3854 phy->radio_off_context.rfoverval); 2831 gphy->radio_off_context.rfoverval);
3855 phy->radio_off_context.valid = 0; 2832 gphy->radio_off_context.valid = 0;
3856 } 2833 }
3857 channel = phy->channel; 2834 channel = phy->channel;
3858 err = b43_radio_selectchannel(dev, B43_DEFAULT_CHANNEL_BG, 1); 2835 b43_gphy_channel_switch(dev, 6, 1);
3859 err |= b43_radio_selectchannel(dev, channel, 0); 2836 b43_gphy_channel_switch(dev, channel, 0);
3860 B43_WARN_ON(err); 2837 } else {
3861 break; 2838 /* Turn radio OFF */
3862 case B43_PHYTYPE_N: 2839 u16 rfover, rfoverval;
3863 b43_nphy_radio_turn_on(dev); 2840
3864 break; 2841 rfover = b43_phy_read(dev, B43_PHY_RFOVER);
3865 default: 2842 rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
3866 B43_WARN_ON(1); 2843 gphy->radio_off_context.rfover = rfover;
2844 gphy->radio_off_context.rfoverval = rfoverval;
2845 gphy->radio_off_context.valid = 1;
2846 b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
2847 b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
3867 } 2848 }
3868 phy->radio_on = 1;
3869} 2849}
3870 2850
3871void b43_radio_turn_off(struct b43_wldev *dev, bool force) 2851static int b43_gphy_op_switch_channel(struct b43_wldev *dev,
2852 unsigned int new_channel)
2853{
2854 if ((new_channel < 1) || (new_channel > 14))
2855 return -EINVAL;
2856 b43_gphy_channel_switch(dev, new_channel, 0);
2857
2858 return 0;
2859}
2860
/* The default channel for the G-PHY (2.4GHz band) is channel 1. */
static unsigned int b43_gphy_op_get_default_chan(struct b43_wldev *dev)
{
	return 1;
}
2865
2866static void b43_gphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
3872{ 2867{
3873 struct b43_phy *phy = &dev->phy; 2868 struct b43_phy *phy = &dev->phy;
2869 u64 hf;
2870 u16 tmp;
2871 int autodiv = 0;
3874 2872
3875 if (!phy->radio_on && !force) 2873 if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
3876 return; 2874 autodiv = 1;
2875
2876 hf = b43_hf_read(dev);
2877 hf &= ~B43_HF_ANTDIVHELP;
2878 b43_hf_write(dev, hf);
2879
2880 tmp = b43_phy_read(dev, B43_PHY_BBANDCFG);
2881 tmp &= ~B43_PHY_BBANDCFG_RXANT;
2882 tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
2883 << B43_PHY_BBANDCFG_RXANT_SHIFT;
2884 b43_phy_write(dev, B43_PHY_BBANDCFG, tmp);
2885
2886 if (autodiv) {
2887 tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
2888 if (antenna == B43_ANTENNA_AUTO0)
2889 tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
2890 else
2891 tmp |= B43_PHY_ANTDWELL_AUTODIV1;
2892 b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
2893 }
2894 tmp = b43_phy_read(dev, B43_PHY_ANTWRSETT);
2895 if (autodiv)
2896 tmp |= B43_PHY_ANTWRSETT_ARXDIV;
2897 else
2898 tmp &= ~B43_PHY_ANTWRSETT_ARXDIV;
2899 b43_phy_write(dev, B43_PHY_ANTWRSETT, tmp);
2900 if (phy->rev >= 2) {
2901 tmp = b43_phy_read(dev, B43_PHY_OFDM61);
2902 tmp |= B43_PHY_OFDM61_10;
2903 b43_phy_write(dev, B43_PHY_OFDM61, tmp);
2904
2905 tmp =
2906 b43_phy_read(dev, B43_PHY_DIVSRCHGAINBACK);
2907 tmp = (tmp & 0xFF00) | 0x15;
2908 b43_phy_write(dev, B43_PHY_DIVSRCHGAINBACK,
2909 tmp);
2910
2911 if (phy->rev == 2) {
2912 b43_phy_write(dev, B43_PHY_ADIVRELATED,
2913 8);
2914 } else {
2915 tmp =
2916 b43_phy_read(dev,
2917 B43_PHY_ADIVRELATED);
2918 tmp = (tmp & 0xFF00) | 8;
2919 b43_phy_write(dev, B43_PHY_ADIVRELATED,
2920 tmp);
2921 }
2922 }
2923 if (phy->rev >= 6)
2924 b43_phy_write(dev, B43_PHY_OFDM9B, 0xDC);
2925
2926 hf |= B43_HF_ANTDIVHELP;
2927 b43_hf_write(dev, hf);
2928}
3877 2929
3878 switch (phy->type) { 2930static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
3879 case B43_PHYTYPE_N: 2931 enum b43_interference_mitigation mode)
3880 b43_nphy_radio_turn_off(dev); 2932{
2933 struct b43_phy *phy = &dev->phy;
2934 struct b43_phy_g *gphy = phy->g;
2935 int currentmode;
2936
2937 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
2938 if ((phy->rev == 0) || (!phy->gmode))
2939 return -ENODEV;
2940
2941 gphy->aci_wlan_automatic = 0;
2942 switch (mode) {
2943 case B43_INTERFMODE_AUTOWLAN:
2944 gphy->aci_wlan_automatic = 1;
2945 if (gphy->aci_enable)
2946 mode = B43_INTERFMODE_MANUALWLAN;
2947 else
2948 mode = B43_INTERFMODE_NONE;
3881 break; 2949 break;
3882 case B43_PHYTYPE_A: 2950 case B43_INTERFMODE_NONE:
3883 b43_radio_write16(dev, 0x0004, 0x00FF); 2951 case B43_INTERFMODE_NONWLAN:
3884 b43_radio_write16(dev, 0x0005, 0x00FB); 2952 case B43_INTERFMODE_MANUALWLAN:
3885 b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) | 0x0008);
3886 b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) | 0x0008);
3887 break; 2953 break;
3888 case B43_PHYTYPE_G: { 2954 default:
3889 u16 rfover, rfoverval; 2955 return -EINVAL;
2956 }
3890 2957
3891 rfover = b43_phy_read(dev, B43_PHY_RFOVER); 2958 currentmode = gphy->interfmode;
3892 rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); 2959 if (currentmode == mode)
3893 if (!force) { 2960 return 0;
3894 phy->radio_off_context.rfover = rfover; 2961 if (currentmode != B43_INTERFMODE_NONE)
3895 phy->radio_off_context.rfoverval = rfoverval; 2962 b43_radio_interference_mitigation_disable(dev, currentmode);
3896 phy->radio_off_context.valid = 1; 2963
2964 if (mode == B43_INTERFMODE_NONE) {
2965 gphy->aci_enable = 0;
2966 gphy->aci_hw_rssi = 0;
2967 } else
2968 b43_radio_interference_mitigation_enable(dev, mode);
2969 gphy->interfmode = mode;
2970
2971 return 0;
2972}
2973
2974/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
2975 * This function converts a TSSI value to dBm in Q5.2
2976 */
2977static s8 b43_gphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
2978{
2979 struct b43_phy_g *gphy = dev->phy.g;
2980 s8 dbm;
2981 s32 tmp;
2982
2983 tmp = (gphy->tgt_idle_tssi - gphy->cur_idle_tssi + tssi);
2984 tmp = clamp_val(tmp, 0x00, 0x3F);
2985 dbm = gphy->tssi2dbm[tmp];
2986
2987 return dbm;
2988}
2989
2990static void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
2991 int *_bbatt, int *_rfatt)
2992{
2993 int rfatt = *_rfatt;
2994 int bbatt = *_bbatt;
2995 struct b43_txpower_lo_control *lo = dev->phy.g->lo_control;
2996
2997 /* Get baseband and radio attenuation values into their permitted ranges.
2998 * Radio attenuation affects power level 4 times as much as baseband. */
2999
3000 /* Range constants */
3001 const int rf_min = lo->rfatt_list.min_val;
3002 const int rf_max = lo->rfatt_list.max_val;
3003 const int bb_min = lo->bbatt_list.min_val;
3004 const int bb_max = lo->bbatt_list.max_val;
3005
3006 while (1) {
3007 if (rfatt > rf_max && bbatt > bb_max - 4)
3008 break; /* Can not get it into ranges */
3009 if (rfatt < rf_min && bbatt < bb_min + 4)
3010 break; /* Can not get it into ranges */
3011 if (bbatt > bb_max && rfatt > rf_max - 1)
3012 break; /* Can not get it into ranges */
3013 if (bbatt < bb_min && rfatt < rf_min + 1)
3014 break; /* Can not get it into ranges */
3015
3016 if (bbatt > bb_max) {
3017 bbatt -= 4;
3018 rfatt += 1;
3019 continue;
3020 }
3021 if (bbatt < bb_min) {
3022 bbatt += 4;
3023 rfatt -= 1;
3024 continue;
3025 }
3026 if (rfatt > rf_max) {
3027 rfatt -= 1;
3028 bbatt += 4;
3029 continue;
3030 }
3031 if (rfatt < rf_min) {
3032 rfatt += 1;
3033 bbatt -= 4;
3034 continue;
3897 } 3035 }
3898 b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
3899 b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
3900 break; 3036 break;
3901 } 3037 }
3902 default: 3038
3903 B43_WARN_ON(1); 3039 *_rfatt = clamp_val(rfatt, rf_min, rf_max);
3040 *_bbatt = clamp_val(bbatt, bb_min, bb_max);
3041}
3042
3043static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev)
3044{
3045 struct b43_phy *phy = &dev->phy;
3046 struct b43_phy_g *gphy = phy->g;
3047 int rfatt, bbatt;
3048 u8 tx_control;
3049
3050 spin_lock_irq(&dev->wl->irq_lock);
3051
3052 /* Calculate the new attenuation values. */
3053 bbatt = gphy->bbatt.att;
3054 bbatt += gphy->bbatt_delta;
3055 rfatt = gphy->rfatt.att;
3056 rfatt += gphy->rfatt_delta;
3057
3058 b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
3059 tx_control = gphy->tx_control;
3060 if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) {
3061 if (rfatt <= 1) {
3062 if (tx_control == 0) {
3063 tx_control =
3064 B43_TXCTL_PA2DB |
3065 B43_TXCTL_TXMIX;
3066 rfatt += 2;
3067 bbatt += 2;
3068 } else if (dev->dev->bus->sprom.
3069 boardflags_lo &
3070 B43_BFL_PACTRL) {
3071 bbatt += 4 * (rfatt - 2);
3072 rfatt = 2;
3073 }
3074 } else if (rfatt > 4 && tx_control) {
3075 tx_control = 0;
3076 if (bbatt < 3) {
3077 rfatt -= 3;
3078 bbatt += 2;
3079 } else {
3080 rfatt -= 2;
3081 bbatt -= 2;
3082 }
3083 }
3904 } 3084 }
3905 phy->radio_on = 0; 3085 /* Save the control values */
3086 gphy->tx_control = tx_control;
3087 b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
3088 gphy->rfatt.att = rfatt;
3089 gphy->bbatt.att = bbatt;
3090
3091 /* We drop the lock early, so we can sleep during hardware
3092 * adjustment. Possible races with op_recalc_txpower are harmless,
3093 * as we will be called once again in case we raced. */
3094 spin_unlock_irq(&dev->wl->irq_lock);
3095
3096 if (b43_debug(dev, B43_DBG_XMITPOWER))
3097 b43dbg(dev->wl, "Adjusting TX power\n");
3098
3099 /* Adjust the hardware */
3100 b43_phy_lock(dev);
3101 b43_radio_lock(dev);
3102 b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt,
3103 gphy->tx_control);
3104 b43_radio_unlock(dev);
3105 b43_phy_unlock(dev);
3906} 3106}
3107
/* Compare the estimated TX power (from TSSI feedback) against the
 * desired power and compute RF/baseband attenuation deltas.
 * Returns B43_TXPWR_RES_NEED_ADJUST if b43_gphy_op_adjust_txpower()
 * must be called to apply the saved deltas, B43_TXPWR_RES_DONE
 * otherwise.  If @ignore_tssi is set, missing TSSI readings are
 * treated as zero instead of aborting the adjustment. */
static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev,
							bool ignore_tssi)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	unsigned int average_tssi;
	int cck_result, ofdm_result;
	int estimated_pwr, desired_pwr, pwr_adjust; /* all in Q5.2 */
	int rfatt_delta, bbatt_delta;
	unsigned int max_pwr;

	/* First get the average TSSI */
	cck_result = b43_phy_shm_tssi_read(dev, B43_SHM_SH_TSSI_CCK);
	ofdm_result = b43_phy_shm_tssi_read(dev, B43_SHM_SH_TSSI_OFDM_G);
	if ((cck_result < 0) && (ofdm_result < 0)) {
		/* No TSSI information available */
		if (!ignore_tssi)
			goto no_adjustment_needed;
		cck_result = 0;
		ofdm_result = 0;
	}
	/* Use whichever of the two readings is valid; average if both are. */
	if (cck_result < 0)
		average_tssi = ofdm_result;
	else if (ofdm_result < 0)
		average_tssi = cck_result;
	else
		average_tssi = (cck_result + ofdm_result) / 2;
	/* Merge the average with the stored value. */
	if (likely(gphy->average_tssi != 0xFF))
		average_tssi = (average_tssi + gphy->average_tssi) / 2;
	gphy->average_tssi = average_tssi;
	B43_WARN_ON(average_tssi >= B43_TSSI_MAX);

	/* Estimate the TX power emission based on the TSSI */
	estimated_pwr = b43_gphy_estimate_power_out(dev, average_tssi);

	B43_WARN_ON(phy->type != B43_PHYTYPE_G);
	max_pwr = dev->dev->bus->sprom.maxpwr_bg;
	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
		max_pwr -= 3; /* minus 0.75 */
	if (unlikely(max_pwr >= INT_TO_Q52(30/*dBm*/))) {
		b43warn(dev->wl,
			"Invalid max-TX-power value in SPROM.\n");
		max_pwr = INT_TO_Q52(20); /* fake it */
		dev->dev->bus->sprom.maxpwr_bg = max_pwr;
	}

	/* Get desired power (in Q5.2) */
	if (phy->desired_txpower < 0)
		desired_pwr = INT_TO_Q52(0);
	else
		desired_pwr = INT_TO_Q52(phy->desired_txpower);
	/* And limit it. max_pwr already is Q5.2 */
	desired_pwr = clamp_val(desired_pwr, 0, max_pwr);
	if (b43_debug(dev, B43_DBG_XMITPOWER)) {
		b43dbg(dev->wl,
		       "[TX power] current = " Q52_FMT
		       " dBm, desired = " Q52_FMT
		       " dBm, max = " Q52_FMT "\n",
		       Q52_ARG(estimated_pwr),
		       Q52_ARG(desired_pwr),
		       Q52_ARG(max_pwr));
	}

	/* Calculate the adjustment delta. */
	pwr_adjust = desired_pwr - estimated_pwr;
	if (pwr_adjust == 0)
		goto no_adjustment_needed;

	/* RF attenuation delta. */
	rfatt_delta = ((pwr_adjust + 7) / 8);
	/* Lower attenuation => Bigger power output. Negate it. */
	rfatt_delta = -rfatt_delta;

	/* Baseband attenuation delta. */
	bbatt_delta = pwr_adjust / 2;
	/* Lower attenuation => Bigger power output. Negate it. */
	bbatt_delta = -bbatt_delta;
	/* RF att affects power level 4 times as much as
	 * Baseband attenuation. Subtract it. */
	bbatt_delta -= 4 * rfatt_delta;

	if (b43_debug(dev, B43_DBG_XMITPOWER)) {
		int dbm = pwr_adjust < 0 ? -pwr_adjust : pwr_adjust;
		b43dbg(dev->wl,
		       "[TX power deltas] %s" Q52_FMT " dBm => "
		       "bbatt-delta = %d, rfatt-delta = %d\n",
		       (pwr_adjust < 0 ? "-" : ""), Q52_ARG(dbm),
		       bbatt_delta, rfatt_delta);
	}
	/* So do we finally need to adjust something in hardware? */
	if ((rfatt_delta == 0) && (bbatt_delta == 0))
		goto no_adjustment_needed;

	/* Save the deltas for later when we adjust the power. */
	gphy->bbatt_delta = bbatt_delta;
	gphy->rfatt_delta = rfatt_delta;

	/* We need to adjust the TX power on the device. */
	return B43_TXPWR_RES_NEED_ADJUST;

no_adjustment_needed:
	return B43_TXPWR_RES_DONE;
}
3212
/* Periodic work, run every 15 seconds: ACI (Adjacent Channel
 * Interference) handling (mostly unimplemented) and LO maintenance. */
static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;

	//TODO: update_aci_moving_average
	if (gphy->aci_enable && gphy->aci_wlan_automatic) {
		b43_mac_suspend(dev);
		/* NOTE(review): the !gphy->aci_enable test below can never
		 * be true inside this (aci_enable && ...) guard, so this
		 * branch is currently dead -- presumably a placeholder
		 * until the TODO conditions are filled in; confirm before
		 * relying on it. */
		if (!gphy->aci_enable && 1 /*TODO: not scanning? */ ) {
			if (0 /*TODO: bunch of conditions */ ) {
				phy->ops->interf_mitigation(dev,
							B43_INTERFMODE_MANUALWLAN);
			}
		} else if (0 /*TODO*/) {
			if (/*(aci_average > 1000) &&*/ !b43_gphy_aci_scan(dev))
				phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE);
		}
		b43_mac_enable(dev);
	} else if (gphy->interfmode == B43_INTERFMODE_NONWLAN &&
		   phy->rev == 1) {
		//TODO: implement rev1 workaround
	}
	/* Periodic Local Oscillator calibration maintenance. */
	b43_lo_g_maintanance_work(dev);
}
3237
3238static void b43_gphy_op_pwork_60sec(struct b43_wldev *dev)
3239{
3240 struct b43_phy *phy = &dev->phy;
3241
3242 if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI))
3243 return;
3244
3245 b43_mac_suspend(dev);
3246 b43_calc_nrssi_slope(dev);
3247 if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) {
3248 u8 old_chan = phy->channel;
3249
3250 /* VCO Calibration */
3251 if (old_chan >= 8)
3252 b43_switch_channel(dev, 1);
3253 else
3254 b43_switch_channel(dev, 13);
3255 b43_switch_channel(dev, old_chan);
3256 }
3257 b43_mac_enable(dev);
3258}
3259
/* G-PHY operations vector.  The PHY common code dispatches through
 * these callbacks (see the phy->ops-> calls above). */
const struct b43_phy_operations b43_phyops_g = {
	.allocate		= b43_gphy_op_allocate,
	.free			= b43_gphy_op_free,
	.prepare_structs	= b43_gphy_op_prepare_structs,
	.prepare_hardware	= b43_gphy_op_prepare_hardware,
	.init			= b43_gphy_op_init,
	.exit			= b43_gphy_op_exit,
	.phy_read		= b43_gphy_op_read,
	.phy_write		= b43_gphy_op_write,
	.radio_read		= b43_gphy_op_radio_read,
	.radio_write		= b43_gphy_op_radio_write,
	.supports_hwpctl	= b43_gphy_op_supports_hwpctl,
	.software_rfkill	= b43_gphy_op_software_rfkill,
	.switch_analog		= b43_phyop_switch_analog_generic,
	.switch_channel		= b43_gphy_op_switch_channel,
	.get_default_chan	= b43_gphy_op_get_default_chan,
	.set_rx_antenna		= b43_gphy_op_set_rx_antenna,
	.interf_mitigation	= b43_gphy_op_interf_mitigation,
	.recalc_txpower		= b43_gphy_op_recalc_txpower,
	.adjust_txpower		= b43_gphy_op_adjust_txpower,
	.pwork_15sec		= b43_gphy_op_pwork_15sec,
	.pwork_60sec		= b43_gphy_op_pwork_60sec,
};
diff --git a/drivers/net/wireless/b43/phy_g.h b/drivers/net/wireless/b43/phy_g.h
new file mode 100644
index 000000000000..718947fd41ae
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_g.h
@@ -0,0 +1,209 @@
1#ifndef LINUX_B43_PHY_G_H_
2#define LINUX_B43_PHY_G_H_
3
4/* OFDM PHY registers are defined in the A-PHY header. */
5#include "phy_a.h"
6
7/* CCK (B) PHY Registers */
8#define B43_PHY_VERSION_CCK B43_PHY_CCK(0x00) /* Versioning register for B-PHY */
9#define B43_PHY_CCKBBANDCFG B43_PHY_CCK(0x01) /* Contains antenna 0/1 control bit */
10#define B43_PHY_PGACTL B43_PHY_CCK(0x15) /* PGA control */
11#define B43_PHY_PGACTL_LPF 0x1000 /* Low pass filter (?) */
12#define B43_PHY_PGACTL_LOWBANDW 0x0040 /* Low bandwidth flag */
13#define B43_PHY_PGACTL_UNKNOWN 0xEFA0
14#define B43_PHY_FBCTL1 B43_PHY_CCK(0x18) /* Frequency bandwidth control 1 */
15#define B43_PHY_ITSSI B43_PHY_CCK(0x29) /* Idle TSSI */
16#define B43_PHY_LO_LEAKAGE B43_PHY_CCK(0x2D) /* Measured LO leakage */
17#define B43_PHY_ENERGY B43_PHY_CCK(0x33) /* Energy */
18#define B43_PHY_SYNCCTL B43_PHY_CCK(0x35)
19#define B43_PHY_FBCTL2 B43_PHY_CCK(0x38) /* Frequency bandwidth control 2 */
20#define B43_PHY_DACCTL B43_PHY_CCK(0x60) /* DAC control */
21#define B43_PHY_RCCALOVER B43_PHY_CCK(0x78) /* RC calibration override */
22
23/* Extended G-PHY Registers */
24#define B43_PHY_CLASSCTL B43_PHY_EXTG(0x02) /* Classify control */
25#define B43_PHY_GTABCTL B43_PHY_EXTG(0x03) /* G-PHY table control (see below) */
26#define B43_PHY_GTABOFF 0x03FF /* G-PHY table offset (see below) */
27#define B43_PHY_GTABNR 0xFC00 /* G-PHY table number (see below) */
28#define B43_PHY_GTABNR_SHIFT 10
29#define B43_PHY_GTABDATA B43_PHY_EXTG(0x04) /* G-PHY table data */
30#define B43_PHY_LO_MASK B43_PHY_EXTG(0x0F) /* Local Oscillator control mask */
31#define B43_PHY_LO_CTL B43_PHY_EXTG(0x10) /* Local Oscillator control */
32#define B43_PHY_RFOVER B43_PHY_EXTG(0x11) /* RF override */
33#define B43_PHY_RFOVERVAL B43_PHY_EXTG(0x12) /* RF override value */
34#define B43_PHY_RFOVERVAL_EXTLNA 0x8000
35#define B43_PHY_RFOVERVAL_LNA 0x7000
36#define B43_PHY_RFOVERVAL_LNA_SHIFT 12
37#define B43_PHY_RFOVERVAL_PGA 0x0F00
38#define B43_PHY_RFOVERVAL_PGA_SHIFT 8
39#define B43_PHY_RFOVERVAL_UNK 0x0010 /* Unknown, always set. */
40#define B43_PHY_RFOVERVAL_TRSWRX 0x00E0
41#define B43_PHY_RFOVERVAL_BW 0x0003 /* Bandwidth flags */
42#define B43_PHY_RFOVERVAL_BW_LPF 0x0001 /* Low Pass Filter */
43#define B43_PHY_RFOVERVAL_BW_LBW 0x0002 /* Low Bandwidth (when set), high when unset */
44#define B43_PHY_ANALOGOVER B43_PHY_EXTG(0x14) /* Analog override */
45#define B43_PHY_ANALOGOVERVAL B43_PHY_EXTG(0x15) /* Analog override value */
46
47
48/*** G-PHY table numbers */
49#define B43_GTAB(number, offset) (((number) << B43_PHY_GTABNR_SHIFT) | (offset))
50#define B43_GTAB_NRSSI B43_GTAB(0x00, 0)
51#define B43_GTAB_TRFEMW B43_GTAB(0x0C, 0x120)
52#define B43_GTAB_ORIGTR B43_GTAB(0x2E, 0x298)
53
54u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset);
55void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value);
56
57
/* Returns the boolean whether "TX Magnification" is enabled:
 * PHY rev >= 2 with a radio 2050 rev 8. */
#define has_tx_magnification(phy) \
	(((phy)->rev >= 2) && \
	 ((phy)->radio_ver == 0x2050) && \
	 ((phy)->radio_rev == 8))
/* Card uses the loopback gain stuff: PHY rev > 1, or G-mode enabled. */
#define has_loopback_gain(phy) \
	(((phy)->rev > 1) || ((phy)->gmode))
66
/* Radio Attenuation (RF Attenuation) setting. */
struct b43_rfatt {
	u8 att;			/* Attenuation value */
	bool with_padmix;	/* Flag, PAD Mixer enabled. */
};
/* Table of valid RF attenuation settings plus the overall bounds. */
struct b43_rfatt_list {
	/* Attenuation values list */
	const struct b43_rfatt *list;
	u8 len;
	/* Minimum/Maximum attenuation values */
	u8 min_val;
	u8 max_val;
};
80
81/* Returns true, if the values are the same. */
82static inline bool b43_compare_rfatt(const struct b43_rfatt *a,
83 const struct b43_rfatt *b)
84{
85 return ((a->att == b->att) &&
86 (a->with_padmix == b->with_padmix));
87}
88
/* Baseband Attenuation setting. */
struct b43_bbatt {
	u8 att;			/* Attenuation value */
};
/* Table of valid baseband attenuation settings plus the overall bounds. */
struct b43_bbatt_list {
	/* Attenuation values list */
	const struct b43_bbatt *list;
	u8 len;
	/* Minimum/Maximum attenuation values */
	u8 min_val;
	u8 max_val;
};
101
102/* Returns true, if the values are the same. */
103static inline bool b43_compare_bbatt(const struct b43_bbatt *a,
104 const struct b43_bbatt *b)
105{
106 return (a->att == b->att);
107}
108
109/* tx_control bits. */
110#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */
111#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */
112#define B43_TXCTL_TXMIX 0x10 /* TX Mixer Gain */
113
114struct b43_txpower_lo_control;
115
/* Runtime state of the G-PHY.  Accessed through phy->g. */
struct b43_phy_g {
	/* ACI (adjacent channel interference) flags. */
	bool aci_enable;
	bool aci_wlan_automatic;
	bool aci_hw_rssi;

	/* Radio switched on/off */
	bool radio_on;
	struct {
		/* Values saved when turning the radio off.
		 * They are needed when turning it on again. */
		bool valid;
		u16 rfover;
		u16 rfoverval;
	} radio_off_context;

	u16 minlowsig[2];
	u16 minlowsigpos[2];

	/* Pointer to the table used to convert a
	 * TSSI value to dBm-Q5.2 */
	const s8 *tssi2dbm;
	/* tssi2dbm is kmalloc()ed. Only used for free()ing. */
	bool dyn_tssi_tbl;
	/* Target idle TSSI */
	int tgt_idle_tssi;
	/* Current idle TSSI */
	int cur_idle_tssi;
	/* The current average TSSI.
	 * Needs irq_lock, as it's updated in the IRQ path.
	 * 0xFF is used as the "no value yet" marker. */
	u8 average_tssi;
	/* Current TX power level attenuation control values */
	struct b43_bbatt bbatt;
	struct b43_rfatt rfatt;
	u8 tx_control;		/* B43_TXCTL_XXX */
	/* The calculated attenuation deltas that are used later
	 * when adjusting the actual power output. */
	int bbatt_delta;
	int rfatt_delta;

	/* LocalOscillator control values. */
	struct b43_txpower_lo_control *lo_control;
	/* Values from b43_calc_loopback_gain() */
	s16 max_lb_gain;	/* Maximum Loopback gain in hdB */
	s16 trsw_rx_gain;	/* TRSW RX gain in hdB */
	s16 lna_lod_gain;	/* LNA lod */
	s16 lna_gain;		/* LNA */
	s16 pga_gain;		/* PGA */

	/* Current Interference Mitigation mode */
	int interfmode;
	/* Stack of saved values from the Interference Mitigation code.
	 * Each value in the stack is laid out as follows:
	 * bit 0-11:  offset
	 * bit 12-15: register ID
	 * bit 16-31: value
	 * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT
	 */
#define B43_INTERFSTACK_SIZE	26
	u32 interfstack[B43_INTERFSTACK_SIZE];	//FIXME: use a data structure

	/* Saved values from the NRSSI Slope calculation */
	s16 nrssi[2];
	s32 nrssislope;
	/* In memory nrssi lookup table. */
	s8 nrssi_lt[64];

	u16 lofcal;

	u16 initval;	//FIXME rename?

	/* The device does address auto increment for the OFDM tables.
	 * We cache the previously used address here and omit the address
	 * write on the next table access, if possible. */
	u16 ofdmtab_addr;	/* The address currently set in hardware. */
	enum {			/* The last data flow direction. */
		B43_OFDMTAB_DIRECTION_UNKNOWN = 0,
		B43_OFDMTAB_DIRECTION_READ,
		B43_OFDMTAB_DIRECTION_WRITE,
	} ofdmtab_addr_direction;
};
197
198void b43_gphy_set_baseband_attenuation(struct b43_wldev *dev,
199 u16 baseband_attenuation);
200void b43_gphy_channel_switch(struct b43_wldev *dev,
201 unsigned int channel,
202 bool synthetic_pu_workaround);
203u8 * b43_generate_dyn_tssi2dbm_tab(struct b43_wldev *dev,
204 s16 pab0, s16 pab1, s16 pab2);
205
206struct b43_phy_operations;
207extern const struct b43_phy_operations b43_phyops_g;
208
209#endif /* LINUX_B43_PHY_G_H_ */
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
new file mode 100644
index 000000000000..c5d9dc3667c0
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -0,0 +1,155 @@
1/*
2
3 Broadcom B43 wireless driver
4 IEEE 802.11g LP-PHY driver
5
6 Copyright (c) 2008 Michael Buesch <mb@bu3sch.de>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
21 Boston, MA 02110-1301, USA.
22
23*/
24
25#include "b43.h"
26#include "phy_lp.h"
27#include "phy_common.h"
28
29
30static int b43_lpphy_op_allocate(struct b43_wldev *dev)
31{
32 struct b43_phy_lp *lpphy;
33
34 lpphy = kzalloc(sizeof(*lpphy), GFP_KERNEL);
35 if (!lpphy)
36 return -ENOMEM;
37 dev->phy.lp = lpphy;
38
39 return 0;
40}
41
42static void b43_lpphy_op_prepare_structs(struct b43_wldev *dev)
43{
44 struct b43_phy *phy = &dev->phy;
45 struct b43_phy_lp *lpphy = phy->lp;
46
47 memset(lpphy, 0, sizeof(*lpphy));
48
49 //TODO
50}
51
52static void b43_lpphy_op_free(struct b43_wldev *dev)
53{
54 struct b43_phy_lp *lpphy = dev->phy.lp;
55
56 kfree(lpphy);
57 dev->phy.lp = NULL;
58}
59
/* Initialize the LP-PHY hardware.  Not implemented yet; always
 * reports success so the device can be brought up. */
static int b43_lpphy_op_init(struct b43_wldev *dev)
{
	//TODO

	return 0;
}
66
/* Read a 16-bit LP-PHY register.  The register address must be latched
 * into the PHY control register before the data register is read. */
static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg)
{
	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
	return b43_read16(dev, B43_MMIO_PHY_DATA);
}
72
/* Write a 16-bit LP-PHY register.  The register address must be latched
 * into the PHY control register before the data register is written. */
static void b43_lpphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
{
	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
	b43_write16(dev, B43_MMIO_PHY_DATA, value);
}
78
79static u16 b43_lpphy_op_radio_read(struct b43_wldev *dev, u16 reg)
80{
81 /* Register 1 is a 32-bit register. */
82 B43_WARN_ON(reg == 1);
83 /* LP-PHY needs a special bit set for read access */
84 if (dev->phy.rev < 2) {
85 if (reg != 0x4001)
86 reg |= 0x100;
87 } else
88 reg |= 0x200;
89
90 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
91 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
92}
93
/* Write a 16-bit LP-PHY radio register.  The register address must be
 * latched into the radio control register before writing the data. */
static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
{
	/* Register 1 is a 32-bit register. */
	B43_WARN_ON(reg == 1);

	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
}
102
/* Software-rfkill the radio on/off.  Not implemented yet. */
static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
					 enum rfkill_state state)
{
	//TODO
}
108
/* Tune the radio to @new_channel.  Not implemented yet; always
 * reports success. */
static int b43_lpphy_op_switch_channel(struct b43_wldev *dev,
				       unsigned int new_channel)
{
	//TODO
	return 0;
}
115
/* Return the channel to use when none was selected explicitly. */
static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev)
{
	return 1; /* Default to channel 1 */
}
120
/* Select the RX antenna.  Not implemented yet. */
static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
{
	//TODO
}
125
/* Apply a previously computed TX power adjustment to the hardware.
 * Not implemented yet. */
static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev)
{
	//TODO
}
130
/* Recalculate the TX power.  Not implemented yet; always reports that
 * no hardware adjustment is needed. */
static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev,
							 bool ignore_tssi)
{
	//TODO
	return B43_TXPWR_RES_DONE;
}
137
138
/* LP-PHY operations vector.  Several callbacks present in b43_phyops_g
 * (prepare_hardware, exit, supports_hwpctl, interf_mitigation, the
 * periodic-work hooks) are left unset here because the LP-PHY
 * implementations do not exist yet -- presumably the common code
 * tolerates NULL for these; confirm against the dispatcher. */
const struct b43_phy_operations b43_phyops_lp = {
	.allocate		= b43_lpphy_op_allocate,
	.free			= b43_lpphy_op_free,
	.prepare_structs	= b43_lpphy_op_prepare_structs,
	.init			= b43_lpphy_op_init,
	.phy_read		= b43_lpphy_op_read,
	.phy_write		= b43_lpphy_op_write,
	.radio_read		= b43_lpphy_op_radio_read,
	.radio_write		= b43_lpphy_op_radio_write,
	.software_rfkill	= b43_lpphy_op_software_rfkill,
	.switch_analog		= b43_phyop_switch_analog_generic,
	.switch_channel		= b43_lpphy_op_switch_channel,
	.get_default_chan	= b43_lpphy_op_get_default_chan,
	.set_rx_antenna		= b43_lpphy_op_set_rx_antenna,
	.recalc_txpower		= b43_lpphy_op_recalc_txpower,
	.adjust_txpower		= b43_lpphy_op_adjust_txpower,
};
diff --git a/drivers/net/wireless/b43/phy_lp.h b/drivers/net/wireless/b43/phy_lp.h
new file mode 100644
index 000000000000..b0b5357abf93
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_lp.h
@@ -0,0 +1,540 @@
1#ifndef LINUX_B43_PHY_LP_H_
2#define LINUX_B43_PHY_LP_H_
3
4/* Definitions for the LP-PHY */
5
6
7
8
9#define B43_LP_RADIO(radio_reg) (radio_reg)
10#define B43_LP_NORTH(radio_reg) B43_LP_RADIO(radio_reg)
11#define B43_LP_SOUTH(radio_reg) B43_LP_RADIO((radio_reg) | 0x4000)
12
13
14/*** Broadcom 2062 NORTH radio registers ***/
15#define B2062_N_COMM1 B43_LP_NORTH(0x000) /* Common 01 (north) */
16#define B2062_N_COMM2 B43_LP_NORTH(0x002) /* Common 02 (north) */
17#define B2062_N_COMM3 B43_LP_NORTH(0x003) /* Common 03 (north) */
18#define B2062_N_COMM4 B43_LP_NORTH(0x004) /* Common 04 (north) */
19#define B2062_N_COMM5 B43_LP_NORTH(0x005) /* Common 05 (north) */
20#define B2062_N_COMM6 B43_LP_NORTH(0x006) /* Common 06 (north) */
21#define B2062_N_COMM7 B43_LP_NORTH(0x007) /* Common 07 (north) */
22#define B2062_N_COMM8 B43_LP_NORTH(0x008) /* Common 08 (north) */
23#define B2062_N_COMM9 B43_LP_NORTH(0x009) /* Common 09 (north) */
24#define B2062_N_COMM10 B43_LP_NORTH(0x00A) /* Common 10 (north) */
25#define B2062_N_COMM11 B43_LP_NORTH(0x00B) /* Common 11 (north) */
26#define B2062_N_COMM12 B43_LP_NORTH(0x00C) /* Common 12 (north) */
27#define B2062_N_COMM13 B43_LP_NORTH(0x00D) /* Common 13 (north) */
28#define B2062_N_COMM14 B43_LP_NORTH(0x00E) /* Common 14 (north) */
29#define B2062_N_COMM15 B43_LP_NORTH(0x00F) /* Common 15 (north) */
30#define B2062_N_PDN_CTL0 B43_LP_NORTH(0x010) /* PDN Control 0 (north) */
31#define B2062_N_PDN_CTL1 B43_LP_NORTH(0x011) /* PDN Control 1 (north) */
32#define B2062_N_PDN_CTL2 B43_LP_NORTH(0x012) /* PDN Control 2 (north) */
33#define B2062_N_PDN_CTL3 B43_LP_NORTH(0x013) /* PDN Control 3 (north) */
34#define B2062_N_PDN_CTL4 B43_LP_NORTH(0x014) /* PDN Control 4 (north) */
35#define B2062_N_GEN_CTL0 B43_LP_NORTH(0x015) /* GEN Control 0 (north) */
36#define B2062_N_IQ_CALIB B43_LP_NORTH(0x016) /* IQ Calibration (north) */
37#define B2062_N_LGENC B43_LP_NORTH(0x017) /* LGENC (north) */
38#define B2062_N_LGENA_LPF B43_LP_NORTH(0x018) /* LGENA LPF (north) */
39#define B2062_N_LGENA_BIAS0 B43_LP_NORTH(0x019) /* LGENA Bias 0 (north) */
40#define B2062_N_LGNEA_BIAS1 B43_LP_NORTH(0x01A) /* LGNEA Bias 1 (north) */
41#define B2062_N_LGENA_CTL0 B43_LP_NORTH(0x01B) /* LGENA Control 0 (north) */
42#define B2062_N_LGENA_CTL1 B43_LP_NORTH(0x01C) /* LGENA Control 1 (north) */
43#define B2062_N_LGENA_CTL2 B43_LP_NORTH(0x01D) /* LGENA Control 2 (north) */
44#define B2062_N_LGENA_TUNE0 B43_LP_NORTH(0x01E) /* LGENA Tune 0 (north) */
45#define B2062_N_LGENA_TUNE1 B43_LP_NORTH(0x01F) /* LGENA Tune 1 (north) */
46#define B2062_N_LGENA_TUNE2 B43_LP_NORTH(0x020) /* LGENA Tune 2 (north) */
47#define B2062_N_LGENA_TUNE3 B43_LP_NORTH(0x021) /* LGENA Tune 3 (north) */
48#define B2062_N_LGENA_CTL3 B43_LP_NORTH(0x022) /* LGENA Control 3 (north) */
49#define B2062_N_LGENA_CTL4 B43_LP_NORTH(0x023) /* LGENA Control 4 (north) */
50#define B2062_N_LGENA_CTL5 B43_LP_NORTH(0x024) /* LGENA Control 5 (north) */
51#define B2062_N_LGENA_CTL6 B43_LP_NORTH(0x025) /* LGENA Control 6 (north) */
52#define B2062_N_LGENA_CTL7 B43_LP_NORTH(0x026) /* LGENA Control 7 (north) */
53#define B2062_N_RXA_CTL0 B43_LP_NORTH(0x027) /* RXA Control 0 (north) */
54#define B2062_N_RXA_CTL1 B43_LP_NORTH(0x028) /* RXA Control 1 (north) */
55#define B2062_N_RXA_CTL2 B43_LP_NORTH(0x029) /* RXA Control 2 (north) */
56#define B2062_N_RXA_CTL3 B43_LP_NORTH(0x02A) /* RXA Control 3 (north) */
57#define B2062_N_RXA_CTL4 B43_LP_NORTH(0x02B) /* RXA Control 4 (north) */
58#define B2062_N_RXA_CTL5 B43_LP_NORTH(0x02C) /* RXA Control 5 (north) */
59#define B2062_N_RXA_CTL6 B43_LP_NORTH(0x02D) /* RXA Control 6 (north) */
60#define B2062_N_RXA_CTL7 B43_LP_NORTH(0x02E) /* RXA Control 7 (north) */
61#define B2062_N_RXBB_CTL0 B43_LP_NORTH(0x02F) /* RXBB Control 0 (north) */
62#define B2062_N_RXBB_CTL1 B43_LP_NORTH(0x030) /* RXBB Control 1 (north) */
63#define B2062_N_RXBB_CTL2 B43_LP_NORTH(0x031) /* RXBB Control 2 (north) */
64#define B2062_N_RXBB_GAIN0 B43_LP_NORTH(0x032) /* RXBB Gain 0 (north) */
65#define B2062_N_RXBB_GAIN1 B43_LP_NORTH(0x033) /* RXBB Gain 1 (north) */
66#define B2062_N_RXBB_GAIN2 B43_LP_NORTH(0x034) /* RXBB Gain 2 (north) */
67#define B2062_N_RXBB_GAIN3 B43_LP_NORTH(0x035) /* RXBB Gain 3 (north) */
68#define B2062_N_RXBB_RSSI0 B43_LP_NORTH(0x036) /* RXBB RSSI 0 (north) */
69#define B2062_N_RXBB_RSSI1 B43_LP_NORTH(0x037) /* RXBB RSSI 1 (north) */
70#define B2062_N_RXBB_CALIB0 B43_LP_NORTH(0x038) /* RXBB Calibration0 (north) */
71#define B2062_N_RXBB_CALIB1 B43_LP_NORTH(0x039) /* RXBB Calibration1 (north) */
72#define B2062_N_RXBB_CALIB2 B43_LP_NORTH(0x03A) /* RXBB Calibration2 (north) */
73#define B2062_N_RXBB_BIAS0 B43_LP_NORTH(0x03B) /* RXBB Bias 0 (north) */
74#define B2062_N_RXBB_BIAS1 B43_LP_NORTH(0x03C) /* RXBB Bias 1 (north) */
75#define B2062_N_RXBB_BIAS2 B43_LP_NORTH(0x03D) /* RXBB Bias 2 (north) */
76#define B2062_N_RXBB_BIAS3 B43_LP_NORTH(0x03E) /* RXBB Bias 3 (north) */
77#define B2062_N_RXBB_BIAS4 B43_LP_NORTH(0x03F) /* RXBB Bias 4 (north) */
78#define B2062_N_RXBB_BIAS5 B43_LP_NORTH(0x040) /* RXBB Bias 5 (north) */
79#define B2062_N_RXBB_RSSI2 B43_LP_NORTH(0x041) /* RXBB RSSI 2 (north) */
80#define B2062_N_RXBB_RSSI3 B43_LP_NORTH(0x042) /* RXBB RSSI 3 (north) */
81#define B2062_N_RXBB_RSSI4 B43_LP_NORTH(0x043) /* RXBB RSSI 4 (north) */
82#define B2062_N_RXBB_RSSI5 B43_LP_NORTH(0x044) /* RXBB RSSI 5 (north) */
83#define B2062_N_TX_CTL0 B43_LP_NORTH(0x045) /* TX Control 0 (north) */
84#define B2062_N_TX_CTL1 B43_LP_NORTH(0x046) /* TX Control 1 (north) */
85#define B2062_N_TX_CTL2 B43_LP_NORTH(0x047) /* TX Control 2 (north) */
86#define B2062_N_TX_CTL3 B43_LP_NORTH(0x048) /* TX Control 3 (north) */
87#define B2062_N_TX_CTL4 B43_LP_NORTH(0x049) /* TX Control 4 (north) */
88#define B2062_N_TX_CTL5 B43_LP_NORTH(0x04A) /* TX Control 5 (north) */
89#define B2062_N_TX_CTL6 B43_LP_NORTH(0x04B) /* TX Control 6 (north) */
90#define B2062_N_TX_CTL7 B43_LP_NORTH(0x04C) /* TX Control 7 (north) */
91#define B2062_N_TX_CTL8 B43_LP_NORTH(0x04D) /* TX Control 8 (north) */
92#define B2062_N_TX_CTL9 B43_LP_NORTH(0x04E) /* TX Control 9 (north) */
93#define B2062_N_TX_CTL_A B43_LP_NORTH(0x04F) /* TX Control A (north) */
94#define B2062_N_TX_GC2G B43_LP_NORTH(0x050) /* TX GC2G (north) */
95#define B2062_N_TX_GC5G B43_LP_NORTH(0x051) /* TX GC5G (north) */
96#define B2062_N_TX_TUNE B43_LP_NORTH(0x052) /* TX Tune (north) */
97#define B2062_N_TX_PAD B43_LP_NORTH(0x053) /* TX PAD (north) */
98#define B2062_N_TX_PGA B43_LP_NORTH(0x054) /* TX PGA (north) */
99#define B2062_N_TX_PADAUX B43_LP_NORTH(0x055) /* TX PADAUX (north) */
100#define B2062_N_TX_PGAAUX B43_LP_NORTH(0x056) /* TX PGAAUX (north) */
101#define B2062_N_TSSI_CTL0 B43_LP_NORTH(0x057) /* TSSI Control 0 (north) */
102#define B2062_N_TSSI_CTL1 B43_LP_NORTH(0x058) /* TSSI Control 1 (north) */
103#define B2062_N_TSSI_CTL2 B43_LP_NORTH(0x059) /* TSSI Control 2 (north) */
104#define B2062_N_IQ_CALIB_CTL0 B43_LP_NORTH(0x05A) /* IQ Calibration Control 0 (north) */
105#define B2062_N_IQ_CALIB_CTL1 B43_LP_NORTH(0x05B) /* IQ Calibration Control 1 (north) */
106#define B2062_N_IQ_CALIB_CTL2 B43_LP_NORTH(0x05C) /* IQ Calibration Control 2 (north) */
107#define B2062_N_CALIB_TS B43_LP_NORTH(0x05D) /* Calibration TS (north) */
108#define B2062_N_CALIB_CTL0 B43_LP_NORTH(0x05E) /* Calibration Control 0 (north) */
109#define B2062_N_CALIB_CTL1 B43_LP_NORTH(0x05F) /* Calibration Control 1 (north) */
110#define B2062_N_CALIB_CTL2 B43_LP_NORTH(0x060) /* Calibration Control 2 (north) */
111#define B2062_N_CALIB_CTL3 B43_LP_NORTH(0x061) /* Calibration Control 3 (north) */
112#define B2062_N_CALIB_CTL4 B43_LP_NORTH(0x062) /* Calibration Control 4 (north) */
113#define B2062_N_CALIB_DBG0 B43_LP_NORTH(0x063) /* Calibration Debug 0 (north) */
114#define B2062_N_CALIB_DBG1 B43_LP_NORTH(0x064) /* Calibration Debug 1 (north) */
115#define B2062_N_CALIB_DBG2 B43_LP_NORTH(0x065) /* Calibration Debug 2 (north) */
116#define B2062_N_CALIB_DBG3 B43_LP_NORTH(0x066) /* Calibration Debug 3 (north) */
117#define B2062_N_PSENSE_CTL0 B43_LP_NORTH(0x069) /* PSENSE Control 0 (north) */
118#define B2062_N_PSENSE_CTL1 B43_LP_NORTH(0x06A) /* PSENSE Control 1 (north) */
119#define B2062_N_PSENSE_CTL2 B43_LP_NORTH(0x06B) /* PSENSE Control 2 (north) */
120#define B2062_N_TEST_BUF0 B43_LP_NORTH(0x06C) /* TEST BUF0 (north) */
121
122/*** Broadcom 2062 SOUTH radio registers ***/
123#define B2062_S_COMM1 B43_LP_SOUTH(0x000) /* Common 01 (south) */
124#define B2062_S_RADIO_ID_CODE B43_LP_SOUTH(0x001) /* Radio ID code (south) */
125#define B2062_S_COMM2 B43_LP_SOUTH(0x002) /* Common 02 (south) */
126#define B2062_S_COMM3 B43_LP_SOUTH(0x003) /* Common 03 (south) */
127#define B2062_S_COMM4 B43_LP_SOUTH(0x004) /* Common 04 (south) */
128#define B2062_S_COMM5 B43_LP_SOUTH(0x005) /* Common 05 (south) */
129#define B2062_S_COMM6 B43_LP_SOUTH(0x006) /* Common 06 (south) */
130#define B2062_S_COMM7 B43_LP_SOUTH(0x007) /* Common 07 (south) */
131#define B2062_S_COMM8 B43_LP_SOUTH(0x008) /* Common 08 (south) */
132#define B2062_S_COMM9 B43_LP_SOUTH(0x009) /* Common 09 (south) */
133#define B2062_S_COMM10 B43_LP_SOUTH(0x00A) /* Common 10 (south) */
134#define B2062_S_COMM11 B43_LP_SOUTH(0x00B) /* Common 11 (south) */
135#define B2062_S_COMM12 B43_LP_SOUTH(0x00C) /* Common 12 (south) */
136#define B2062_S_COMM13 B43_LP_SOUTH(0x00D) /* Common 13 (south) */
137#define B2062_S_COMM14 B43_LP_SOUTH(0x00E) /* Common 14 (south) */
138#define B2062_S_COMM15 B43_LP_SOUTH(0x00F) /* Common 15 (south) */
139#define B2062_S_PDS_CTL0 B43_LP_SOUTH(0x010) /* PDS Control 0 (south) */
140#define B2062_S_PDS_CTL1 B43_LP_SOUTH(0x011) /* PDS Control 1 (south) */
141#define B2062_S_PDS_CTL2 B43_LP_SOUTH(0x012) /* PDS Control 2 (south) */
142#define B2062_S_PDS_CTL3 B43_LP_SOUTH(0x013) /* PDS Control 3 (south) */
143#define B2062_S_BG_CTL0 B43_LP_SOUTH(0x014) /* BG Control 0 (south) */
144#define B2062_S_BG_CTL1 B43_LP_SOUTH(0x015) /* BG Control 1 (south) */
145#define B2062_S_BG_CTL2 B43_LP_SOUTH(0x016) /* BG Control 2 (south) */
146#define B2062_S_LGENG_CTL0 B43_LP_SOUTH(0x017) /* LGENG Control 00 (south) */
147#define B2062_S_LGENG_CTL1 B43_LP_SOUTH(0x018) /* LGENG Control 01 (south) */
148#define B2062_S_LGENG_CTL2 B43_LP_SOUTH(0x019) /* LGENG Control 02 (south) */
149#define B2062_S_LGENG_CTL3 B43_LP_SOUTH(0x01A) /* LGENG Control 03 (south) */
150#define B2062_S_LGENG_CTL4 B43_LP_SOUTH(0x01B) /* LGENG Control 04 (south) */
151#define B2062_S_LGENG_CTL5 B43_LP_SOUTH(0x01C) /* LGENG Control 05 (south) */
152#define B2062_S_LGENG_CTL6 B43_LP_SOUTH(0x01D) /* LGENG Control 06 (south) */
153#define B2062_S_LGENG_CTL7 B43_LP_SOUTH(0x01E) /* LGENG Control 07 (south) */
154#define B2062_S_LGENG_CTL8 B43_LP_SOUTH(0x01F) /* LGENG Control 08 (south) */
155#define B2062_S_LGENG_CTL9 B43_LP_SOUTH(0x020) /* LGENG Control 09 (south) */
156#define B2062_S_LGENG_CTL10 B43_LP_SOUTH(0x021) /* LGENG Control 10 (south) */
157#define B2062_S_LGENG_CTL11 B43_LP_SOUTH(0x022) /* LGENG Control 11 (south) */
158#define B2062_S_REFPLL_CTL0 B43_LP_SOUTH(0x023) /* REFPLL Control 00 (south) */
159#define B2062_S_REFPLL_CTL1 B43_LP_SOUTH(0x024) /* REFPLL Control 01 (south) */
160#define B2062_S_REFPLL_CTL2 B43_LP_SOUTH(0x025) /* REFPLL Control 02 (south) */
161#define B2062_S_REFPLL_CTL3 B43_LP_SOUTH(0x026) /* REFPLL Control 03 (south) */
162#define B2062_S_REFPLL_CTL4 B43_LP_SOUTH(0x027) /* REFPLL Control 04 (south) */
163#define B2062_S_REFPLL_CTL5 B43_LP_SOUTH(0x028) /* REFPLL Control 05 (south) */
164#define B2062_S_REFPLL_CTL6 B43_LP_SOUTH(0x029) /* REFPLL Control 06 (south) */
165#define B2062_S_REFPLL_CTL7 B43_LP_SOUTH(0x02A) /* REFPLL Control 07 (south) */
166#define B2062_S_REFPLL_CTL8 B43_LP_SOUTH(0x02B) /* REFPLL Control 08 (south) */
167#define B2062_S_REFPLL_CTL9 B43_LP_SOUTH(0x02C) /* REFPLL Control 09 (south) */
168#define B2062_S_REFPLL_CTL10 B43_LP_SOUTH(0x02D) /* REFPLL Control 10 (south) */
169#define B2062_S_REFPLL_CTL11 B43_LP_SOUTH(0x02E) /* REFPLL Control 11 (south) */
170#define B2062_S_REFPLL_CTL12 B43_LP_SOUTH(0x02F) /* REFPLL Control 12 (south) */
171#define B2062_S_REFPLL_CTL13 B43_LP_SOUTH(0x030) /* REFPLL Control 13 (south) */
172#define B2062_S_REFPLL_CTL14 B43_LP_SOUTH(0x031) /* REFPLL Control 14 (south) */
173#define B2062_S_REFPLL_CTL15 B43_LP_SOUTH(0x032) /* REFPLL Control 15 (south) */
174#define B2062_S_REFPLL_CTL16 B43_LP_SOUTH(0x033) /* REFPLL Control 16 (south) */
175#define B2062_S_RFPLL_CTL0 B43_LP_SOUTH(0x034) /* RFPLL Control 00 (south) */
176#define B2062_S_RFPLL_CTL1 B43_LP_SOUTH(0x035) /* RFPLL Control 01 (south) */
177#define B2062_S_RFPLL_CTL2 B43_LP_SOUTH(0x036) /* RFPLL Control 02 (south) */
178#define B2062_S_RFPLL_CTL3 B43_LP_SOUTH(0x037) /* RFPLL Control 03 (south) */
179#define B2062_S_RFPLL_CTL4 B43_LP_SOUTH(0x038) /* RFPLL Control 04 (south) */
180#define B2062_S_RFPLL_CTL5 B43_LP_SOUTH(0x039) /* RFPLL Control 05 (south) */
181#define B2062_S_RFPLL_CTL6 B43_LP_SOUTH(0x03A) /* RFPLL Control 06 (south) */
182#define B2062_S_RFPLL_CTL7 B43_LP_SOUTH(0x03B) /* RFPLL Control 07 (south) */
183#define B2062_S_RFPLL_CTL8 B43_LP_SOUTH(0x03C) /* RFPLL Control 08 (south) */
184#define B2062_S_RFPLL_CTL9 B43_LP_SOUTH(0x03D) /* RFPLL Control 09 (south) */
185#define B2062_S_RFPLL_CTL10 B43_LP_SOUTH(0x03E) /* RFPLL Control 10 (south) */
186#define B2062_S_RFPLL_CTL11 B43_LP_SOUTH(0x03F) /* RFPLL Control 11 (south) */
187#define B2062_S_RFPLL_CTL12 B43_LP_SOUTH(0x040) /* RFPLL Control 12 (south) */
188#define B2062_S_RFPLL_CTL13 B43_LP_SOUTH(0x041) /* RFPLL Control 13 (south) */
189#define B2062_S_RFPLL_CTL14 B43_LP_SOUTH(0x042) /* RFPLL Control 14 (south) */
190#define B2062_S_RFPLL_CTL15 B43_LP_SOUTH(0x043) /* RFPLL Control 15 (south) */
191#define B2062_S_RFPLL_CTL16 B43_LP_SOUTH(0x044) /* RFPLL Control 16 (south) */
192#define B2062_S_RFPLL_CTL17 B43_LP_SOUTH(0x045) /* RFPLL Control 17 (south) */
193#define B2062_S_RFPLL_CTL18 B43_LP_SOUTH(0x046) /* RFPLL Control 18 (south) */
194#define B2062_S_RFPLL_CTL19 B43_LP_SOUTH(0x047) /* RFPLL Control 19 (south) */
195#define B2062_S_RFPLL_CTL20 B43_LP_SOUTH(0x048) /* RFPLL Control 20 (south) */
196#define B2062_S_RFPLL_CTL21 B43_LP_SOUTH(0x049) /* RFPLL Control 21 (south) */
197#define B2062_S_RFPLL_CTL22 B43_LP_SOUTH(0x04A) /* RFPLL Control 22 (south) */
198#define B2062_S_RFPLL_CTL23 B43_LP_SOUTH(0x04B) /* RFPLL Control 23 (south) */
199#define B2062_S_RFPLL_CTL24 B43_LP_SOUTH(0x04C) /* RFPLL Control 24 (south) */
200#define B2062_S_RFPLL_CTL25 B43_LP_SOUTH(0x04D) /* RFPLL Control 25 (south) */
201#define B2062_S_RFPLL_CTL26 B43_LP_SOUTH(0x04E) /* RFPLL Control 26 (south) */
202#define B2062_S_RFPLL_CTL27 B43_LP_SOUTH(0x04F) /* RFPLL Control 27 (south) */
203#define B2062_S_RFPLL_CTL28 B43_LP_SOUTH(0x050) /* RFPLL Control 28 (south) */
204#define B2062_S_RFPLL_CTL29 B43_LP_SOUTH(0x051) /* RFPLL Control 29 (south) */
205#define B2062_S_RFPLL_CTL30 B43_LP_SOUTH(0x052) /* RFPLL Control 30 (south) */
206#define B2062_S_RFPLL_CTL31 B43_LP_SOUTH(0x053) /* RFPLL Control 31 (south) */
207#define B2062_S_RFPLL_CTL32 B43_LP_SOUTH(0x054) /* RFPLL Control 32 (south) */
208#define B2062_S_RFPLL_CTL33 B43_LP_SOUTH(0x055) /* RFPLL Control 33 (south) */
209#define B2062_S_RFPLL_CTL34 B43_LP_SOUTH(0x056) /* RFPLL Control 34 (south) */
210#define B2062_S_RXG_CNT0 B43_LP_SOUTH(0x057) /* RXG Counter 00 (south) */
211#define B2062_S_RXG_CNT1 B43_LP_SOUTH(0x058) /* RXG Counter 01 (south) */
212#define B2062_S_RXG_CNT2 B43_LP_SOUTH(0x059) /* RXG Counter 02 (south) */
213#define B2062_S_RXG_CNT3 B43_LP_SOUTH(0x05A) /* RXG Counter 03 (south) */
214#define B2062_S_RXG_CNT4 B43_LP_SOUTH(0x05B) /* RXG Counter 04 (south) */
215#define B2062_S_RXG_CNT5 B43_LP_SOUTH(0x05C) /* RXG Counter 05 (south) */
216#define B2062_S_RXG_CNT6 B43_LP_SOUTH(0x05D) /* RXG Counter 06 (south) */
217#define B2062_S_RXG_CNT7 B43_LP_SOUTH(0x05E) /* RXG Counter 07 (south) */
218#define B2062_S_RXG_CNT8 B43_LP_SOUTH(0x05F) /* RXG Counter 08 (south) */
219#define B2062_S_RXG_CNT9 B43_LP_SOUTH(0x060) /* RXG Counter 09 (south) */
220#define B2062_S_RXG_CNT10 B43_LP_SOUTH(0x061) /* RXG Counter 10 (south) */
221#define B2062_S_RXG_CNT11 B43_LP_SOUTH(0x062) /* RXG Counter 11 (south) */
222#define B2062_S_RXG_CNT12 B43_LP_SOUTH(0x063) /* RXG Counter 12 (south) */
223#define B2062_S_RXG_CNT13 B43_LP_SOUTH(0x064) /* RXG Counter 13 (south) */
224#define B2062_S_RXG_CNT14 B43_LP_SOUTH(0x065) /* RXG Counter 14 (south) */
225#define B2062_S_RXG_CNT15 B43_LP_SOUTH(0x066) /* RXG Counter 15 (south) */
226#define B2062_S_RXG_CNT16 B43_LP_SOUTH(0x067) /* RXG Counter 16 (south) */
227#define B2062_S_RXG_CNT17 B43_LP_SOUTH(0x068) /* RXG Counter 17 (south) */
228
229
230
231/*** Broadcom 2063 radio registers ***/
232#define B2063_RADIO_ID_CODE B43_LP_RADIO(0x001) /* Radio ID code */
233#define B2063_COMM1 B43_LP_RADIO(0x000) /* Common 01 */
234#define B2063_COMM2 B43_LP_RADIO(0x002) /* Common 02 */
235#define B2063_COMM3 B43_LP_RADIO(0x003) /* Common 03 */
236#define B2063_COMM4 B43_LP_RADIO(0x004) /* Common 04 */
237#define B2063_COMM5 B43_LP_RADIO(0x005) /* Common 05 */
238#define B2063_COMM6 B43_LP_RADIO(0x006) /* Common 06 */
239#define B2063_COMM7 B43_LP_RADIO(0x007) /* Common 07 */
240#define B2063_COMM8 B43_LP_RADIO(0x008) /* Common 08 */
241#define B2063_COMM9 B43_LP_RADIO(0x009) /* Common 09 */
242#define B2063_COMM10 B43_LP_RADIO(0x00A) /* Common 10 */
243#define B2063_COMM11 B43_LP_RADIO(0x00B) /* Common 11 */
244#define B2063_COMM12 B43_LP_RADIO(0x00C) /* Common 12 */
245#define B2063_COMM13 B43_LP_RADIO(0x00D) /* Common 13 */
246#define B2063_COMM14 B43_LP_RADIO(0x00E) /* Common 14 */
247#define B2063_COMM15 B43_LP_RADIO(0x00F) /* Common 15 */
248#define B2063_COMM16 B43_LP_RADIO(0x010) /* Common 16 */
249#define B2063_COMM17 B43_LP_RADIO(0x011) /* Common 17 */
250#define B2063_COMM18 B43_LP_RADIO(0x012) /* Common 18 */
251#define B2063_COMM19 B43_LP_RADIO(0x013) /* Common 19 */
252#define B2063_COMM20 B43_LP_RADIO(0x014) /* Common 20 */
253#define B2063_COMM21 B43_LP_RADIO(0x015) /* Common 21 */
254#define B2063_COMM22 B43_LP_RADIO(0x016) /* Common 22 */
255#define B2063_COMM23 B43_LP_RADIO(0x017) /* Common 23 */
256#define B2063_COMM24 B43_LP_RADIO(0x018) /* Common 24 */
257#define B2063_PWR_SWITCH_CTL B43_LP_RADIO(0x019) /* POWER SWITCH Control */
258#define B2063_PLL_SP1 B43_LP_RADIO(0x01A) /* PLL SP 1 */
259#define B2063_PLL_SP2 B43_LP_RADIO(0x01B) /* PLL SP 2 */
260#define B2063_LOGEN_SP1 B43_LP_RADIO(0x01C) /* LOGEN SP 1 */
261#define B2063_LOGEN_SP2 B43_LP_RADIO(0x01D) /* LOGEN SP 2 */
262#define B2063_LOGEN_SP3 B43_LP_RADIO(0x01E) /* LOGEN SP 3 */
263#define B2063_LOGEN_SP4 B43_LP_RADIO(0x01F) /* LOGEN SP 4 */
264#define B2063_LOGEN_SP5 B43_LP_RADIO(0x020) /* LOGEN SP 5 */
265#define B2063_G_RX_SP1 B43_LP_RADIO(0x021) /* G RX SP 1 */
266#define B2063_G_RX_SP2 B43_LP_RADIO(0x022) /* G RX SP 2 */
267#define B2063_G_RX_SP3 B43_LP_RADIO(0x023) /* G RX SP 3 */
268#define B2063_G_RX_SP4 B43_LP_RADIO(0x024) /* G RX SP 4 */
269#define B2063_G_RX_SP5 B43_LP_RADIO(0x025) /* G RX SP 5 */
270#define B2063_G_RX_SP6 B43_LP_RADIO(0x026) /* G RX SP 6 */
271#define B2063_G_RX_SP7 B43_LP_RADIO(0x027) /* G RX SP 7 */
272#define B2063_G_RX_SP8 B43_LP_RADIO(0x028) /* G RX SP 8 */
273#define B2063_G_RX_SP9 B43_LP_RADIO(0x029) /* G RX SP 9 */
274#define B2063_G_RX_SP10 B43_LP_RADIO(0x02A) /* G RX SP 10 */
275#define B2063_G_RX_SP11 B43_LP_RADIO(0x02B) /* G RX SP 11 */
276#define B2063_A_RX_SP1 B43_LP_RADIO(0x02C) /* A RX SP 1 */
277#define B2063_A_RX_SP2 B43_LP_RADIO(0x02D) /* A RX SP 2 */
278#define B2063_A_RX_SP3 B43_LP_RADIO(0x02E) /* A RX SP 3 */
279#define B2063_A_RX_SP4 B43_LP_RADIO(0x02F) /* A RX SP 4 */
280#define B2063_A_RX_SP5 B43_LP_RADIO(0x030) /* A RX SP 5 */
281#define B2063_A_RX_SP6 B43_LP_RADIO(0x031) /* A RX SP 6 */
282#define B2063_A_RX_SP7 B43_LP_RADIO(0x032) /* A RX SP 7 */
283#define B2063_RX_BB_SP1 B43_LP_RADIO(0x033) /* RX BB SP 1 */
284#define B2063_RX_BB_SP2 B43_LP_RADIO(0x034) /* RX BB SP 2 */
285#define B2063_RX_BB_SP3 B43_LP_RADIO(0x035) /* RX BB SP 3 */
286#define B2063_RX_BB_SP4 B43_LP_RADIO(0x036) /* RX BB SP 4 */
287#define B2063_RX_BB_SP5 B43_LP_RADIO(0x037) /* RX BB SP 5 */
288#define B2063_RX_BB_SP6 B43_LP_RADIO(0x038) /* RX BB SP 6 */
289#define B2063_RX_BB_SP7 B43_LP_RADIO(0x039) /* RX BB SP 7 */
290#define B2063_RX_BB_SP8 B43_LP_RADIO(0x03A) /* RX BB SP 8 */
291#define B2063_TX_RF_SP1 B43_LP_RADIO(0x03B) /* TX RF SP 1 */
292#define B2063_TX_RF_SP2 B43_LP_RADIO(0x03C) /* TX RF SP 2 */
293#define B2063_TX_RF_SP3 B43_LP_RADIO(0x03D) /* TX RF SP 3 */
294#define B2063_TX_RF_SP4 B43_LP_RADIO(0x03E) /* TX RF SP 4 */
295#define B2063_TX_RF_SP5 B43_LP_RADIO(0x03F) /* TX RF SP 5 */
296#define B2063_TX_RF_SP6 B43_LP_RADIO(0x040) /* TX RF SP 6 */
297#define B2063_TX_RF_SP7 B43_LP_RADIO(0x041) /* TX RF SP 7 */
298#define B2063_TX_RF_SP8 B43_LP_RADIO(0x042) /* TX RF SP 8 */
299#define B2063_TX_RF_SP9 B43_LP_RADIO(0x043) /* TX RF SP 9 */
300#define B2063_TX_RF_SP10 B43_LP_RADIO(0x044) /* TX RF SP 10 */
301#define B2063_TX_RF_SP11 B43_LP_RADIO(0x045) /* TX RF SP 11 */
302#define B2063_TX_RF_SP12 B43_LP_RADIO(0x046) /* TX RF SP 12 */
303#define B2063_TX_RF_SP13 B43_LP_RADIO(0x047) /* TX RF SP 13 */
304#define B2063_TX_RF_SP14 B43_LP_RADIO(0x048) /* TX RF SP 14 */
305#define B2063_TX_RF_SP15 B43_LP_RADIO(0x049) /* TX RF SP 15 */
306#define B2063_TX_RF_SP16 B43_LP_RADIO(0x04A) /* TX RF SP 16 */
307#define B2063_TX_RF_SP17 B43_LP_RADIO(0x04B) /* TX RF SP 17 */
308#define B2063_PA_SP1 B43_LP_RADIO(0x04C) /* PA SP 1 */
309#define B2063_PA_SP2 B43_LP_RADIO(0x04D) /* PA SP 2 */
310#define B2063_PA_SP3 B43_LP_RADIO(0x04E) /* PA SP 3 */
311#define B2063_PA_SP4 B43_LP_RADIO(0x04F) /* PA SP 4 */
312#define B2063_PA_SP5 B43_LP_RADIO(0x050) /* PA SP 5 */
313#define B2063_PA_SP6 B43_LP_RADIO(0x051) /* PA SP 6 */
314#define B2063_PA_SP7 B43_LP_RADIO(0x052) /* PA SP 7 */
315#define B2063_TX_BB_SP1 B43_LP_RADIO(0x053) /* TX BB SP 1 */
316#define B2063_TX_BB_SP2 B43_LP_RADIO(0x054) /* TX BB SP 2 */
317#define B2063_TX_BB_SP3 B43_LP_RADIO(0x055) /* TX BB SP 3 */
318#define B2063_REG_SP1 B43_LP_RADIO(0x056) /* REG SP 1 */
319#define B2063_BANDGAP_CTL1 B43_LP_RADIO(0x057) /* BANDGAP Control 1 */
320#define B2063_BANDGAP_CTL2 B43_LP_RADIO(0x058) /* BANDGAP Control 2 */
321#define B2063_LPO_CTL1 B43_LP_RADIO(0x059) /* LPO Control 1 */
322#define B2063_RC_CALIB_CTL1 B43_LP_RADIO(0x05A) /* RC Calibration Control 1 */
323#define B2063_RC_CALIB_CTL2 B43_LP_RADIO(0x05B) /* RC Calibration Control 2 */
324#define B2063_RC_CALIB_CTL3 B43_LP_RADIO(0x05C) /* RC Calibration Control 3 */
325#define B2063_RC_CALIB_CTL4 B43_LP_RADIO(0x05D) /* RC Calibration Control 4 */
326#define B2063_RC_CALIB_CTL5 B43_LP_RADIO(0x05E) /* RC Calibration Control 5 */
327#define B2063_RC_CALIB_CTL6 B43_LP_RADIO(0x05F) /* RC Calibration Control 6 */
328#define B2063_RC_CALIB_CTL7 B43_LP_RADIO(0x060) /* RC Calibration Control 7 */
329#define B2063_RC_CALIB_CTL8 B43_LP_RADIO(0x061) /* RC Calibration Control 8 */
330#define B2063_RC_CALIB_CTL9 B43_LP_RADIO(0x062) /* RC Calibration Control 9 */
331#define B2063_RC_CALIB_CTL10 B43_LP_RADIO(0x063) /* RC Calibration Control 10 */
332#define B2063_PLL_JTAG_CALNRST B43_LP_RADIO(0x064) /* PLL JTAG CALNRST */
333#define B2063_PLL_JTAG_IN_PLL1 B43_LP_RADIO(0x065) /* PLL JTAG IN PLL 1 */
334#define B2063_PLL_JTAG_IN_PLL2 B43_LP_RADIO(0x066) /* PLL JTAG IN PLL 2 */
335#define B2063_PLL_JTAG_PLL_CP1 B43_LP_RADIO(0x067) /* PLL JTAG PLL CP 1 */
336#define B2063_PLL_JTAG_PLL_CP2 B43_LP_RADIO(0x068) /* PLL JTAG PLL CP 2 */
337#define B2063_PLL_JTAG_PLL_CP3 B43_LP_RADIO(0x069) /* PLL JTAG PLL CP 3 */
338#define B2063_PLL_JTAG_PLL_CP4 B43_LP_RADIO(0x06A) /* PLL JTAG PLL CP 4 */
339#define B2063_PLL_JTAG_PLL_CTL1 B43_LP_RADIO(0x06B) /* PLL JTAG PLL Control 1 */
340#define B2063_PLL_JTAG_PLL_LF1 B43_LP_RADIO(0x06C) /* PLL JTAG PLL LF 1 */
341#define B2063_PLL_JTAG_PLL_LF2 B43_LP_RADIO(0x06D) /* PLL JTAG PLL LF 2 */
342#define B2063_PLL_JTAG_PLL_LF3 B43_LP_RADIO(0x06E) /* PLL JTAG PLL LF 3 */
343#define B2063_PLL_JTAG_PLL_LF4 B43_LP_RADIO(0x06F) /* PLL JTAG PLL LF 4 */
344#define B2063_PLL_JTAG_PLL_SG1 B43_LP_RADIO(0x070) /* PLL JTAG PLL SG 1 */
345#define B2063_PLL_JTAG_PLL_SG2 B43_LP_RADIO(0x071) /* PLL JTAG PLL SG 2 */
346#define B2063_PLL_JTAG_PLL_SG3 B43_LP_RADIO(0x072) /* PLL JTAG PLL SG 3 */
347#define B2063_PLL_JTAG_PLL_SG4 B43_LP_RADIO(0x073) /* PLL JTAG PLL SG 4 */
348#define B2063_PLL_JTAG_PLL_SG5 B43_LP_RADIO(0x074) /* PLL JTAG PLL SG 5 */
349#define B2063_PLL_JTAG_PLL_VCO1 B43_LP_RADIO(0x075) /* PLL JTAG PLL VCO 1 */
350#define B2063_PLL_JTAG_PLL_VCO2 B43_LP_RADIO(0x076) /* PLL JTAG PLL VCO 2 */
351#define B2063_PLL_JTAG_PLL_VCO_CALIB1 B43_LP_RADIO(0x077) /* PLL JTAG PLL VCO Calibration 1 */
352#define B2063_PLL_JTAG_PLL_VCO_CALIB2 B43_LP_RADIO(0x078) /* PLL JTAG PLL VCO Calibration 2 */
353#define B2063_PLL_JTAG_PLL_VCO_CALIB3 B43_LP_RADIO(0x079) /* PLL JTAG PLL VCO Calibration 3 */
354#define B2063_PLL_JTAG_PLL_VCO_CALIB4 B43_LP_RADIO(0x07A) /* PLL JTAG PLL VCO Calibration 4 */
355#define B2063_PLL_JTAG_PLL_VCO_CALIB5 B43_LP_RADIO(0x07B) /* PLL JTAG PLL VCO Calibration 5 */
356#define B2063_PLL_JTAG_PLL_VCO_CALIB6 B43_LP_RADIO(0x07C) /* PLL JTAG PLL VCO Calibration 6 */
357#define B2063_PLL_JTAG_PLL_VCO_CALIB7 B43_LP_RADIO(0x07D) /* PLL JTAG PLL VCO Calibration 7 */
358#define B2063_PLL_JTAG_PLL_VCO_CALIB8 B43_LP_RADIO(0x07E) /* PLL JTAG PLL VCO Calibration 8 */
359#define B2063_PLL_JTAG_PLL_VCO_CALIB9 B43_LP_RADIO(0x07F) /* PLL JTAG PLL VCO Calibration 9 */
360#define B2063_PLL_JTAG_PLL_VCO_CALIB10 B43_LP_RADIO(0x080) /* PLL JTAG PLL VCO Calibration 10 */
361#define B2063_PLL_JTAG_PLL_XTAL_12 B43_LP_RADIO(0x081) /* PLL JTAG PLL XTAL 1 2 */
362#define B2063_PLL_JTAG_PLL_XTAL3 B43_LP_RADIO(0x082) /* PLL JTAG PLL XTAL 3 */
363#define B2063_LOGEN_ACL1 B43_LP_RADIO(0x083) /* LOGEN ACL 1 */
364#define B2063_LOGEN_ACL2 B43_LP_RADIO(0x084) /* LOGEN ACL 2 */
365#define B2063_LOGEN_ACL3 B43_LP_RADIO(0x085) /* LOGEN ACL 3 */
366#define B2063_LOGEN_ACL4 B43_LP_RADIO(0x086) /* LOGEN ACL 4 */
367#define B2063_LOGEN_ACL5 B43_LP_RADIO(0x087) /* LOGEN ACL 5 */
368#define B2063_LO_CALIB_INPUTS B43_LP_RADIO(0x088) /* LO Calibration INPUTS */
369#define B2063_LO_CALIB_CTL1 B43_LP_RADIO(0x089) /* LO Calibration Control 1 */
370#define B2063_LO_CALIB_CTL2 B43_LP_RADIO(0x08A) /* LO Calibration Control 2 */
371#define B2063_LO_CALIB_CTL3 B43_LP_RADIO(0x08B) /* LO Calibration Control 3 */
372#define B2063_LO_CALIB_WAITCNT B43_LP_RADIO(0x08C) /* LO Calibration WAITCNT */
373#define B2063_LO_CALIB_OVR1 B43_LP_RADIO(0x08D) /* LO Calibration OVR 1 */
374#define B2063_LO_CALIB_OVR2 B43_LP_RADIO(0x08E) /* LO Calibration OVR 2 */
375#define B2063_LO_CALIB_OVAL1 B43_LP_RADIO(0x08F) /* LO Calibration OVAL 1 */
376#define B2063_LO_CALIB_OVAL2 B43_LP_RADIO(0x090) /* LO Calibration OVAL 2 */
377#define B2063_LO_CALIB_OVAL3 B43_LP_RADIO(0x091) /* LO Calibration OVAL 3 */
378#define B2063_LO_CALIB_OVAL4 B43_LP_RADIO(0x092) /* LO Calibration OVAL 4 */
379#define B2063_LO_CALIB_OVAL5 B43_LP_RADIO(0x093) /* LO Calibration OVAL 5 */
380#define B2063_LO_CALIB_OVAL6 B43_LP_RADIO(0x094) /* LO Calibration OVAL 6 */
381#define B2063_LO_CALIB_OVAL7 B43_LP_RADIO(0x095) /* LO Calibration OVAL 7 */
382#define B2063_LO_CALIB_CALVLD1 B43_LP_RADIO(0x096) /* LO Calibration CALVLD 1 */
383#define B2063_LO_CALIB_CALVLD2 B43_LP_RADIO(0x097) /* LO Calibration CALVLD 2 */
384#define B2063_LO_CALIB_CVAL1 B43_LP_RADIO(0x098) /* LO Calibration CVAL 1 */
385#define B2063_LO_CALIB_CVAL2 B43_LP_RADIO(0x099) /* LO Calibration CVAL 2 */
386#define B2063_LO_CALIB_CVAL3 B43_LP_RADIO(0x09A) /* LO Calibration CVAL 3 */
387#define B2063_LO_CALIB_CVAL4 B43_LP_RADIO(0x09B) /* LO Calibration CVAL 4 */
388#define B2063_LO_CALIB_CVAL5 B43_LP_RADIO(0x09C) /* LO Calibration CVAL 5 */
389#define B2063_LO_CALIB_CVAL6 B43_LP_RADIO(0x09D) /* LO Calibration CVAL 6 */
390#define B2063_LO_CALIB_CVAL7 B43_LP_RADIO(0x09E) /* LO Calibration CVAL 7 */
391#define B2063_LOGEN_CALIB_EN B43_LP_RADIO(0x09F) /* LOGEN Calibration EN */
392#define B2063_LOGEN_PEAKDET1 B43_LP_RADIO(0x0A0) /* LOGEN PEAKDET 1 */
393#define B2063_LOGEN_RCCR1 B43_LP_RADIO(0x0A1) /* LOGEN RCCR 1 */
394#define B2063_LOGEN_VCOBUF1 B43_LP_RADIO(0x0A2) /* LOGEN VCOBUF 1 */
395#define B2063_LOGEN_MIXER1 B43_LP_RADIO(0x0A3) /* LOGEN MIXER 1 */
396#define B2063_LOGEN_MIXER2 B43_LP_RADIO(0x0A4) /* LOGEN MIXER 2 */
397#define B2063_LOGEN_BUF1 B43_LP_RADIO(0x0A5) /* LOGEN BUF 1 */
398#define B2063_LOGEN_BUF2 B43_LP_RADIO(0x0A6) /* LOGEN BUF 2 */
399#define B2063_LOGEN_DIV1 B43_LP_RADIO(0x0A7) /* LOGEN DIV 1 */
400#define B2063_LOGEN_DIV2 B43_LP_RADIO(0x0A8) /* LOGEN DIV 2 */
401#define B2063_LOGEN_DIV3 B43_LP_RADIO(0x0A9) /* LOGEN DIV 3 */
402#define B2063_LOGEN_CBUFRX1 B43_LP_RADIO(0x0AA) /* LOGEN CBUFRX 1 */
403#define B2063_LOGEN_CBUFRX2 B43_LP_RADIO(0x0AB) /* LOGEN CBUFRX 2 */
404#define B2063_LOGEN_CBUFTX1 B43_LP_RADIO(0x0AC) /* LOGEN CBUFTX 1 */
405#define B2063_LOGEN_CBUFTX2 B43_LP_RADIO(0x0AD) /* LOGEN CBUFTX 2 */
406#define B2063_LOGEN_IDAC1 B43_LP_RADIO(0x0AE) /* LOGEN IDAC 1 */
407#define B2063_LOGEN_SPARE1 B43_LP_RADIO(0x0AF) /* LOGEN SPARE 1 */
408#define B2063_LOGEN_SPARE2 B43_LP_RADIO(0x0B0) /* LOGEN SPARE 2 */
409#define B2063_LOGEN_SPARE3 B43_LP_RADIO(0x0B1) /* LOGEN SPARE 3 */
410#define B2063_G_RX_1ST1 B43_LP_RADIO(0x0B2) /* G RX 1ST 1 */
411#define B2063_G_RX_1ST2 B43_LP_RADIO(0x0B3) /* G RX 1ST 2 */
412#define B2063_G_RX_1ST3 B43_LP_RADIO(0x0B4) /* G RX 1ST 3 */
413#define B2063_G_RX_2ND1 B43_LP_RADIO(0x0B5) /* G RX 2ND 1 */
414#define B2063_G_RX_2ND2 B43_LP_RADIO(0x0B6) /* G RX 2ND 2 */
415#define B2063_G_RX_2ND3 B43_LP_RADIO(0x0B7) /* G RX 2ND 3 */
416#define B2063_G_RX_2ND4 B43_LP_RADIO(0x0B8) /* G RX 2ND 4 */
417#define B2063_G_RX_2ND5 B43_LP_RADIO(0x0B9) /* G RX 2ND 5 */
418#define B2063_G_RX_2ND6 B43_LP_RADIO(0x0BA) /* G RX 2ND 6 */
419#define B2063_G_RX_2ND7 B43_LP_RADIO(0x0BB) /* G RX 2ND 7 */
420#define B2063_G_RX_2ND8 B43_LP_RADIO(0x0BC) /* G RX 2ND 8 */
421#define B2063_G_RX_PS1 B43_LP_RADIO(0x0BD) /* G RX PS 1 */
422#define B2063_G_RX_PS2 B43_LP_RADIO(0x0BE) /* G RX PS 2 */
423#define B2063_G_RX_PS3 B43_LP_RADIO(0x0BF) /* G RX PS 3 */
424#define B2063_G_RX_PS4 B43_LP_RADIO(0x0C0) /* G RX PS 4 */
425#define B2063_G_RX_PS5 B43_LP_RADIO(0x0C1) /* G RX PS 5 */
426#define B2063_G_RX_MIX1 B43_LP_RADIO(0x0C2) /* G RX MIX 1 */
427#define B2063_G_RX_MIX2 B43_LP_RADIO(0x0C3) /* G RX MIX 2 */
428#define B2063_G_RX_MIX3 B43_LP_RADIO(0x0C4) /* G RX MIX 3 */
429#define B2063_G_RX_MIX4 B43_LP_RADIO(0x0C5) /* G RX MIX 4 */
430#define B2063_G_RX_MIX5 B43_LP_RADIO(0x0C6) /* G RX MIX 5 */
431#define B2063_G_RX_MIX6 B43_LP_RADIO(0x0C7) /* G RX MIX 6 */
432#define B2063_G_RX_MIX7 B43_LP_RADIO(0x0C8) /* G RX MIX 7 */
433#define B2063_G_RX_MIX8 B43_LP_RADIO(0x0C9) /* G RX MIX 8 */
434#define B2063_G_RX_PDET1 B43_LP_RADIO(0x0CA) /* G RX PDET 1 */
435#define B2063_G_RX_SPARES1 B43_LP_RADIO(0x0CB) /* G RX SPARES 1 */
436#define B2063_G_RX_SPARES2 B43_LP_RADIO(0x0CC) /* G RX SPARES 2 */
437#define B2063_G_RX_SPARES3 B43_LP_RADIO(0x0CD) /* G RX SPARES 3 */
438#define B2063_A_RX_1ST1 B43_LP_RADIO(0x0CE) /* A RX 1ST 1 */
439#define B2063_A_RX_1ST2 B43_LP_RADIO(0x0CF) /* A RX 1ST 2 */
440#define B2063_A_RX_1ST3 B43_LP_RADIO(0x0D0) /* A RX 1ST 3 */
441#define B2063_A_RX_1ST4 B43_LP_RADIO(0x0D1) /* A RX 1ST 4 */
442#define B2063_A_RX_1ST5 B43_LP_RADIO(0x0D2) /* A RX 1ST 5 */
443#define B2063_A_RX_2ND1 B43_LP_RADIO(0x0D3) /* A RX 2ND 1 */
444#define B2063_A_RX_2ND2 B43_LP_RADIO(0x0D4) /* A RX 2ND 2 */
445#define B2063_A_RX_2ND3 B43_LP_RADIO(0x0D5) /* A RX 2ND 3 */
446#define B2063_A_RX_2ND4 B43_LP_RADIO(0x0D6) /* A RX 2ND 4 */
447#define B2063_A_RX_2ND5 B43_LP_RADIO(0x0D7) /* A RX 2ND 5 */
448#define B2063_A_RX_2ND6 B43_LP_RADIO(0x0D8) /* A RX 2ND 6 */
449#define B2063_A_RX_2ND7 B43_LP_RADIO(0x0D9) /* A RX 2ND 7 */
450#define B2063_A_RX_PS1 B43_LP_RADIO(0x0DA) /* A RX PS 1 */
451#define B2063_A_RX_PS2 B43_LP_RADIO(0x0DB) /* A RX PS 2 */
452#define B2063_A_RX_PS3 B43_LP_RADIO(0x0DC) /* A RX PS 3 */
453#define B2063_A_RX_PS4 B43_LP_RADIO(0x0DD) /* A RX PS 4 */
454#define B2063_A_RX_PS5 B43_LP_RADIO(0x0DE) /* A RX PS 5 */
455#define B2063_A_RX_PS6 B43_LP_RADIO(0x0DF) /* A RX PS 6 */
456#define B2063_A_RX_MIX1 B43_LP_RADIO(0x0E0) /* A RX MIX 1 */
457#define B2063_A_RX_MIX2 B43_LP_RADIO(0x0E1) /* A RX MIX 2 */
458#define B2063_A_RX_MIX3 B43_LP_RADIO(0x0E2) /* A RX MIX 3 */
459#define B2063_A_RX_MIX4 B43_LP_RADIO(0x0E3) /* A RX MIX 4 */
460#define B2063_A_RX_MIX5 B43_LP_RADIO(0x0E4) /* A RX MIX 5 */
461#define B2063_A_RX_MIX6 B43_LP_RADIO(0x0E5) /* A RX MIX 6 */
462#define B2063_A_RX_MIX7 B43_LP_RADIO(0x0E6) /* A RX MIX 7 */
463#define B2063_A_RX_MIX8 B43_LP_RADIO(0x0E7) /* A RX MIX 8 */
464#define B2063_A_RX_PWRDET1 B43_LP_RADIO(0x0E8) /* A RX PWRDET 1 */
465#define B2063_A_RX_SPARE1 B43_LP_RADIO(0x0E9) /* A RX SPARE 1 */
466#define B2063_A_RX_SPARE2 B43_LP_RADIO(0x0EA) /* A RX SPARE 2 */
467#define B2063_A_RX_SPARE3 B43_LP_RADIO(0x0EB) /* A RX SPARE 3 */
468#define B2063_RX_TIA_CTL1 B43_LP_RADIO(0x0EC) /* RX TIA Control 1 */
469#define B2063_RX_TIA_CTL2 B43_LP_RADIO(0x0ED) /* RX TIA Control 2 */
470#define B2063_RX_TIA_CTL3 B43_LP_RADIO(0x0EE) /* RX TIA Control 3 */
471#define B2063_RX_TIA_CTL4 B43_LP_RADIO(0x0EF) /* RX TIA Control 4 */
472#define B2063_RX_TIA_CTL5 B43_LP_RADIO(0x0F0) /* RX TIA Control 5 */
473#define B2063_RX_TIA_CTL6 B43_LP_RADIO(0x0F1) /* RX TIA Control 6 */
474#define B2063_RX_BB_CTL1 B43_LP_RADIO(0x0F2) /* RX BB Control 1 */
475#define B2063_RX_BB_CTL2 B43_LP_RADIO(0x0F3) /* RX BB Control 2 */
476#define B2063_RX_BB_CTL3 B43_LP_RADIO(0x0F4) /* RX BB Control 3 */
477#define B2063_RX_BB_CTL4 B43_LP_RADIO(0x0F5) /* RX BB Control 4 */
478#define B2063_RX_BB_CTL5 B43_LP_RADIO(0x0F6) /* RX BB Control 5 */
479#define B2063_RX_BB_CTL6 B43_LP_RADIO(0x0F7) /* RX BB Control 6 */
480#define B2063_RX_BB_CTL7 B43_LP_RADIO(0x0F8) /* RX BB Control 7 */
481#define B2063_RX_BB_CTL8 B43_LP_RADIO(0x0F9) /* RX BB Control 8 */
482#define B2063_RX_BB_CTL9 B43_LP_RADIO(0x0FA) /* RX BB Control 9 */
483#define B2063_TX_RF_CTL1 B43_LP_RADIO(0x0FB) /* TX RF Control 1 */
484#define B2063_TX_RF_IDAC_LO_RF_I B43_LP_RADIO(0x0FC) /* TX RF IDAC LO RF I */
485#define B2063_TX_RF_IDAC_LO_RF_Q B43_LP_RADIO(0x0FD) /* TX RF IDAC LO RF Q */
486#define B2063_TX_RF_IDAC_LO_BB_I B43_LP_RADIO(0x0FE) /* TX RF IDAC LO BB I */
487#define B2063_TX_RF_IDAC_LO_BB_Q B43_LP_RADIO(0x0FF) /* TX RF IDAC LO BB Q */
488#define B2063_TX_RF_CTL2 B43_LP_RADIO(0x100) /* TX RF Control 2 */
489#define B2063_TX_RF_CTL3 B43_LP_RADIO(0x101) /* TX RF Control 3 */
490#define B2063_TX_RF_CTL4 B43_LP_RADIO(0x102) /* TX RF Control 4 */
491#define B2063_TX_RF_CTL5 B43_LP_RADIO(0x103) /* TX RF Control 5 */
492#define B2063_TX_RF_CTL6 B43_LP_RADIO(0x104) /* TX RF Control 6 */
493#define B2063_TX_RF_CTL7 B43_LP_RADIO(0x105) /* TX RF Control 7 */
494#define B2063_TX_RF_CTL8 B43_LP_RADIO(0x106) /* TX RF Control 8 */
495#define B2063_TX_RF_CTL9 B43_LP_RADIO(0x107) /* TX RF Control 9 */
496#define B2063_TX_RF_CTL10 B43_LP_RADIO(0x108) /* TX RF Control 10 */
497#define B2063_TX_RF_CTL14 B43_LP_RADIO(0x109) /* TX RF Control 14 */
498#define B2063_TX_RF_CTL15 B43_LP_RADIO(0x10A) /* TX RF Control 15 */
499#define B2063_PA_CTL1 B43_LP_RADIO(0x10B) /* PA Control 1 */
500#define B2063_PA_CTL2 B43_LP_RADIO(0x10C) /* PA Control 2 */
501#define B2063_PA_CTL3 B43_LP_RADIO(0x10D) /* PA Control 3 */
502#define B2063_PA_CTL4 B43_LP_RADIO(0x10E) /* PA Control 4 */
503#define B2063_PA_CTL5 B43_LP_RADIO(0x10F) /* PA Control 5 */
504#define B2063_PA_CTL6 B43_LP_RADIO(0x110) /* PA Control 6 */
505#define B2063_PA_CTL7 B43_LP_RADIO(0x111) /* PA Control 7 */
506#define B2063_PA_CTL8 B43_LP_RADIO(0x112) /* PA Control 8 */
507#define B2063_PA_CTL9 B43_LP_RADIO(0x113) /* PA Control 9 */
508#define B2063_PA_CTL10 B43_LP_RADIO(0x114) /* PA Control 10 */
509#define B2063_PA_CTL11 B43_LP_RADIO(0x115) /* PA Control 11 */
510#define B2063_PA_CTL12 B43_LP_RADIO(0x116) /* PA Control 12 */
511#define B2063_PA_CTL13 B43_LP_RADIO(0x117) /* PA Control 13 */
512#define B2063_TX_BB_CTL1 B43_LP_RADIO(0x118) /* TX BB Control 1 */
513#define B2063_TX_BB_CTL2 B43_LP_RADIO(0x119) /* TX BB Control 2 */
514#define B2063_TX_BB_CTL3 B43_LP_RADIO(0x11A) /* TX BB Control 3 */
515#define B2063_TX_BB_CTL4 B43_LP_RADIO(0x11B) /* TX BB Control 4 */
516#define B2063_GPIO_CTL1 B43_LP_RADIO(0x11C) /* GPIO Control 1 */
517#define B2063_VREG_CTL1 B43_LP_RADIO(0x11D) /* VREG Control 1 */
518#define B2063_AMUX_CTL1 B43_LP_RADIO(0x11E) /* AMUX Control 1 */
519#define B2063_IQ_CALIB_GVAR B43_LP_RADIO(0x11F) /* IQ Calibration GVAR */
520#define B2063_IQ_CALIB_CTL1 B43_LP_RADIO(0x120) /* IQ Calibration Control 1 */
521#define B2063_IQ_CALIB_CTL2 B43_LP_RADIO(0x121) /* IQ Calibration Control 2 */
522#define B2063_TEMPSENSE_CTL1 B43_LP_RADIO(0x122) /* TEMPSENSE Control 1 */
523#define B2063_TEMPSENSE_CTL2 B43_LP_RADIO(0x123) /* TEMPSENSE Control 2 */
524#define B2063_TX_RX_LOOPBACK1 B43_LP_RADIO(0x124) /* TX/RX LOOPBACK 1 */
525#define B2063_TX_RX_LOOPBACK2 B43_LP_RADIO(0x125) /* TX/RX LOOPBACK 2 */
526#define B2063_EXT_TSSI_CTL1 B43_LP_RADIO(0x126) /* EXT TSSI Control 1 */
527#define B2063_EXT_TSSI_CTL2 B43_LP_RADIO(0x127) /* EXT TSSI Control 2 */
528#define B2063_AFE_CTL B43_LP_RADIO(0x128) /* AFE Control */
529
530
531
532struct b43_phy_lp {
533 //TODO
534};
535
536
537struct b43_phy_operations;
538extern const struct b43_phy_operations b43_phyops_lp;
539
540#endif /* LINUX_B43_PHY_LP_H_ */
diff --git a/drivers/net/wireless/b43/nphy.c b/drivers/net/wireless/b43/phy_n.c
index 644eed993bea..8bcfda5f3f07 100644
--- a/drivers/net/wireless/b43/nphy.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -26,7 +26,7 @@
26#include <linux/types.h> 26#include <linux/types.h>
27 27
28#include "b43.h" 28#include "b43.h"
29#include "nphy.h" 29#include "phy_n.h"
30#include "tables_nphy.h" 30#include "tables_nphy.h"
31 31
32 32
@@ -34,10 +34,16 @@ void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
34{//TODO 34{//TODO
35} 35}
36 36
37void b43_nphy_xmitpower(struct b43_wldev *dev) 37static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
38{//TODO 38{//TODO
39} 39}
40 40
41static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
42 bool ignore_tssi)
43{//TODO
44 return B43_TXPWR_RES_DONE;
45}
46
41static void b43_chantab_radio_upload(struct b43_wldev *dev, 47static void b43_chantab_radio_upload(struct b43_wldev *dev,
42 const struct b43_nphy_channeltab_entry *e) 48 const struct b43_nphy_channeltab_entry *e)
43{ 49{
@@ -81,9 +87,8 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
81 //TODO 87 //TODO
82} 88}
83 89
84/* Tune the hardware to a new channel. Don't call this directly. 90/* Tune the hardware to a new channel. */
85 * Use b43_radio_selectchannel() */ 91static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
86int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel)
87{ 92{
88 const struct b43_nphy_channeltab_entry *tabent; 93 const struct b43_nphy_channeltab_entry *tabent;
89 94
@@ -162,7 +167,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
162 msleep(1); 167 msleep(1);
163 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F); 168 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
164 msleep(1); 169 msleep(1);
165 b43_radio_selectchannel(dev, dev->phy.channel, 0); 170 nphy_channel_switch(dev, dev->phy.channel);
166 b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9); 171 b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9);
167 b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9); 172 b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9);
168 b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83); 173 b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
@@ -484,3 +489,140 @@ int b43_phy_initn(struct b43_wldev *dev)
484 b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n"); 489 b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
485 return 0; 490 return 0;
486} 491}
492
493static int b43_nphy_op_allocate(struct b43_wldev *dev)
494{
495 struct b43_phy_n *nphy;
496
497 nphy = kzalloc(sizeof(*nphy), GFP_KERNEL);
498 if (!nphy)
499 return -ENOMEM;
500 dev->phy.n = nphy;
501
502 return 0;
503}
504
505static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
506{
507 struct b43_phy *phy = &dev->phy;
508 struct b43_phy_n *nphy = phy->n;
509
510 memset(nphy, 0, sizeof(*nphy));
511
512 //TODO init struct b43_phy_n
513}
514
515static void b43_nphy_op_free(struct b43_wldev *dev)
516{
517 struct b43_phy *phy = &dev->phy;
518 struct b43_phy_n *nphy = phy->n;
519
520 kfree(nphy);
521 phy->n = NULL;
522}
523
524static int b43_nphy_op_init(struct b43_wldev *dev)
525{
526 return b43_phy_initn(dev);
527}
528
529static inline void check_phyreg(struct b43_wldev *dev, u16 offset)
530{
531#if B43_DEBUG
532 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
533 /* OFDM registers are onnly available on A/G-PHYs */
534 b43err(dev->wl, "Invalid OFDM PHY access at "
535 "0x%04X on N-PHY\n", offset);
536 dump_stack();
537 }
538 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) {
539 /* Ext-G registers are only available on G-PHYs */
540 b43err(dev->wl, "Invalid EXT-G PHY access at "
541 "0x%04X on N-PHY\n", offset);
542 dump_stack();
543 }
544#endif /* B43_DEBUG */
545}
546
547static u16 b43_nphy_op_read(struct b43_wldev *dev, u16 reg)
548{
549 check_phyreg(dev, reg);
550 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
551 return b43_read16(dev, B43_MMIO_PHY_DATA);
552}
553
554static void b43_nphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
555{
556 check_phyreg(dev, reg);
557 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
558 b43_write16(dev, B43_MMIO_PHY_DATA, value);
559}
560
561static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
562{
563 /* Register 1 is a 32-bit register. */
564 B43_WARN_ON(reg == 1);
565 /* N-PHY needs 0x100 for read access */
566 reg |= 0x100;
567
568 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
569 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
570}
571
572static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
573{
574 /* Register 1 is a 32-bit register. */
575 B43_WARN_ON(reg == 1);
576
577 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
578 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
579}
580
581static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
582 enum rfkill_state state)
583{//TODO
584}
585
586static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
587{
588 b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
589 on ? 0 : 0x7FFF);
590}
591
592static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
593 unsigned int new_channel)
594{
595 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
596 if ((new_channel < 1) || (new_channel > 14))
597 return -EINVAL;
598 } else {
599 if (new_channel > 200)
600 return -EINVAL;
601 }
602
603 return nphy_channel_switch(dev, new_channel);
604}
605
606static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev)
607{
608 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
609 return 1;
610 return 36;
611}
612
613const struct b43_phy_operations b43_phyops_n = {
614 .allocate = b43_nphy_op_allocate,
615 .free = b43_nphy_op_free,
616 .prepare_structs = b43_nphy_op_prepare_structs,
617 .init = b43_nphy_op_init,
618 .phy_read = b43_nphy_op_read,
619 .phy_write = b43_nphy_op_write,
620 .radio_read = b43_nphy_op_radio_read,
621 .radio_write = b43_nphy_op_radio_write,
622 .software_rfkill = b43_nphy_op_software_rfkill,
623 .switch_analog = b43_nphy_op_switch_analog,
624 .switch_channel = b43_nphy_op_switch_channel,
625 .get_default_chan = b43_nphy_op_get_default_chan,
626 .recalc_txpower = b43_nphy_op_recalc_txpower,
627 .adjust_txpower = b43_nphy_op_adjust_txpower,
628};
diff --git a/drivers/net/wireless/b43/nphy.h b/drivers/net/wireless/b43/phy_n.h
index faf46b9cbf1b..1749aef4147d 100644
--- a/drivers/net/wireless/b43/nphy.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -1,7 +1,7 @@
1#ifndef B43_NPHY_H_ 1#ifndef B43_NPHY_H_
2#define B43_NPHY_H_ 2#define B43_NPHY_H_
3 3
4#include "phy.h" 4#include "phy_common.h"
5 5
6 6
7/* N-PHY registers. */ 7/* N-PHY registers. */
@@ -919,54 +919,12 @@
919 919
920struct b43_wldev; 920struct b43_wldev;
921 921
922struct b43_phy_n {
923 //TODO lots of missing stuff
924};
922 925
923#ifdef CONFIG_B43_NPHY
924/* N-PHY support enabled */
925 926
926int b43_phy_initn(struct b43_wldev *dev); 927struct b43_phy_operations;
928extern const struct b43_phy_operations b43_phyops_n;
927 929
928void b43_nphy_radio_turn_on(struct b43_wldev *dev);
929void b43_nphy_radio_turn_off(struct b43_wldev *dev);
930
931int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel);
932
933void b43_nphy_xmitpower(struct b43_wldev *dev);
934void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna);
935
936
937#else /* CONFIG_B43_NPHY */
938/* N-PHY support disabled */
939
940
941static inline
942int b43_phy_initn(struct b43_wldev *dev)
943{
944 return -EOPNOTSUPP;
945}
946
947static inline
948void b43_nphy_radio_turn_on(struct b43_wldev *dev)
949{
950}
951static inline
952void b43_nphy_radio_turn_off(struct b43_wldev *dev)
953{
954}
955
956static inline
957int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel)
958{
959 return -ENOSYS;
960}
961
962static inline
963void b43_nphy_xmitpower(struct b43_wldev *dev)
964{
965}
966static inline
967void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
968{
969}
970
971#endif /* CONFIG_B43_NPHY */
972#endif /* B43_NPHY_H_ */ 930#endif /* B43_NPHY_H_ */
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index fec5645944a4..7b9e99adb8c3 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -24,6 +24,7 @@
24 24
25#include "rfkill.h" 25#include "rfkill.h"
26#include "b43.h" 26#include "b43.h"
27#include "phy_common.h"
27 28
28#include <linux/kmod.h> 29#include <linux/kmod.h>
29 30
@@ -114,11 +115,11 @@ static int b43_rfkill_soft_toggle(void *data, enum rfkill_state state)
114 goto out_unlock; 115 goto out_unlock;
115 } 116 }
116 if (!dev->phy.radio_on) 117 if (!dev->phy.radio_on)
117 b43_radio_turn_on(dev); 118 b43_software_rfkill(dev, state);
118 break; 119 break;
119 case RFKILL_STATE_SOFT_BLOCKED: 120 case RFKILL_STATE_SOFT_BLOCKED:
120 if (dev->phy.radio_on) 121 if (dev->phy.radio_on)
121 b43_radio_turn_off(dev, 0); 122 b43_software_rfkill(dev, state);
122 break; 123 break;
123 default: 124 default:
124 b43warn(wl, "Received unexpected rfkill state %d.\n", state); 125 b43warn(wl, "Received unexpected rfkill state %d.\n", state);
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index 275095b8cbe7..5adaa3692d75 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -29,7 +29,7 @@
29#include "b43.h" 29#include "b43.h"
30#include "sysfs.h" 30#include "sysfs.h"
31#include "main.h" 31#include "main.h"
32#include "phy.h" 32#include "phy_common.h"
33 33
34#define GENERIC_FILESIZE 64 34#define GENERIC_FILESIZE 64
35 35
@@ -59,7 +59,12 @@ static ssize_t b43_attr_interfmode_show(struct device *dev,
59 59
60 mutex_lock(&wldev->wl->mutex); 60 mutex_lock(&wldev->wl->mutex);
61 61
62 switch (wldev->phy.interfmode) { 62 if (wldev->phy.type != B43_PHYTYPE_G) {
63 mutex_unlock(&wldev->wl->mutex);
64 return -ENOSYS;
65 }
66
67 switch (wldev->phy.g->interfmode) {
63 case B43_INTERFMODE_NONE: 68 case B43_INTERFMODE_NONE:
64 count = 69 count =
65 snprintf(buf, PAGE_SIZE, 70 snprintf(buf, PAGE_SIZE,
@@ -117,11 +122,15 @@ static ssize_t b43_attr_interfmode_store(struct device *dev,
117 mutex_lock(&wldev->wl->mutex); 122 mutex_lock(&wldev->wl->mutex);
118 spin_lock_irqsave(&wldev->wl->irq_lock, flags); 123 spin_lock_irqsave(&wldev->wl->irq_lock, flags);
119 124
120 err = b43_radio_set_interference_mitigation(wldev, mode); 125 if (wldev->phy.ops->interf_mitigation) {
121 if (err) { 126 err = wldev->phy.ops->interf_mitigation(wldev, mode);
122 b43err(wldev->wl, "Interference Mitigation not " 127 if (err) {
123 "supported by device\n"); 128 b43err(wldev->wl, "Interference Mitigation not "
124 } 129 "supported by device\n");
130 }
131 } else
132 err = -ENOSYS;
133
125 mmiowb(); 134 mmiowb();
126 spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); 135 spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
127 mutex_unlock(&wldev->wl->mutex); 136 mutex_unlock(&wldev->wl->mutex);
diff --git a/drivers/net/wireless/b43/tables.c b/drivers/net/wireless/b43/tables.c
index 3f5ea06bf13c..1ef9a6463ec6 100644
--- a/drivers/net/wireless/b43/tables.c
+++ b/drivers/net/wireless/b43/tables.c
@@ -27,7 +27,8 @@
27 27
28#include "b43.h" 28#include "b43.h"
29#include "tables.h" 29#include "tables.h"
30#include "phy.h" 30#include "phy_g.h"
31
31 32
32const u32 b43_tab_rotor[] = { 33const u32 b43_tab_rotor[] = {
33 0xFEB93FFD, 0xFEC63FFD, /* 0 */ 34 0xFEB93FFD, 0xFEC63FFD, /* 0 */
@@ -377,17 +378,17 @@ static inline void assert_sizes(void)
377 378
378u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset) 379u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset)
379{ 380{
380 struct b43_phy *phy = &dev->phy; 381 struct b43_phy_g *gphy = dev->phy.g;
381 u16 addr; 382 u16 addr;
382 383
383 addr = table + offset; 384 addr = table + offset;
384 if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) || 385 if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) ||
385 (addr - 1 != phy->ofdmtab_addr)) { 386 (addr - 1 != gphy->ofdmtab_addr)) {
386 /* The hardware has a different address in memory. Update it. */ 387 /* The hardware has a different address in memory. Update it. */
387 b43_phy_write(dev, B43_PHY_OTABLECTL, addr); 388 b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
388 phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ; 389 gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ;
389 } 390 }
390 phy->ofdmtab_addr = addr; 391 gphy->ofdmtab_addr = addr;
391 392
392 return b43_phy_read(dev, B43_PHY_OTABLEI); 393 return b43_phy_read(dev, B43_PHY_OTABLEI);
393 394
@@ -398,34 +399,34 @@ u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset)
398void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, 399void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
399 u16 offset, u16 value) 400 u16 offset, u16 value)
400{ 401{
401 struct b43_phy *phy = &dev->phy; 402 struct b43_phy_g *gphy = dev->phy.g;
402 u16 addr; 403 u16 addr;
403 404
404 addr = table + offset; 405 addr = table + offset;
405 if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) || 406 if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) ||
406 (addr - 1 != phy->ofdmtab_addr)) { 407 (addr - 1 != gphy->ofdmtab_addr)) {
407 /* The hardware has a different address in memory. Update it. */ 408 /* The hardware has a different address in memory. Update it. */
408 b43_phy_write(dev, B43_PHY_OTABLECTL, addr); 409 b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
409 phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE; 410 gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE;
410 } 411 }
411 phy->ofdmtab_addr = addr; 412 gphy->ofdmtab_addr = addr;
412 b43_phy_write(dev, B43_PHY_OTABLEI, value); 413 b43_phy_write(dev, B43_PHY_OTABLEI, value);
413} 414}
414 415
415u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset) 416u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset)
416{ 417{
417 struct b43_phy *phy = &dev->phy; 418 struct b43_phy_g *gphy = dev->phy.g;
418 u32 ret; 419 u32 ret;
419 u16 addr; 420 u16 addr;
420 421
421 addr = table + offset; 422 addr = table + offset;
422 if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) || 423 if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) ||
423 (addr - 1 != phy->ofdmtab_addr)) { 424 (addr - 1 != gphy->ofdmtab_addr)) {
424 /* The hardware has a different address in memory. Update it. */ 425 /* The hardware has a different address in memory. Update it. */
425 b43_phy_write(dev, B43_PHY_OTABLECTL, addr); 426 b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
426 phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ; 427 gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ;
427 } 428 }
428 phy->ofdmtab_addr = addr; 429 gphy->ofdmtab_addr = addr;
429 ret = b43_phy_read(dev, B43_PHY_OTABLEQ); 430 ret = b43_phy_read(dev, B43_PHY_OTABLEQ);
430 ret <<= 16; 431 ret <<= 16;
431 ret |= b43_phy_read(dev, B43_PHY_OTABLEI); 432 ret |= b43_phy_read(dev, B43_PHY_OTABLEI);
@@ -436,17 +437,17 @@ u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset)
436void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, 437void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
437 u16 offset, u32 value) 438 u16 offset, u32 value)
438{ 439{
439 struct b43_phy *phy = &dev->phy; 440 struct b43_phy_g *gphy = dev->phy.g;
440 u16 addr; 441 u16 addr;
441 442
442 addr = table + offset; 443 addr = table + offset;
443 if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) || 444 if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) ||
444 (addr - 1 != phy->ofdmtab_addr)) { 445 (addr - 1 != gphy->ofdmtab_addr)) {
445 /* The hardware has a different address in memory. Update it. */ 446 /* The hardware has a different address in memory. Update it. */
446 b43_phy_write(dev, B43_PHY_OTABLECTL, addr); 447 b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
447 phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE; 448 gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE;
448 } 449 }
449 phy->ofdmtab_addr = addr; 450 gphy->ofdmtab_addr = addr;
450 451
451 b43_phy_write(dev, B43_PHY_OTABLEI, value); 452 b43_phy_write(dev, B43_PHY_OTABLEI, value);
452 b43_phy_write(dev, B43_PHY_OTABLEQ, (value >> 16)); 453 b43_phy_write(dev, B43_PHY_OTABLEQ, (value >> 16));
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 2aa57551786a..4e2336315545 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -24,8 +24,8 @@
24 24
25#include "b43.h" 25#include "b43.h"
26#include "tables_nphy.h" 26#include "tables_nphy.h"
27#include "phy.h" 27#include "phy_common.h"
28#include "nphy.h" 28#include "phy_n.h"
29 29
30 30
31struct b2055_inittab_entry { 31struct b2055_inittab_entry {
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index daa94211f838..0c0fb15abb9f 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -27,7 +27,7 @@
27#include "b43.h" 27#include "b43.h"
28#include "main.h" 28#include "main.h"
29#include "tables.h" 29#include "tables.h"
30#include "phy.h" 30#include "phy_common.h"
31#include "wa.h" 31#include "wa.h"
32 32
33static void b43_wa_papd(struct b43_wldev *dev) 33static void b43_wa_papd(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 9dda8169f7cc..5e0b71c3ad02 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -28,7 +28,7 @@
28*/ 28*/
29 29
30#include "xmit.h" 30#include "xmit.h"
31#include "phy.h" 31#include "phy_common.h"
32#include "dma.h" 32#include "dma.h"
33#include "pio.h" 33#include "pio.h"
34 34
@@ -431,6 +431,7 @@ static s8 b43_rssi_postprocess(struct b43_wldev *dev,
431 int adjust_2053, int adjust_2050) 431 int adjust_2053, int adjust_2050)
432{ 432{
433 struct b43_phy *phy = &dev->phy; 433 struct b43_phy *phy = &dev->phy;
434 struct b43_phy_g *gphy = phy->g;
434 s32 tmp; 435 s32 tmp;
435 436
436 switch (phy->radio_ver) { 437 switch (phy->radio_ver) {
@@ -450,7 +451,8 @@ static s8 b43_rssi_postprocess(struct b43_wldev *dev,
450 boardflags_lo & B43_BFL_RSSI) { 451 boardflags_lo & B43_BFL_RSSI) {
451 if (in_rssi > 63) 452 if (in_rssi > 63)
452 in_rssi = 63; 453 in_rssi = 63;
453 tmp = phy->nrssi_lt[in_rssi]; 454 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
455 tmp = gphy->nrssi_lt[in_rssi];
454 tmp = 31 - tmp; 456 tmp = 31 - tmp;
455 tmp *= -131; 457 tmp *= -131;
456 tmp /= 128; 458 tmp /= 128;
@@ -678,6 +680,8 @@ void b43_handle_txstatus(struct b43_wldev *dev,
678 b43_pio_handle_txstatus(dev, status); 680 b43_pio_handle_txstatus(dev, status);
679 else 681 else
680 b43_dma_handle_txstatus(dev, status); 682 b43_dma_handle_txstatus(dev, status);
683
684 b43_phy_txpower_check(dev, 0);
681} 685}
682 686
683/* Fill out the mac80211 TXstatus report based on the b43-specific 687/* Fill out the mac80211 TXstatus report based on the b43-specific
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1cb77db5c292..9fb1421cbec2 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -888,13 +888,13 @@ generate_new:
888 888
889static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev) 889static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
890{ 890{
891 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) { 891 if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP)) {
892 /* TODO: PS TBTT */ 892 /* TODO: PS TBTT */
893 } else { 893 } else {
894 if (1/*FIXME: the last PSpoll frame was sent successfully */) 894 if (1/*FIXME: the last PSpoll frame was sent successfully */)
895 b43legacy_power_saving_ctl_bits(dev, -1, -1); 895 b43legacy_power_saving_ctl_bits(dev, -1, -1);
896 } 896 }
897 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) 897 if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
898 dev->dfq_valid = 1; 898 dev->dfq_valid = 1;
899} 899}
900 900
@@ -1201,7 +1201,7 @@ static void handle_irq_beacon(struct b43legacy_wldev *dev)
1201 struct b43legacy_wl *wl = dev->wl; 1201 struct b43legacy_wl *wl = dev->wl;
1202 u32 cmd; 1202 u32 cmd;
1203 1203
1204 if (!b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) 1204 if (!b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
1205 return; 1205 return;
1206 1206
1207 /* This is the bottom half of the asynchronous beacon update. */ 1207 /* This is the bottom half of the asynchronous beacon update. */
@@ -1936,9 +1936,9 @@ static void b43legacy_adjust_opmode(struct b43legacy_wldev *dev)
1936 ctl &= ~B43legacy_MACCTL_BEACPROMISC; 1936 ctl &= ~B43legacy_MACCTL_BEACPROMISC;
1937 ctl |= B43legacy_MACCTL_INFRA; 1937 ctl |= B43legacy_MACCTL_INFRA;
1938 1938
1939 if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) 1939 if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
1940 ctl |= B43legacy_MACCTL_AP; 1940 ctl |= B43legacy_MACCTL_AP;
1941 else if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) 1941 else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC))
1942 ctl &= ~B43legacy_MACCTL_INFRA; 1942 ctl &= ~B43legacy_MACCTL_INFRA;
1943 1943
1944 if (wl->filter_flags & FIF_CONTROL) 1944 if (wl->filter_flags & FIF_CONTROL)
@@ -2646,7 +2646,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2646 b43legacy_mgmtframe_txantenna(dev, antenna_tx); 2646 b43legacy_mgmtframe_txantenna(dev, antenna_tx);
2647 2647
2648 /* Update templates for AP mode. */ 2648 /* Update templates for AP mode. */
2649 if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) 2649 if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
2650 b43legacy_set_beacon_int(dev, conf->beacon_int); 2650 b43legacy_set_beacon_int(dev, conf->beacon_int);
2651 2651
2652 2652
@@ -2733,12 +2733,12 @@ static int b43legacy_op_config_interface(struct ieee80211_hw *hw,
2733 else 2733 else
2734 memset(wl->bssid, 0, ETH_ALEN); 2734 memset(wl->bssid, 0, ETH_ALEN);
2735 if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) { 2735 if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) {
2736 if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) { 2736 if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP)) {
2737 B43legacy_WARN_ON(vif->type != IEEE80211_IF_TYPE_AP); 2737 B43legacy_WARN_ON(vif->type != NL80211_IFTYPE_AP);
2738 b43legacy_set_ssid(dev, conf->ssid, conf->ssid_len); 2738 b43legacy_set_ssid(dev, conf->ssid, conf->ssid_len);
2739 if (conf->changed & IEEE80211_IFCC_BEACON) 2739 if (conf->changed & IEEE80211_IFCC_BEACON)
2740 b43legacy_update_templates(wl); 2740 b43legacy_update_templates(wl);
2741 } else if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) { 2741 } else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)) {
2742 if (conf->changed & IEEE80211_IFCC_BEACON) 2742 if (conf->changed & IEEE80211_IFCC_BEACON)
2743 b43legacy_update_templates(wl); 2743 b43legacy_update_templates(wl);
2744 } 2744 }
@@ -3020,7 +3020,7 @@ static void b43legacy_set_synth_pu_delay(struct b43legacy_wldev *dev,
3020 bool idle) { 3020 bool idle) {
3021 u16 pu_delay = 1050; 3021 u16 pu_delay = 1050;
3022 3022
3023 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS) || idle) 3023 if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle)
3024 pu_delay = 500; 3024 pu_delay = 500;
3025 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) 3025 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
3026 pu_delay = max(pu_delay, (u16)2400); 3026 pu_delay = max(pu_delay, (u16)2400);
@@ -3035,7 +3035,7 @@ static void b43legacy_set_pretbtt(struct b43legacy_wldev *dev)
3035 u16 pretbtt; 3035 u16 pretbtt;
3036 3036
3037 /* The time value is in microseconds. */ 3037 /* The time value is in microseconds. */
3038 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) 3038 if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
3039 pretbtt = 2; 3039 pretbtt = 2;
3040 else 3040 else
3041 pretbtt = 250; 3041 pretbtt = 250;
@@ -3259,10 +3259,10 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3259 3259
3260 /* TODO: allow WDS/AP devices to coexist */ 3260 /* TODO: allow WDS/AP devices to coexist */
3261 3261
3262 if (conf->type != IEEE80211_IF_TYPE_AP && 3262 if (conf->type != NL80211_IFTYPE_AP &&
3263 conf->type != IEEE80211_IF_TYPE_STA && 3263 conf->type != NL80211_IFTYPE_STATION &&
3264 conf->type != IEEE80211_IF_TYPE_WDS && 3264 conf->type != NL80211_IFTYPE_WDS &&
3265 conf->type != IEEE80211_IF_TYPE_IBSS) 3265 conf->type != NL80211_IFTYPE_ADHOC)
3266 return -EOPNOTSUPP; 3266 return -EOPNOTSUPP;
3267 3267
3268 mutex_lock(&wl->mutex); 3268 mutex_lock(&wl->mutex);
@@ -3403,7 +3403,7 @@ out_unlock:
3403} 3403}
3404 3404
3405static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw, 3405static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
3406 int aid, int set) 3406 struct ieee80211_sta *sta, bool set)
3407{ 3407{
3408 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 3408 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3409 unsigned long flags; 3409 unsigned long flags;
@@ -3704,6 +3704,11 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
3704 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 3704 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
3705 IEEE80211_HW_SIGNAL_DBM | 3705 IEEE80211_HW_SIGNAL_DBM |
3706 IEEE80211_HW_NOISE_DBM; 3706 IEEE80211_HW_NOISE_DBM;
3707 hw->wiphy->interface_modes =
3708 BIT(NL80211_IFTYPE_AP) |
3709 BIT(NL80211_IFTYPE_STATION) |
3710 BIT(NL80211_IFTYPE_WDS) |
3711 BIT(NL80211_IFTYPE_ADHOC);
3707 hw->queues = 1; /* FIXME: hardware has more queues */ 3712 hw->queues = 1; /* FIXME: hardware has more queues */
3708 SET_IEEE80211_DEV(hw, dev->dev); 3713 SET_IEEE80211_DEV(hw, dev->dev);
3709 if (is_valid_ether_addr(sprom->et1mac)) 3714 if (is_valid_ether_addr(sprom->et1mac))
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 768cccb9b1ba..4c9442b16f3f 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -103,7 +103,7 @@ void b43legacy_phy_lock(struct b43legacy_wldev *dev)
103 if (dev->dev->id.revision < 3) { 103 if (dev->dev->id.revision < 3) {
104 b43legacy_mac_suspend(dev); 104 b43legacy_mac_suspend(dev);
105 } else { 105 } else {
106 if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) 106 if (!b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP))
107 b43legacy_power_saving_ctl_bits(dev, -1, 1); 107 b43legacy_power_saving_ctl_bits(dev, -1, 1);
108 } 108 }
109} 109}
@@ -118,7 +118,7 @@ void b43legacy_phy_unlock(struct b43legacy_wldev *dev)
118 if (dev->dev->id.revision < 3) { 118 if (dev->dev->id.revision < 3) {
119 b43legacy_mac_enable(dev); 119 b43legacy_mac_enable(dev);
120 } else { 120 } else {
121 if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) 121 if (!b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP))
122 b43legacy_power_saving_ctl_bits(dev, -1, -1); 122 b43legacy_power_saving_ctl_bits(dev, -1, -1);
123 } 123 }
124} 124}
@@ -595,12 +595,14 @@ static void b43legacy_phy_initb5(struct b43legacy_wldev *dev)
595 0x0035) & 0xFFC0) | 0x0064); 595 0x0035) & 0xFFC0) | 0x0064);
596 b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev, 596 b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev,
597 0x005D) & 0xFF80) | 0x000A); 597 0x005D) & 0xFF80) | 0x000A);
598 b43legacy_phy_write(dev, 0x5B, 0x0000);
599 b43legacy_phy_write(dev, 0x5C, 0x0000);
598 } 600 }
599 601
600 if (dev->bad_frames_preempt) 602 if (dev->bad_frames_preempt)
601 b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, 603 b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD,
602 b43legacy_phy_read(dev, 604 b43legacy_phy_read(dev,
603 B43legacy_PHY_RADIO_BITFIELD) | (1 << 11)); 605 B43legacy_PHY_RADIO_BITFIELD) | (1 << 12));
604 606
605 if (phy->analog == 1) { 607 if (phy->analog == 1) {
606 b43legacy_phy_write(dev, 0x0026, 0xCE00); 608 b43legacy_phy_write(dev, 0x0026, 0xCE00);
@@ -753,7 +755,7 @@ static void b43legacy_phy_initb6(struct b43legacy_wldev *dev)
753 b43legacy_radio_write16(dev, 0x0050, 0x0020); 755 b43legacy_radio_write16(dev, 0x0050, 0x0020);
754 } 756 }
755 if (phy->radio_rev <= 2) { 757 if (phy->radio_rev <= 2) {
756 b43legacy_radio_write16(dev, 0x007C, 0x0020); 758 b43legacy_radio_write16(dev, 0x0050, 0x0020);
757 b43legacy_radio_write16(dev, 0x005A, 0x0070); 759 b43legacy_radio_write16(dev, 0x005A, 0x0070);
758 b43legacy_radio_write16(dev, 0x005B, 0x007B); 760 b43legacy_radio_write16(dev, 0x005B, 0x007B);
759 b43legacy_radio_write16(dev, 0x005C, 0x00B0); 761 b43legacy_radio_write16(dev, 0x005C, 0x00B0);
@@ -771,7 +773,7 @@ static void b43legacy_phy_initb6(struct b43legacy_wldev *dev)
771 b43legacy_phy_write(dev, 0x002A, 0x8AC0); 773 b43legacy_phy_write(dev, 0x002A, 0x8AC0);
772 b43legacy_phy_write(dev, 0x0038, 0x0668); 774 b43legacy_phy_write(dev, 0x0038, 0x0668);
773 b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); 775 b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF);
774 if (phy->radio_rev <= 5) 776 if (phy->radio_rev == 4 || phy->radio_rev == 5)
775 b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev, 777 b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev,
776 0x005D) & 0xFF80) | 0x0003); 778 0x005D) & 0xFF80) | 0x0003);
777 if (phy->radio_rev <= 2) 779 if (phy->radio_rev <= 2)
@@ -1010,7 +1012,7 @@ static void b43legacy_phy_initg(struct b43legacy_wldev *dev)
1010 b43legacy_phy_initb5(dev); 1012 b43legacy_phy_initb5(dev);
1011 else 1013 else
1012 b43legacy_phy_initb6(dev); 1014 b43legacy_phy_initb6(dev);
1013 if (phy->rev >= 2 || phy->gmode) 1015 if (phy->rev >= 2 && phy->gmode)
1014 b43legacy_phy_inita(dev); 1016 b43legacy_phy_inita(dev);
1015 1017
1016 if (phy->rev >= 2) { 1018 if (phy->rev >= 2) {
@@ -1025,18 +1027,22 @@ static void b43legacy_phy_initg(struct b43legacy_wldev *dev)
1025 b43legacy_phy_write(dev, 0x0811, 0x0400); 1027 b43legacy_phy_write(dev, 0x0811, 0x0400);
1026 b43legacy_phy_write(dev, 0x0015, 0x00C0); 1028 b43legacy_phy_write(dev, 0x0015, 0x00C0);
1027 } 1029 }
1028 if (phy->rev >= 2 || phy->gmode) { 1030 if (phy->gmode) {
1029 tmp = b43legacy_phy_read(dev, 0x0400) & 0xFF; 1031 tmp = b43legacy_phy_read(dev, 0x0400) & 0xFF;
1030 if (tmp == 3 || tmp == 5) { 1032 if (tmp == 3) {
1033 b43legacy_phy_write(dev, 0x04C2, 0x1816);
1034 b43legacy_phy_write(dev, 0x04C3, 0x8606);
1035 }
1036 if (tmp == 4 || tmp == 5) {
1031 b43legacy_phy_write(dev, 0x04C2, 0x1816); 1037 b43legacy_phy_write(dev, 0x04C2, 0x1816);
1032 b43legacy_phy_write(dev, 0x04C3, 0x8006); 1038 b43legacy_phy_write(dev, 0x04C3, 0x8006);
1033 if (tmp == 5) 1039 b43legacy_phy_write(dev, 0x04CC,
1034 b43legacy_phy_write(dev, 0x04CC, 1040 (b43legacy_phy_read(dev,
1035 (b43legacy_phy_read(dev, 1041 0x04CC) & 0x00FF) |
1036 0x04CC) & 0x00FF) | 1042 0x1F00);
1037 0x1F00);
1038 } 1043 }
1039 b43legacy_phy_write(dev, 0x047E, 0x0078); 1044 if (phy->rev >= 2)
1045 b43legacy_phy_write(dev, 0x047E, 0x0078);
1040 } 1046 }
1041 if (phy->radio_rev == 8) { 1047 if (phy->radio_rev == 8) {
1042 b43legacy_phy_write(dev, 0x0801, b43legacy_phy_read(dev, 0x0801) 1048 b43legacy_phy_write(dev, 0x0801, b43legacy_phy_read(dev, 0x0801)
@@ -1078,7 +1084,7 @@ static void b43legacy_phy_initg(struct b43legacy_wldev *dev)
1078 else 1084 else
1079 b43legacy_phy_write(dev, 0x002F, 0x0202); 1085 b43legacy_phy_write(dev, 0x002F, 0x0202);
1080 } 1086 }
1081 if (phy->gmode || phy->rev >= 2) { 1087 if (phy->gmode) {
1082 b43legacy_phy_lo_adjust(dev, 0); 1088 b43legacy_phy_lo_adjust(dev, 0);
1083 b43legacy_phy_write(dev, 0x080F, 0x8078); 1089 b43legacy_phy_write(dev, 0x080F, 0x8078);
1084 } 1090 }
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 68e1f8c78727..6835064758fb 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -193,7 +193,6 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
193{ 193{
194 const struct ieee80211_hdr *wlhdr; 194 const struct ieee80211_hdr *wlhdr;
195 int use_encryption = !!info->control.hw_key; 195 int use_encryption = !!info->control.hw_key;
196 u16 fctl;
197 u8 rate; 196 u8 rate;
198 struct ieee80211_rate *rate_fb; 197 struct ieee80211_rate *rate_fb;
199 int rate_ofdm; 198 int rate_ofdm;
@@ -204,7 +203,6 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
204 struct ieee80211_rate *tx_rate; 203 struct ieee80211_rate *tx_rate;
205 204
206 wlhdr = (const struct ieee80211_hdr *)fragment_data; 205 wlhdr = (const struct ieee80211_hdr *)fragment_data;
207 fctl = le16_to_cpu(wlhdr->frame_control);
208 206
209 memset(txhdr, 0, sizeof(*txhdr)); 207 memset(txhdr, 0, sizeof(*txhdr));
210 208
@@ -253,7 +251,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
253 mac_ctl |= (key->algorithm << 251 mac_ctl |= (key->algorithm <<
254 B43legacy_TX4_MAC_KEYALG_SHIFT) & 252 B43legacy_TX4_MAC_KEYALG_SHIFT) &
255 B43legacy_TX4_MAC_KEYALG; 253 B43legacy_TX4_MAC_KEYALG;
256 wlhdr_len = ieee80211_get_hdrlen(fctl); 254 wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control);
257 iv_len = min((size_t)info->control.iv_len, 255 iv_len = min((size_t)info->control.iv_len,
258 ARRAY_SIZE(txhdr->iv)); 256 ARRAY_SIZE(txhdr->iv));
259 memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len); 257 memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
@@ -626,7 +624,7 @@ void b43legacy_handle_hwtxstatus(struct b43legacy_wldev *dev,
626 tmp = hw->count; 624 tmp = hw->count;
627 status.frame_count = (tmp >> 4); 625 status.frame_count = (tmp >> 4);
628 status.rts_count = (tmp & 0x0F); 626 status.rts_count = (tmp & 0x0F);
629 tmp = hw->flags; 627 tmp = hw->flags << 1;
630 status.supp_reason = ((tmp & 0x1C) >> 2); 628 status.supp_reason = ((tmp & 0x1C) >> 2);
631 status.pm_indicated = !!(tmp & 0x80); 629 status.pm_indicated = !!(tmp & 0x80);
632 status.intermediate = !!(tmp & 0x40); 630 status.intermediate = !!(tmp & 0x40);
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
index 29d39105f5b8..bfa375369df3 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/hermes.c
@@ -87,7 +87,8 @@ MODULE_LICENSE("Dual MPL/GPL");
87 87
88 Callable from any context. 88 Callable from any context.
89*/ 89*/
90static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0) 90static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
91 u16 param1, u16 param2)
91{ 92{
92 int k = CMD_BUSY_TIMEOUT; 93 int k = CMD_BUSY_TIMEOUT;
93 u16 reg; 94 u16 reg;
@@ -103,8 +104,8 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0)
103 return -EBUSY; 104 return -EBUSY;
104 } 105 }
105 106
106 hermes_write_regn(hw, PARAM2, 0); 107 hermes_write_regn(hw, PARAM2, param2);
107 hermes_write_regn(hw, PARAM1, 0); 108 hermes_write_regn(hw, PARAM1, param1);
108 hermes_write_regn(hw, PARAM0, param0); 109 hermes_write_regn(hw, PARAM0, param0);
109 hermes_write_regn(hw, CMD, cmd); 110 hermes_write_regn(hw, CMD, cmd);
110 111
@@ -115,16 +116,72 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0)
115 * Function definitions 116 * Function definitions
116 */ 117 */
117 118
119/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
120int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
121 u16 parm0, u16 parm1, u16 parm2,
122 struct hermes_response *resp)
123{
124 int err = 0;
125 int k;
126 u16 status, reg;
127
128 err = hermes_issue_cmd(hw, cmd, parm0, parm1, parm2);
129 if (err)
130 return err;
131
132 reg = hermes_read_regn(hw, EVSTAT);
133 k = CMD_INIT_TIMEOUT;
134 while ((!(reg & HERMES_EV_CMD)) && k) {
135 k--;
136 udelay(10);
137 reg = hermes_read_regn(hw, EVSTAT);
138 }
139
140 hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
141
142 if (!hermes_present(hw)) {
143 DEBUG(0, "hermes @ 0x%x: Card removed during reset.\n",
144 hw->iobase);
145 err = -ENODEV;
146 goto out;
147 }
148
149 if (!(reg & HERMES_EV_CMD)) {
150 printk(KERN_ERR "hermes @ %p: "
151 "Timeout waiting for card to reset (reg=0x%04x)!\n",
152 hw->iobase, reg);
153 err = -ETIMEDOUT;
154 goto out;
155 }
156
157 status = hermes_read_regn(hw, STATUS);
158 if (resp) {
159 resp->status = status;
160 resp->resp0 = hermes_read_regn(hw, RESP0);
161 resp->resp1 = hermes_read_regn(hw, RESP1);
162 resp->resp2 = hermes_read_regn(hw, RESP2);
163 }
164
165 hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
166
167 if (status & HERMES_STATUS_RESULT)
168 err = -EIO;
169out:
170 return err;
171}
172EXPORT_SYMBOL(hermes_doicmd_wait);
173
118void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing) 174void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
119{ 175{
120 hw->iobase = address; 176 hw->iobase = address;
121 hw->reg_spacing = reg_spacing; 177 hw->reg_spacing = reg_spacing;
122 hw->inten = 0x0; 178 hw->inten = 0x0;
123} 179}
180EXPORT_SYMBOL(hermes_struct_init);
124 181
125int hermes_init(hermes_t *hw) 182int hermes_init(hermes_t *hw)
126{ 183{
127 u16 status, reg; 184 u16 reg;
128 int err = 0; 185 int err = 0;
129 int k; 186 int k;
130 187
@@ -162,45 +219,11 @@ int hermes_init(hermes_t *hw)
162 219
163 /* We don't use hermes_docmd_wait here, because the reset wipes 220 /* We don't use hermes_docmd_wait here, because the reset wipes
164 the magic constant in SWSUPPORT0 away, and it gets confused */ 221 the magic constant in SWSUPPORT0 away, and it gets confused */
165 err = hermes_issue_cmd(hw, HERMES_CMD_INIT, 0); 222 err = hermes_doicmd_wait(hw, HERMES_CMD_INIT, 0, 0, 0, NULL);
166 if (err)
167 return err;
168
169 reg = hermes_read_regn(hw, EVSTAT);
170 k = CMD_INIT_TIMEOUT;
171 while ( (! (reg & HERMES_EV_CMD)) && k) {
172 k--;
173 udelay(10);
174 reg = hermes_read_regn(hw, EVSTAT);
175 }
176
177 hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
178
179 if (! hermes_present(hw)) {
180 DEBUG(0, "hermes @ 0x%x: Card removed during reset.\n",
181 hw->iobase);
182 err = -ENODEV;
183 goto out;
184 }
185
186 if (! (reg & HERMES_EV_CMD)) {
187 printk(KERN_ERR "hermes @ %p: "
188 "Timeout waiting for card to reset (reg=0x%04x)!\n",
189 hw->iobase, reg);
190 err = -ETIMEDOUT;
191 goto out;
192 }
193 223
194 status = hermes_read_regn(hw, STATUS);
195
196 hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
197
198 if (status & HERMES_STATUS_RESULT)
199 err = -EIO;
200
201 out:
202 return err; 224 return err;
203} 225}
226EXPORT_SYMBOL(hermes_init);
204 227
205/* Issue a command to the chip, and (busy!) wait for it to 228/* Issue a command to the chip, and (busy!) wait for it to
206 * complete. 229 * complete.
@@ -216,7 +239,7 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
216 u16 reg; 239 u16 reg;
217 u16 status; 240 u16 status;
218 241
219 err = hermes_issue_cmd(hw, cmd, parm0); 242 err = hermes_issue_cmd(hw, cmd, parm0, 0, 0);
220 if (err) { 243 if (err) {
221 if (! hermes_present(hw)) { 244 if (! hermes_present(hw)) {
222 if (net_ratelimit()) 245 if (net_ratelimit())
@@ -271,6 +294,7 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
271 out: 294 out:
272 return err; 295 return err;
273} 296}
297EXPORT_SYMBOL(hermes_docmd_wait);
274 298
275int hermes_allocate(hermes_t *hw, u16 size, u16 *fid) 299int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
276{ 300{
@@ -313,7 +337,7 @@ int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
313 337
314 return 0; 338 return 0;
315} 339}
316 340EXPORT_SYMBOL(hermes_allocate);
317 341
318/* Set up a BAP to read a particular chunk of data from card's internal buffer. 342/* Set up a BAP to read a particular chunk of data from card's internal buffer.
319 * 343 *
@@ -397,6 +421,7 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
397 out: 421 out:
398 return err; 422 return err;
399} 423}
424EXPORT_SYMBOL(hermes_bap_pread);
400 425
401/* Write a block of data to the chip's buffer, via the 426/* Write a block of data to the chip's buffer, via the
402 * BAP. Synchronization/serialization is the caller's problem. 427 * BAP. Synchronization/serialization is the caller's problem.
@@ -422,6 +447,7 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
422 out: 447 out:
423 return err; 448 return err;
424} 449}
450EXPORT_SYMBOL(hermes_bap_pwrite);
425 451
426/* Read a Length-Type-Value record from the card. 452/* Read a Length-Type-Value record from the card.
427 * 453 *
@@ -463,7 +489,7 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
463 if (rtype != rid) 489 if (rtype != rid)
464 printk(KERN_WARNING "hermes @ %p: %s(): " 490 printk(KERN_WARNING "hermes @ %p: %s(): "
465 "rid (0x%04x) does not match type (0x%04x)\n", 491 "rid (0x%04x) does not match type (0x%04x)\n",
466 hw->iobase, __FUNCTION__, rid, rtype); 492 hw->iobase, __func__, rid, rtype);
467 if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize) 493 if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize)
468 printk(KERN_WARNING "hermes @ %p: " 494 printk(KERN_WARNING "hermes @ %p: "
469 "Truncating LTV record from %d to %d bytes. " 495 "Truncating LTV record from %d to %d bytes. "
@@ -475,6 +501,7 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
475 501
476 return 0; 502 return 0;
477} 503}
504EXPORT_SYMBOL(hermes_read_ltv);
478 505
479int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, 506int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
480 u16 length, const void *value) 507 u16 length, const void *value)
@@ -497,20 +524,11 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
497 524
498 hermes_write_bytes(hw, dreg, value, count << 1); 525 hermes_write_bytes(hw, dreg, value, count << 1);
499 526
500 err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE, 527 err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
501 rid, NULL); 528 rid, NULL);
502 529
503 return err; 530 return err;
504} 531}
505
506EXPORT_SYMBOL(hermes_struct_init);
507EXPORT_SYMBOL(hermes_init);
508EXPORT_SYMBOL(hermes_docmd_wait);
509EXPORT_SYMBOL(hermes_allocate);
510
511EXPORT_SYMBOL(hermes_bap_pread);
512EXPORT_SYMBOL(hermes_bap_pwrite);
513EXPORT_SYMBOL(hermes_read_ltv);
514EXPORT_SYMBOL(hermes_write_ltv); 532EXPORT_SYMBOL(hermes_write_ltv);
515 533
516static int __init init_hermes(void) 534static int __init init_hermes(void)
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
index 8e3f0e3edb58..8b13c8fef3dc 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/hermes.h
@@ -179,17 +179,23 @@
179#define HERMES_802_11_OFFSET (14) 179#define HERMES_802_11_OFFSET (14)
180#define HERMES_802_3_OFFSET (14+32) 180#define HERMES_802_3_OFFSET (14+32)
181#define HERMES_802_2_OFFSET (14+32+14) 181#define HERMES_802_2_OFFSET (14+32+14)
182#define HERMES_TXCNTL2_OFFSET (HERMES_802_3_OFFSET - 2)
182 183
183#define HERMES_RXSTAT_ERR (0x0003) 184#define HERMES_RXSTAT_ERR (0x0003)
184#define HERMES_RXSTAT_BADCRC (0x0001) 185#define HERMES_RXSTAT_BADCRC (0x0001)
185#define HERMES_RXSTAT_UNDECRYPTABLE (0x0002) 186#define HERMES_RXSTAT_UNDECRYPTABLE (0x0002)
187#define HERMES_RXSTAT_MIC (0x0010) /* Frame contains MIC */
186#define HERMES_RXSTAT_MACPORT (0x0700) 188#define HERMES_RXSTAT_MACPORT (0x0700)
187#define HERMES_RXSTAT_PCF (0x1000) /* Frame was received in CF period */ 189#define HERMES_RXSTAT_PCF (0x1000) /* Frame was received in CF period */
190#define HERMES_RXSTAT_MIC_KEY_ID (0x1800) /* MIC key used */
188#define HERMES_RXSTAT_MSGTYPE (0xE000) 191#define HERMES_RXSTAT_MSGTYPE (0xE000)
189#define HERMES_RXSTAT_1042 (0x2000) /* RFC-1042 frame */ 192#define HERMES_RXSTAT_1042 (0x2000) /* RFC-1042 frame */
190#define HERMES_RXSTAT_TUNNEL (0x4000) /* bridge-tunnel encoded frame */ 193#define HERMES_RXSTAT_TUNNEL (0x4000) /* bridge-tunnel encoded frame */
191#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */ 194#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */
192 195
196/* Shift amount for key ID in RXSTAT and TXCTRL */
197#define HERMES_MIC_KEY_ID_SHIFT 11
198
193struct hermes_tx_descriptor { 199struct hermes_tx_descriptor {
194 __le16 status; 200 __le16 status;
195 __le16 reserved1; 201 __le16 reserved1;
@@ -208,6 +214,8 @@ struct hermes_tx_descriptor {
208#define HERMES_TXCTRL_TX_OK (0x0002) /* ?? interrupt on Tx complete */ 214#define HERMES_TXCTRL_TX_OK (0x0002) /* ?? interrupt on Tx complete */
209#define HERMES_TXCTRL_TX_EX (0x0004) /* ?? interrupt on Tx exception */ 215#define HERMES_TXCTRL_TX_EX (0x0004) /* ?? interrupt on Tx exception */
210#define HERMES_TXCTRL_802_11 (0x0008) /* We supply 802.11 header */ 216#define HERMES_TXCTRL_802_11 (0x0008) /* We supply 802.11 header */
217#define HERMES_TXCTRL_MIC (0x0010) /* 802.3 + TKIP */
218#define HERMES_TXCTRL_MIC_KEY_ID (0x1800) /* MIC Key ID mask */
211#define HERMES_TXCTRL_ALT_RTRY (0x0020) 219#define HERMES_TXCTRL_ALT_RTRY (0x0020)
212 220
213/* Inquiry constants and data types */ 221/* Inquiry constants and data types */
@@ -302,6 +310,40 @@ union hermes_scan_info {
302 struct symbol_scan_apinfo s; 310 struct symbol_scan_apinfo s;
303}; 311};
304 312
313/* Extended scan struct for HERMES_INQ_CHANNELINFO.
314 * wl_lkm calls this an ACS scan (Automatic Channel Select).
315 * Keep out of union hermes_scan_info because it is much bigger than
316 * the older scan structures. */
317struct agere_ext_scan_info {
318 __le16 reserved0;
319
320 u8 noise;
321 u8 level;
322 u8 rx_flow;
323 u8 rate;
324 __le16 reserved1[2];
325
326 __le16 frame_control;
327 __le16 dur_id;
328 u8 addr1[ETH_ALEN];
329 u8 addr2[ETH_ALEN];
330 u8 bssid[ETH_ALEN];
331 __le16 sequence;
332 u8 addr4[ETH_ALEN];
333
334 __le16 data_length;
335
336 /* Next 3 fields do not get filled in. */
337 u8 daddr[ETH_ALEN];
338 u8 saddr[ETH_ALEN];
339 __le16 len_type;
340
341 __le64 timestamp;
342 __le16 beacon_interval;
343 __le16 capabilities;
344 u8 data[316];
345} __attribute__ ((packed));
346
305#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000) 347#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
306#define HERMES_LINKSTATUS_CONNECTED (0x0001) 348#define HERMES_LINKSTATUS_CONNECTED (0x0001)
307#define HERMES_LINKSTATUS_DISCONNECTED (0x0002) 349#define HERMES_LINKSTATUS_DISCONNECTED (0x0002)
@@ -353,6 +395,9 @@ void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing);
353int hermes_init(hermes_t *hw); 395int hermes_init(hermes_t *hw);
354int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0, 396int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
355 struct hermes_response *resp); 397 struct hermes_response *resp);
398int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
399 u16 parm0, u16 parm1, u16 parm2,
400 struct hermes_response *resp);
356int hermes_allocate(hermes_t *hw, u16 size, u16 *fid); 401int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
357 402
358int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len, 403int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
diff --git a/drivers/net/wireless/hermes_dld.c b/drivers/net/wireless/hermes_dld.c
new file mode 100644
index 000000000000..d8c626e61a3a
--- /dev/null
+++ b/drivers/net/wireless/hermes_dld.c
@@ -0,0 +1,730 @@
1/*
2 * Hermes download helper driver.
3 *
4 * This could be entirely merged into hermes.c.
5 *
6 * I'm keeping it separate to minimise the amount of merging between
7 * kernel upgrades. It also means the memory overhead for drivers that
8 * don't need firmware download low.
9 *
10 * This driver:
11 * - is capable of writing to the volatile area of the hermes device
12 * - is currently not capable of writing to non-volatile areas
13 * - provide helpers to identify and update plugin data
14 * - is not capable of interpreting a fw image directly. That is up to
15 * the main card driver.
16 * - deals with Hermes I devices. It can probably be modified to deal
17 * with Hermes II devices
18 *
19 * Copyright (C) 2007, David Kilroy
20 *
21 * Plug data code slightly modified from spectrum_cs driver
22 * Copyright (C) 2002-2005 Pavel Roskin <proski@gnu.org>
23 * Portions based on information in wl_lkm_718 Agere driver
24 * COPYRIGHT (C) 2001-2004 by Agere Systems Inc. All Rights Reserved
25 *
26 * The contents of this file are subject to the Mozilla Public License
27 * Version 1.1 (the "License"); you may not use this file except in
28 * compliance with the License. You may obtain a copy of the License
29 * at http://www.mozilla.org/MPL/
30 *
31 * Software distributed under the License is distributed on an "AS IS"
32 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
33 * the License for the specific language governing rights and
34 * limitations under the License.
35 *
36 * Alternatively, the contents of this file may be used under the
37 * terms of the GNU General Public License version 2 (the "GPL"), in
38 * which case the provisions of the GPL are applicable instead of the
39 * above. If you wish to allow the use of your version of this file
40 * only under the terms of the GPL and not to allow others to use your
41 * version of this file under the MPL, indicate your decision by
42 * deleting the provisions above and replace them with the notice and
43 * other provisions required by the GPL. If you do not delete the
44 * provisions above, a recipient may use your version of this file
45 * under either the MPL or the GPL.
46 */
47
48#include <linux/module.h>
49#include <linux/delay.h>
50#include "hermes.h"
51#include "hermes_dld.h"
52
53MODULE_DESCRIPTION("Download helper for Lucent Hermes chipset");
54MODULE_AUTHOR("David Kilroy <kilroyd@gmail.com>");
55MODULE_LICENSE("Dual MPL/GPL");
56
57#define PFX "hermes_dld: "
58
59/*
60 * AUX port access. To unlock the AUX port write the access keys to the
61 * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
62 * register. Then read it and make sure it's HERMES_AUX_ENABLED.
63 */
64#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
65#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
66#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
67#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
68
69#define HERMES_AUX_PW0 0xFE01
70#define HERMES_AUX_PW1 0xDC23
71#define HERMES_AUX_PW2 0xBA45
72
73/* HERMES_CMD_DOWNLD */
74#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
75#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
76#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
77#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
78
79/* End markers used in dblocks */
80#define PDI_END 0x00000000 /* End of PDA */
81#define BLOCK_END 0xFFFFFFFF /* Last image block */
82#define TEXT_END 0x1A /* End of text header */
83
84/*
85 * PDA == Production Data Area
86 *
87 * In principle, the max. size of the PDA is is 4096 words. Currently,
88 * however, only about 500 bytes of this area are used.
89 *
90 * Some USB implementations can't handle sizes in excess of 1016. Note
91 * that PDA is not actually used in those USB environments, but may be
92 * retrieved by common code.
93 */
94#define MAX_PDA_SIZE 1000
95
96/* Limit the amout we try to download in a single shot.
97 * Size is in bytes.
98 */
99#define MAX_DL_SIZE 1024
100#define LIMIT_PROGRAM_SIZE 0
101
102/*
103 * The following structures have little-endian fields denoted by
104 * the leading underscore. Don't access them directly - use inline
105 * functions defined below.
106 */
107
108/*
109 * The binary image to be downloaded consists of series of data blocks.
110 * Each block has the following structure.
111 */
112struct dblock {
113 __le32 addr; /* adapter address where to write the block */
114 __le16 len; /* length of the data only, in bytes */
115 char data[0]; /* data to be written */
116} __attribute__ ((packed));
117
118/*
119 * Plug Data References are located in in the image after the last data
120 * block. They refer to areas in the adapter memory where the plug data
121 * items with matching ID should be written.
122 */
123struct pdr {
124 __le32 id; /* record ID */
125 __le32 addr; /* adapter address where to write the data */
126 __le32 len; /* expected length of the data, in bytes */
127 char next[0]; /* next PDR starts here */
128} __attribute__ ((packed));
129
130/*
131 * Plug Data Items are located in the EEPROM read from the adapter by
132 * primary firmware. They refer to the device-specific data that should
133 * be plugged into the secondary firmware.
134 */
135struct pdi {
136 __le16 len; /* length of ID and data, in words */
137 __le16 id; /* record ID */
138 char data[0]; /* plug data */
139} __attribute__ ((packed));
140
141/*** FW data block access functions ***/
142
143static inline u32
144dblock_addr(const struct dblock *blk)
145{
146 return le32_to_cpu(blk->addr);
147}
148
149static inline u32
150dblock_len(const struct dblock *blk)
151{
152 return le16_to_cpu(blk->len);
153}
154
155/*** PDR Access functions ***/
156
157static inline u32
158pdr_id(const struct pdr *pdr)
159{
160 return le32_to_cpu(pdr->id);
161}
162
163static inline u32
164pdr_addr(const struct pdr *pdr)
165{
166 return le32_to_cpu(pdr->addr);
167}
168
169static inline u32
170pdr_len(const struct pdr *pdr)
171{
172 return le32_to_cpu(pdr->len);
173}
174
175/*** PDI Access functions ***/
176
177static inline u32
178pdi_id(const struct pdi *pdi)
179{
180 return le16_to_cpu(pdi->id);
181}
182
183/* Return length of the data only, in bytes */
184static inline u32
185pdi_len(const struct pdi *pdi)
186{
187 return 2 * (le16_to_cpu(pdi->len) - 1);
188}
189
190/*** Hermes AUX control ***/
191
192static inline void
193hermes_aux_setaddr(hermes_t *hw, u32 addr)
194{
195 hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
196 hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
197}
198
199static inline int
200hermes_aux_control(hermes_t *hw, int enabled)
201{
202 int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
203 int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
204 int i;
205
206 /* Already open? */
207 if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
208 return 0;
209
210 hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
211 hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
212 hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
213 hermes_write_reg(hw, HERMES_CONTROL, action);
214
215 for (i = 0; i < 20; i++) {
216 udelay(10);
217 if (hermes_read_reg(hw, HERMES_CONTROL) ==
218 desired_state)
219 return 0;
220 }
221
222 return -EBUSY;
223}
224
225/*** Plug Data Functions ***/
226
227/*
228 * Scan PDR for the record with the specified RECORD_ID.
229 * If it's not found, return NULL.
230 */
231static struct pdr *
232hermes_find_pdr(struct pdr *first_pdr, u32 record_id)
233{
234 struct pdr *pdr = first_pdr;
235 void *end = (void *)first_pdr + MAX_PDA_SIZE;
236
237 while (((void *)pdr < end) &&
238 (pdr_id(pdr) != PDI_END)) {
239 /*
240 * PDR area is currently not terminated by PDI_END.
241 * It's followed by CRC records, which have the type
242 * field where PDR has length. The type can be 0 or 1.
243 */
244 if (pdr_len(pdr) < 2)
245 return NULL;
246
247 /* If the record ID matches, we are done */
248 if (pdr_id(pdr) == record_id)
249 return pdr;
250
251 pdr = (struct pdr *) pdr->next;
252 }
253 return NULL;
254}
255
256/* Scan production data items for a particular entry */
257static struct pdi *
258hermes_find_pdi(struct pdi *first_pdi, u32 record_id)
259{
260 struct pdi *pdi = first_pdi;
261
262 while (pdi_id(pdi) != PDI_END) {
263
264 /* If the record ID matches, we are done */
265 if (pdi_id(pdi) == record_id)
266 return pdi;
267
268 pdi = (struct pdi *) &pdi->data[pdi_len(pdi)];
269 }
270 return NULL;
271}
272
273/* Process one Plug Data Item - find corresponding PDR and plug it */
274static int
275hermes_plug_pdi(hermes_t *hw, struct pdr *first_pdr, const struct pdi *pdi)
276{
277 struct pdr *pdr;
278
279 /* Find the PDR corresponding to this PDI */
280 pdr = hermes_find_pdr(first_pdr, pdi_id(pdi));
281
282 /* No match is found, safe to ignore */
283 if (!pdr)
284 return 0;
285
286 /* Lengths of the data in PDI and PDR must match */
287 if (pdi_len(pdi) != pdr_len(pdr))
288 return -EINVAL;
289
290 /* do the actual plugging */
291 hermes_aux_setaddr(hw, pdr_addr(pdr));
292 hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
293
294 return 0;
295}
296
297/* Read PDA from the adapter */
298int hermes_read_pda(hermes_t *hw,
299 __le16 *pda,
300 u32 pda_addr,
301 u16 pda_len,
302 int use_eeprom) /* can we get this into hw? */
303{
304 int ret;
305 u16 pda_size;
306 u16 data_len = pda_len;
307 __le16 *data = pda;
308
309 if (use_eeprom) {
310 /* PDA of spectrum symbol is in eeprom */
311
312 /* Issue command to read EEPROM */
313 ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
314 if (ret)
315 return ret;
316 } else {
317 /* wl_lkm does not include PDA size in the PDA area.
318 * We will pad the information into pda, so other routines
319 * don't have to be modified */
320 pda[0] = cpu_to_le16(pda_len - 2);
321 /* Includes CFG_PROD_DATA but not itself */
322 pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
323 data_len = pda_len - 4;
324 data = pda + 2;
325 }
326
327 /* Open auxiliary port */
328 ret = hermes_aux_control(hw, 1);
329 printk(KERN_DEBUG PFX "AUX enable returned %d\n", ret);
330 if (ret)
331 return ret;
332
333 /* read PDA from EEPROM */
334 hermes_aux_setaddr(hw, pda_addr);
335 hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
336
337 /* Close aux port */
338 ret = hermes_aux_control(hw, 0);
339 printk(KERN_DEBUG PFX "AUX disable returned %d\n", ret);
340
341 /* Check PDA length */
342 pda_size = le16_to_cpu(pda[0]);
343 printk(KERN_DEBUG PFX "Actual PDA length %d, Max allowed %d\n",
344 pda_size, pda_len);
345 if (pda_size > pda_len)
346 return -EINVAL;
347
348 return 0;
349}
350EXPORT_SYMBOL(hermes_read_pda);
351
352/* Parse PDA and write the records into the adapter
353 *
354 * Attempt to write every records that is in the specified pda
355 * which also has a valid production data record for the firmware.
356 */
357int hermes_apply_pda(hermes_t *hw,
358 const char *first_pdr,
359 const __le16 *pda)
360{
361 int ret;
362 const struct pdi *pdi;
363 struct pdr *pdr;
364
365 pdr = (struct pdr *) first_pdr;
366
367 /* Go through every PDI and plug them into the adapter */
368 pdi = (const struct pdi *) (pda + 2);
369 while (pdi_id(pdi) != PDI_END) {
370 ret = hermes_plug_pdi(hw, pdr, pdi);
371 if (ret)
372 return ret;
373
374 /* Increment to the next PDI */
375 pdi = (const struct pdi *) &pdi->data[pdi_len(pdi)];
376 }
377 return 0;
378}
379EXPORT_SYMBOL(hermes_apply_pda);
380
381/* Identify the total number of bytes in all blocks
382 * including the header data.
383 */
384size_t
385hermes_blocks_length(const char *first_block)
386{
387 const struct dblock *blk = (const struct dblock *) first_block;
388 int total_len = 0;
389 int len;
390
391 /* Skip all blocks to locate Plug Data References
392 * (Spectrum CS) */
393 while (dblock_addr(blk) != BLOCK_END) {
394 len = dblock_len(blk);
395 total_len += sizeof(*blk) + len;
396 blk = (struct dblock *) &blk->data[len];
397 }
398
399 return total_len;
400}
401EXPORT_SYMBOL(hermes_blocks_length);
402
403/*** Hermes programming ***/
404
405/* About to start programming data (Hermes I)
406 * offset is the entry point
407 *
408 * Spectrum_cs' Symbol fw does not require this
409 * wl_lkm Agere fw does
410 * Don't know about intersil
411 */
412int hermesi_program_init(hermes_t *hw, u32 offset)
413{
414 int err;
415
416 /* Disable interrupts?*/
417 /*hw->inten = 0x0;*/
418 /*hermes_write_regn(hw, INTEN, 0);*/
419 /*hermes_set_irqmask(hw, 0);*/
420
421 /* Acknowledge any outstanding command */
422 hermes_write_regn(hw, EVACK, 0xFFFF);
423
424 /* Using doicmd_wait rather than docmd_wait */
425 err = hermes_doicmd_wait(hw,
426 0x0100 | HERMES_CMD_INIT,
427 0, 0, 0, NULL);
428 if (err)
429 return err;
430
431 err = hermes_doicmd_wait(hw,
432 0x0000 | HERMES_CMD_INIT,
433 0, 0, 0, NULL);
434 if (err)
435 return err;
436
437 err = hermes_aux_control(hw, 1);
438 printk(KERN_DEBUG PFX "AUX enable returned %d\n", err);
439
440 if (err)
441 return err;
442
443 printk(KERN_DEBUG PFX "Enabling volatile, EP 0x%08x\n", offset);
444 err = hermes_doicmd_wait(hw,
445 HERMES_PROGRAM_ENABLE_VOLATILE,
446 offset & 0xFFFFu,
447 offset >> 16,
448 0,
449 NULL);
450 printk(KERN_DEBUG PFX "PROGRAM_ENABLE returned %d\n",
451 err);
452
453 return err;
454}
455EXPORT_SYMBOL(hermesi_program_init);
456
457/* Done programming data (Hermes I)
458 *
459 * Spectrum_cs' Symbol fw does not require this
460 * wl_lkm Agere fw does
461 * Don't know about intersil
462 */
463int hermesi_program_end(hermes_t *hw)
464{
465 struct hermes_response resp;
466 int rc = 0;
467 int err;
468
469 rc = hermes_docmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
470
471 printk(KERN_DEBUG PFX "PROGRAM_DISABLE returned %d, "
472 "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
473 rc, resp.resp0, resp.resp1, resp.resp2);
474
475 if ((rc == 0) &&
476 ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
477 rc = -EIO;
478
479 err = hermes_aux_control(hw, 0);
480 printk(KERN_DEBUG PFX "AUX disable returned %d\n", err);
481
482 /* Acknowledge any outstanding command */
483 hermes_write_regn(hw, EVACK, 0xFFFF);
484
485 /* Reinitialise, ignoring return */
486 (void) hermes_doicmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
487 0, 0, 0, NULL);
488
489 return rc ? rc : err;
490}
491EXPORT_SYMBOL(hermesi_program_end);
492
493/* Program the data blocks */
494int hermes_program(hermes_t *hw, const char *first_block, const char *end)
495{
496 const struct dblock *blk;
497 u32 blkaddr;
498 u32 blklen;
499#if LIMIT_PROGRAM_SIZE
500 u32 addr;
501 u32 len;
502#endif
503
504 blk = (const struct dblock *) first_block;
505
506 if ((const char *) blk > (end - sizeof(*blk)))
507 return -EIO;
508
509 blkaddr = dblock_addr(blk);
510 blklen = dblock_len(blk);
511
512 while ((blkaddr != BLOCK_END) &&
513 (((const char *) blk + blklen) <= end)) {
514 printk(KERN_DEBUG PFX
515 "Programming block of length %d to address 0x%08x\n",
516 blklen, blkaddr);
517
518#if !LIMIT_PROGRAM_SIZE
519 /* wl_lkm driver splits this into writes of 2000 bytes */
520 hermes_aux_setaddr(hw, blkaddr);
521 hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
522 blklen);
523#else
524 len = (blklen < MAX_DL_SIZE) ? blklen : MAX_DL_SIZE;
525 addr = blkaddr;
526
527 while (addr < (blkaddr + blklen)) {
528 printk(KERN_DEBUG PFX
529 "Programming subblock of length %d "
530 "to address 0x%08x. Data @ %p\n",
531 len, addr, &blk->data[addr - blkaddr]);
532
533 hermes_aux_setaddr(hw, addr);
534 hermes_write_bytes(hw, HERMES_AUXDATA,
535 &blk->data[addr - blkaddr],
536 len);
537
538 addr += len;
539 len = ((blkaddr + blklen - addr) < MAX_DL_SIZE) ?
540 (blkaddr + blklen - addr) : MAX_DL_SIZE;
541 }
542#endif
543 blk = (const struct dblock *) &blk->data[blklen];
544
545 if ((const char *) blk > (end - sizeof(*blk)))
546 return -EIO;
547
548 blkaddr = dblock_addr(blk);
549 blklen = dblock_len(blk);
550 }
551 return 0;
552}
553EXPORT_SYMBOL(hermes_program);
554
555static int __init init_hermes_dld(void)
556{
557 return 0;
558}
559
560static void __exit exit_hermes_dld(void)
561{
562}
563
564module_init(init_hermes_dld);
565module_exit(exit_hermes_dld);
566
567/*** Default plugging data for Hermes I ***/
568/* Values from wl_lkm_718/hcf/dhf.c */
569
570#define DEFINE_DEFAULT_PDR(pid, length, data) \
571static const struct { \
572 __le16 len; \
573 __le16 id; \
574 u8 val[length]; \
575} __attribute__ ((packed)) default_pdr_data_##pid = { \
576 __constant_cpu_to_le16((sizeof(default_pdr_data_##pid)/ \
577 sizeof(__le16)) - 1), \
578 __constant_cpu_to_le16(pid), \
579 data \
580}
581
582#define DEFAULT_PDR(pid) default_pdr_data_##pid
583
584/* HWIF Compatiblity */
585DEFINE_DEFAULT_PDR(0x0005, 10, "\x00\x00\x06\x00\x01\x00\x01\x00\x01\x00");
586
587/* PPPPSign */
588DEFINE_DEFAULT_PDR(0x0108, 4, "\x00\x00\x00\x00");
589
590/* PPPPProf */
591DEFINE_DEFAULT_PDR(0x0109, 10, "\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00");
592
593/* Antenna diversity */
594DEFINE_DEFAULT_PDR(0x0150, 2, "\x00\x3F");
595
596/* Modem VCO band Set-up */
597DEFINE_DEFAULT_PDR(0x0160, 28,
598 "\x00\x00\x00\x00\x00\x00\x00\x00"
599 "\x00\x00\x00\x00\x00\x00\x00\x00"
600 "\x00\x00\x00\x00\x00\x00\x00\x00"
601 "\x00\x00\x00\x00");
602
603/* Modem Rx Gain Table Values */
604DEFINE_DEFAULT_PDR(0x0161, 256,
605 "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
606 "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
607 "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
608 "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
609 "\x3F\x01\x3E\01\x3E\x01\x3D\x01"
610 "\x3D\x01\x3C\01\x3C\x01\x3B\x01"
611 "\x3B\x01\x3A\01\x3A\x01\x39\x01"
612 "\x39\x01\x38\01\x38\x01\x37\x01"
613 "\x37\x01\x36\01\x36\x01\x35\x01"
614 "\x35\x01\x34\01\x34\x01\x33\x01"
615 "\x33\x01\x32\x01\x32\x01\x31\x01"
616 "\x31\x01\x30\x01\x30\x01\x7B\x01"
617 "\x7B\x01\x7A\x01\x7A\x01\x79\x01"
618 "\x79\x01\x78\x01\x78\x01\x77\x01"
619 "\x77\x01\x76\x01\x76\x01\x75\x01"
620 "\x75\x01\x74\x01\x74\x01\x73\x01"
621 "\x73\x01\x72\x01\x72\x01\x71\x01"
622 "\x71\x01\x70\x01\x70\x01\x68\x01"
623 "\x68\x01\x67\x01\x67\x01\x66\x01"
624 "\x66\x01\x65\x01\x65\x01\x57\x01"
625 "\x57\x01\x56\x01\x56\x01\x55\x01"
626 "\x55\x01\x54\x01\x54\x01\x53\x01"
627 "\x53\x01\x52\x01\x52\x01\x51\x01"
628 "\x51\x01\x50\x01\x50\x01\x48\x01"
629 "\x48\x01\x47\x01\x47\x01\x46\x01"
630 "\x46\x01\x45\x01\x45\x01\x44\x01"
631 "\x44\x01\x43\x01\x43\x01\x42\x01"
632 "\x42\x01\x41\x01\x41\x01\x40\x01"
633 "\x40\x01\x40\x01\x40\x01\x40\x01"
634 "\x40\x01\x40\x01\x40\x01\x40\x01"
635 "\x40\x01\x40\x01\x40\x01\x40\x01"
636 "\x40\x01\x40\x01\x40\x01\x40\x01");
637
638/* Write PDA according to certain rules.
639 *
640 * For every production data record, look for a previous setting in
641 * the pda, and use that.
642 *
643 * For certain records, use defaults if they are not found in pda.
644 */
645int hermes_apply_pda_with_defaults(hermes_t *hw,
646 const char *first_pdr,
647 const __le16 *pda)
648{
649 const struct pdr *pdr = (const struct pdr *) first_pdr;
650 struct pdi *first_pdi = (struct pdi *) &pda[2];
651 struct pdi *pdi;
652 struct pdi *default_pdi = NULL;
653 struct pdi *outdoor_pdi;
654 void *end = (void *)first_pdr + MAX_PDA_SIZE;
655 int record_id;
656
657 while (((void *)pdr < end) &&
658 (pdr_id(pdr) != PDI_END)) {
659 /*
660 * For spectrum_cs firmwares,
661 * PDR area is currently not terminated by PDI_END.
662 * It's followed by CRC records, which have the type
663 * field where PDR has length. The type can be 0 or 1.
664 */
665 if (pdr_len(pdr) < 2)
666 break;
667 record_id = pdr_id(pdr);
668
669 pdi = hermes_find_pdi(first_pdi, record_id);
670 if (pdi)
671 printk(KERN_DEBUG PFX "Found record 0x%04x at %p\n",
672 record_id, pdi);
673
674 switch (record_id) {
675 case 0x110: /* Modem REFDAC values */
676 case 0x120: /* Modem VGDAC values */
677 outdoor_pdi = hermes_find_pdi(first_pdi, record_id + 1);
678 default_pdi = NULL;
679 if (outdoor_pdi) {
680 pdi = outdoor_pdi;
681 printk(KERN_DEBUG PFX
682 "Using outdoor record 0x%04x at %p\n",
683 record_id + 1, pdi);
684 }
685 break;
686 case 0x5: /* HWIF Compatiblity */
687 default_pdi = (struct pdi *) &DEFAULT_PDR(0x0005);
688 break;
689 case 0x108: /* PPPPSign */
690 default_pdi = (struct pdi *) &DEFAULT_PDR(0x0108);
691 break;
692 case 0x109: /* PPPPProf */
693 default_pdi = (struct pdi *) &DEFAULT_PDR(0x0109);
694 break;
695 case 0x150: /* Antenna diversity */
696 default_pdi = (struct pdi *) &DEFAULT_PDR(0x0150);
697 break;
698 case 0x160: /* Modem VCO band Set-up */
699 default_pdi = (struct pdi *) &DEFAULT_PDR(0x0160);
700 break;
701 case 0x161: /* Modem Rx Gain Table Values */
702 default_pdi = (struct pdi *) &DEFAULT_PDR(0x0161);
703 break;
704 default:
705 default_pdi = NULL;
706 break;
707 }
708 if (!pdi && default_pdi) {
709 /* Use default */
710 pdi = default_pdi;
711 printk(KERN_DEBUG PFX
712 "Using default record 0x%04x at %p\n",
713 record_id, pdi);
714 }
715
716 if (pdi) {
717 /* Lengths of the data in PDI and PDR must match */
718 if (pdi_len(pdi) == pdr_len(pdr)) {
719 /* do the actual plugging */
720 hermes_aux_setaddr(hw, pdr_addr(pdr));
721 hermes_write_bytes(hw, HERMES_AUXDATA,
722 pdi->data, pdi_len(pdi));
723 }
724 }
725
726 pdr++;
727 }
728 return 0;
729}
730EXPORT_SYMBOL(hermes_apply_pda_with_defaults);
diff --git a/drivers/net/wireless/hermes_dld.h b/drivers/net/wireless/hermes_dld.h
new file mode 100644
index 000000000000..6fcb26277999
--- /dev/null
+++ b/drivers/net/wireless/hermes_dld.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright (C) 2007, David Kilroy
3 *
4 * The contents of this file are subject to the Mozilla Public License
5 * Version 1.1 (the "License"); you may not use this file except in
6 * compliance with the License. You may obtain a copy of the License
7 * at http://www.mozilla.org/MPL/
8 *
9 * Software distributed under the License is distributed on an "AS IS"
10 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
11 * the License for the specific language governing rights and
12 * limitations under the License.
13 *
14 * Alternatively, the contents of this file may be used under the
15 * terms of the GNU General Public License version 2 (the "GPL"), in
16 * which case the provisions of the GPL are applicable instead of the
17 * above. If you wish to allow the use of your version of this file
18 * only under the terms of the GPL and not to allow others to use your
19 * version of this file under the MPL, indicate your decision by
20 * deleting the provisions above and replace them with the notice and
21 * other provisions required by the GPL. If you do not delete the
22 * provisions above, a recipient may use your version of this file
23 * under either the MPL or the GPL.
24 */
25#ifndef _HERMES_DLD_H
26#define _HERMES_DLD_H
27
28#include "hermes.h"
29
30int hermesi_program_init(hermes_t *hw, u32 offset);
31int hermesi_program_end(hermes_t *hw);
32int hermes_program(hermes_t *hw, const char *first_block, const char *end);
33
34int hermes_read_pda(hermes_t *hw,
35 __le16 *pda,
36 u32 pda_addr,
37 u16 pda_len,
38 int use_eeprom);
39int hermes_apply_pda(hermes_t *hw,
40 const char *first_pdr,
41 const __le16 *pda);
42int hermes_apply_pda_with_defaults(hermes_t *hw,
43 const char *first_pdr,
44 const __le16 *pda);
45
46size_t hermes_blocks_length(const char *first_block);
47
48#endif /* _HERMES_DLD_H */
diff --git a/drivers/net/wireless/hermes_rid.h b/drivers/net/wireless/hermes_rid.h
index 4f46b4809e55..42eb67dea1df 100644
--- a/drivers/net/wireless/hermes_rid.h
+++ b/drivers/net/wireless/hermes_rid.h
@@ -30,6 +30,7 @@
30#define HERMES_RID_CNFWEPENABLED_AGERE 0xFC20 30#define HERMES_RID_CNFWEPENABLED_AGERE 0xFC20
31#define HERMES_RID_CNFAUTHENTICATION_AGERE 0xFC21 31#define HERMES_RID_CNFAUTHENTICATION_AGERE 0xFC21
32#define HERMES_RID_CNFMANDATORYBSSID_SYMBOL 0xFC21 32#define HERMES_RID_CNFMANDATORYBSSID_SYMBOL 0xFC21
33#define HERMES_RID_CNFDROPUNENCRYPTED 0xFC22
33#define HERMES_RID_CNFWEPDEFAULTKEYID 0xFC23 34#define HERMES_RID_CNFWEPDEFAULTKEYID 0xFC23
34#define HERMES_RID_CNFDEFAULTKEY0 0xFC24 35#define HERMES_RID_CNFDEFAULTKEY0 0xFC24
35#define HERMES_RID_CNFDEFAULTKEY1 0xFC25 36#define HERMES_RID_CNFDEFAULTKEY1 0xFC25
@@ -85,6 +86,16 @@
85#define HERMES_RID_CNFSCANSSID_AGERE 0xFCB2 86#define HERMES_RID_CNFSCANSSID_AGERE 0xFCB2
86#define HERMES_RID_CNFBASICRATES 0xFCB3 87#define HERMES_RID_CNFBASICRATES 0xFCB3
87#define HERMES_RID_CNFSUPPORTEDRATES 0xFCB4 88#define HERMES_RID_CNFSUPPORTEDRATES 0xFCB4
89#define HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE 0xFCB4
90#define HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE 0xFCB5
91#define HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE 0xFCB6
92#define HERMES_RID_CNFADDMAPPEDTKIPKEY_AGERE 0xFCB7
93#define HERMES_RID_CNFREMMAPPEDTKIPKEY_AGERE 0xFCB8
94#define HERMES_RID_CNFSETWPACAPABILITIES_AGERE 0xFCB9
95#define HERMES_RID_CNFCACHEDPMKADDRESS 0xFCBA
96#define HERMES_RID_CNFREMOVEPMKADDRESS 0xFCBB
97#define HERMES_RID_CNFSCANCHANNELS2GHZ 0xFCC2
98#define HERMES_RID_CNFDISASSOCIATE 0xFCC8
88#define HERMES_RID_CNFTICKTIME 0xFCE0 99#define HERMES_RID_CNFTICKTIME 0xFCE0
89#define HERMES_RID_CNFSCANREQUEST 0xFCE1 100#define HERMES_RID_CNFSCANREQUEST 0xFCE1
90#define HERMES_RID_CNFJOINREQUEST 0xFCE2 101#define HERMES_RID_CNFJOINREQUEST 0xFCE2
@@ -137,6 +148,12 @@
137#define HERMES_RID_CURRENTTXRATE6 0xFD85 148#define HERMES_RID_CURRENTTXRATE6 0xFD85
138#define HERMES_RID_OWNMACADDR 0xFD86 149#define HERMES_RID_OWNMACADDR 0xFD86
139#define HERMES_RID_SCANRESULTSTABLE 0xFD88 150#define HERMES_RID_SCANRESULTSTABLE 0xFD88
151#define HERMES_RID_CURRENT_COUNTRY_INFO 0xFD89
152#define HERMES_RID_CURRENT_WPA_IE 0xFD8A
153#define HERMES_RID_CURRENT_TKIP_IV 0xFD8B
154#define HERMES_RID_CURRENT_ASSOC_REQ_INFO 0xFD8C
155#define HERMES_RID_CURRENT_ASSOC_RESP_INFO 0xFD8D
156#define HERMES_RID_TXQUEUEEMPTY 0xFD91
140#define HERMES_RID_PHYTYPE 0xFDC0 157#define HERMES_RID_PHYTYPE 0xFDC0
141#define HERMES_RID_CURRENTCHANNEL 0xFDC1 158#define HERMES_RID_CURRENTCHANNEL 0xFDC1
142#define HERMES_RID_CURRENTPOWERSTATE 0xFDC2 159#define HERMES_RID_CURRENTPOWERSTATE 0xFDC2
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 19a401c4a0dc..bca74811bc7f 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -211,7 +211,7 @@ static u32 ipw2100_debug_level = IPW_DL_NONE;
211do { \ 211do { \
212 if (ipw2100_debug_level & (level)) { \ 212 if (ipw2100_debug_level & (level)) { \
213 printk(KERN_DEBUG "ipw2100: %c %s ", \ 213 printk(KERN_DEBUG "ipw2100: %c %s ", \
214 in_interrupt() ? 'I' : 'U', __FUNCTION__); \ 214 in_interrupt() ? 'I' : 'U', __func__); \
215 printk(message); \ 215 printk(message); \
216 } \ 216 } \
217} while (0) 217} while (0)
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index d4ab28b73b32..0bad1ec3e7e0 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1394,13 +1394,13 @@ BIT_ARG16(x)
1394#define IPW_DEBUG(level, fmt, args...) \ 1394#define IPW_DEBUG(level, fmt, args...) \
1395do { if (ipw_debug_level & (level)) \ 1395do { if (ipw_debug_level & (level)) \
1396 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ 1396 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
1397 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 1397 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
1398 1398
1399#ifdef CONFIG_IPW2200_DEBUG 1399#ifdef CONFIG_IPW2200_DEBUG
1400#define IPW_LL_DEBUG(level, fmt, args...) \ 1400#define IPW_LL_DEBUG(level, fmt, args...) \
1401do { if (ipw_debug_level & (level)) \ 1401do { if (ipw_debug_level & (level)) \
1402 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ 1402 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
1403 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 1403 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
1404#else 1404#else
1405#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0) 1405#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0)
1406#endif /* CONFIG_IPW2200_DEBUG */ 1406#endif /* CONFIG_IPW2200_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
index f1d002f7b790..33016fb5e9b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
@@ -34,12 +34,12 @@ extern u32 iwl3945_debug_level;
34#define IWL_DEBUG(level, fmt, args...) \ 34#define IWL_DEBUG(level, fmt, args...) \
35do { if (iwl3945_debug_level & (level)) \ 35do { if (iwl3945_debug_level & (level)) \
36 printk(KERN_ERR DRV_NAME": %c %s " fmt, \ 36 printk(KERN_ERR DRV_NAME": %c %s " fmt, \
37 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 37 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
38 38
39#define IWL_DEBUG_LIMIT(level, fmt, args...) \ 39#define IWL_DEBUG_LIMIT(level, fmt, args...) \
40do { if ((iwl3945_debug_level & (level)) && net_ratelimit()) \ 40do { if ((iwl3945_debug_level & (level)) && net_ratelimit()) \
41 printk(KERN_ERR DRV_NAME": %c %s " fmt, \ 41 printk(KERN_ERR DRV_NAME": %c %s " fmt, \
42 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 42 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
43 43
44static inline void iwl3945_print_hex_dump(int level, void *p, u32 len) 44static inline void iwl3945_print_hex_dump(int level, void *p, u32 len)
45{ 45{
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-io.h b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
index 0b9475114618..b3fe48de3ae7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
@@ -59,7 +59,7 @@
59 * 59 *
60 */ 60 */
61 61
62#define _iwl3945_write32(priv, ofs, val) writel((val), (priv)->hw_base + (ofs)) 62#define _iwl3945_write32(priv, ofs, val) iowrite32((val), (priv)->hw_base + (ofs))
63#ifdef CONFIG_IWL3945_DEBUG 63#ifdef CONFIG_IWL3945_DEBUG
64static inline void __iwl3945_write32(const char *f, u32 l, struct iwl3945_priv *priv, 64static inline void __iwl3945_write32(const char *f, u32 l, struct iwl3945_priv *priv,
65 u32 ofs, u32 val) 65 u32 ofs, u32 val)
@@ -73,14 +73,14 @@ static inline void __iwl3945_write32(const char *f, u32 l, struct iwl3945_priv *
73#define iwl3945_write32(priv, ofs, val) _iwl3945_write32(priv, ofs, val) 73#define iwl3945_write32(priv, ofs, val) _iwl3945_write32(priv, ofs, val)
74#endif 74#endif
75 75
76#define _iwl3945_read32(priv, ofs) readl((priv)->hw_base + (ofs)) 76#define _iwl3945_read32(priv, ofs) ioread32((priv)->hw_base + (ofs))
77#ifdef CONFIG_IWL3945_DEBUG 77#ifdef CONFIG_IWL3945_DEBUG
78static inline u32 __iwl3945_read32(char *f, u32 l, struct iwl3945_priv *priv, u32 ofs) 78static inline u32 __iwl3945_read32(char *f, u32 l, struct iwl3945_priv *priv, u32 ofs)
79{ 79{
80 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l); 80 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l);
81 return _iwl3945_read32(priv, ofs); 81 return _iwl3945_read32(priv, ofs);
82} 82}
83#define iwl3945_read32(priv, ofs) __iwl3945_read32(__FILE__, __LINE__, priv, ofs) 83#define iwl3945_read32(priv, ofs)__iwl3945_read32(__FILE__, __LINE__, priv, ofs)
84#else 84#else
85#define iwl3945_read32(p, o) _iwl3945_read32(p, o) 85#define iwl3945_read32(p, o) _iwl3945_read32(p, o)
86#endif 86#endif
@@ -153,28 +153,10 @@ static inline void __iwl3945_clear_bit(const char *f, u32 l,
153static inline int _iwl3945_grab_nic_access(struct iwl3945_priv *priv) 153static inline int _iwl3945_grab_nic_access(struct iwl3945_priv *priv)
154{ 154{
155 int ret; 155 int ret;
156 u32 gp_ctl;
157
158#ifdef CONFIG_IWL3945_DEBUG 156#ifdef CONFIG_IWL3945_DEBUG
159 if (atomic_read(&priv->restrict_refcnt)) 157 if (atomic_read(&priv->restrict_refcnt))
160 return 0; 158 return 0;
161#endif 159#endif
162 if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
163 test_bit(STATUS_RF_KILL_SW, &priv->status)) {
164 IWL_WARNING("WARNING: Requesting MAC access during RFKILL "
165 "wakes up NIC\n");
166
167 /* 10 msec allows time for NIC to complete its data save */
168 gp_ctl = _iwl3945_read32(priv, CSR_GP_CNTRL);
169 if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
170 IWL_DEBUG_RF_KILL("Wait for complete power-down, "
171 "gpctl = 0x%08x\n", gp_ctl);
172 mdelay(10);
173 } else
174 IWL_DEBUG_RF_KILL("power-down complete, "
175 "gpctl = 0x%08x\n", gp_ctl);
176 }
177
178 /* this bit wakes up the NIC */ 160 /* this bit wakes up the NIC */
179 _iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 161 _iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
180 ret = _iwl3945_poll_bit(priv, CSR_GP_CNTRL, 162 ret = _iwl3945_poll_bit(priv, CSR_GP_CNTRL,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 10c64bdb314c..a279bf1dc9b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -65,6 +65,9 @@ struct iwl3945_rs_sta {
65 u8 ibss_sta_added; 65 u8 ibss_sta_added;
66 struct timer_list rate_scale_flush; 66 struct timer_list rate_scale_flush;
67 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT]; 67 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT];
68
69 /* used to be in sta_info */
70 int last_txrate_idx;
68}; 71};
69 72
70static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT] = { 73static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT] = {
@@ -319,6 +322,7 @@ static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
319static void rs_rate_init(void *priv_rate, void *priv_sta, 322static void rs_rate_init(void *priv_rate, void *priv_sta,
320 struct ieee80211_local *local, struct sta_info *sta) 323 struct ieee80211_local *local, struct sta_info *sta)
321{ 324{
325 struct iwl3945_rs_sta *rs_sta = (void *)sta->rate_ctrl_priv;
322 int i; 326 int i;
323 327
324 IWL_DEBUG_RATE("enter\n"); 328 IWL_DEBUG_RATE("enter\n");
@@ -329,17 +333,15 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
329 * after assoc.. */ 333 * after assoc.. */
330 334
331 for (i = IWL_RATE_COUNT - 1; i >= 0; i--) { 335 for (i = IWL_RATE_COUNT - 1; i >= 0; i--) {
332 if (sta->supp_rates[local->hw.conf.channel->band] & (1 << i)) { 336 if (sta->sta.supp_rates[local->hw.conf.channel->band] & (1 << i)) {
333 sta->txrate_idx = i; 337 rs_sta->last_txrate_idx = i;
334 break; 338 break;
335 } 339 }
336 } 340 }
337 341
338 sta->last_txrate_idx = sta->txrate_idx;
339
340 /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */ 342 /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
341 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) 343 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
342 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 344 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
343 345
344 IWL_DEBUG_RATE("leave\n"); 346 IWL_DEBUG_RATE("leave\n");
345} 347}
@@ -674,15 +676,15 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
674 return; 676 return;
675 } 677 }
676 678
677 rate_mask = sta->supp_rates[sband->band]; 679 rs_sta = (void *)sta->rate_ctrl_priv;
678 index = min(sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1); 680
681 rate_mask = sta->sta.supp_rates[sband->band];
682 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1);
679 683
680 if (sband->band == IEEE80211_BAND_5GHZ) 684 if (sband->band == IEEE80211_BAND_5GHZ)
681 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE; 685 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
682 686
683 rs_sta = (void *)sta->rate_ctrl_priv; 687 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
684
685 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
686 !rs_sta->ibss_sta_added) { 688 !rs_sta->ibss_sta_added) {
687 u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1); 689 u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
688 690
@@ -803,17 +805,15 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
803 805
804 out: 806 out:
805 807
806 sta->last_txrate_idx = index; 808 rs_sta->last_txrate_idx = index;
807 if (sband->band == IEEE80211_BAND_5GHZ) 809 if (sband->band == IEEE80211_BAND_5GHZ)
808 sta->txrate_idx = sta->last_txrate_idx - IWL_FIRST_OFDM_RATE; 810 sel->rate_idx = rs_sta->last_txrate_idx - IWL_FIRST_OFDM_RATE;
809 else 811 else
810 sta->txrate_idx = sta->last_txrate_idx; 812 sel->rate_idx = rs_sta->last_txrate_idx;
811 813
812 rcu_read_unlock(); 814 rcu_read_unlock();
813 815
814 IWL_DEBUG_RATE("leave: %d\n", index); 816 IWL_DEBUG_RATE("leave: %d\n", index);
815
816 sel->rate_idx = sta->txrate_idx;
817} 817}
818 818
819static struct rate_control_ops rs_ops = { 819static struct rate_control_ops rs_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 3f51f3635344..7ca5627cc078 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -520,10 +520,10 @@ static int iwl3945_is_network_packet(struct iwl3945_priv *priv,
520 /* Filter incoming packets to determine if they are targeted toward 520 /* Filter incoming packets to determine if they are targeted toward
521 * this network, discarding packets coming from ourselves */ 521 * this network, discarding packets coming from ourselves */
522 switch (priv->iw_mode) { 522 switch (priv->iw_mode) {
523 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */ 523 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
524 /* packets to our IBSS update information */ 524 /* packets to our IBSS update information */
525 return !compare_ether_addr(header->addr3, priv->bssid); 525 return !compare_ether_addr(header->addr3, priv->bssid);
526 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */ 526 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
527 /* packets to our IBSS update information */ 527 /* packets to our IBSS update information */
528 return !compare_ether_addr(header->addr2, priv->bssid); 528 return !compare_ether_addr(header->addr2, priv->bssid);
529 default: 529 default:
@@ -531,99 +531,6 @@ static int iwl3945_is_network_packet(struct iwl3945_priv *priv,
531 } 531 }
532} 532}
533 533
534static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
535 struct sk_buff *skb,
536 struct iwl3945_rx_frame_hdr *rx_hdr,
537 struct ieee80211_rx_status *stats)
538{
539 /* First cache any information we need before we overwrite
540 * the information provided in the skb from the hardware */
541 s8 signal = stats->signal;
542 s8 noise = 0;
543 int rate = stats->rate_idx;
544 u64 tsf = stats->mactime;
545 __le16 phy_flags_hw = rx_hdr->phy_flags, antenna;
546
547 struct iwl3945_rt_rx_hdr {
548 struct ieee80211_radiotap_header rt_hdr;
549 __le64 rt_tsf; /* TSF */
550 u8 rt_flags; /* radiotap packet flags */
551 u8 rt_rate; /* rate in 500kb/s */
552 __le16 rt_channelMHz; /* channel in MHz */
553 __le16 rt_chbitmask; /* channel bitfield */
554 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
555 s8 rt_dbmnoise;
556 u8 rt_antenna; /* antenna number */
557 } __attribute__ ((packed)) *iwl3945_rt;
558
559 if (skb_headroom(skb) < sizeof(*iwl3945_rt)) {
560 if (net_ratelimit())
561 printk(KERN_ERR "not enough headroom [%d] for "
562 "radiotap head [%zd]\n",
563 skb_headroom(skb), sizeof(*iwl3945_rt));
564 return;
565 }
566
567 /* put radiotap header in front of 802.11 header and data */
568 iwl3945_rt = (void *)skb_push(skb, sizeof(*iwl3945_rt));
569
570 /* initialise radiotap header */
571 iwl3945_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
572 iwl3945_rt->rt_hdr.it_pad = 0;
573
574 /* total header + data */
575 put_unaligned_le16(sizeof(*iwl3945_rt), &iwl3945_rt->rt_hdr.it_len);
576
577 /* Indicate all the fields we add to the radiotap header */
578 put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) |
579 (1 << IEEE80211_RADIOTAP_FLAGS) |
580 (1 << IEEE80211_RADIOTAP_RATE) |
581 (1 << IEEE80211_RADIOTAP_CHANNEL) |
582 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
583 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
584 (1 << IEEE80211_RADIOTAP_ANTENNA),
585 &iwl3945_rt->rt_hdr.it_present);
586
587 /* Zero the flags, we'll add to them as we go */
588 iwl3945_rt->rt_flags = 0;
589
590 put_unaligned_le64(tsf, &iwl3945_rt->rt_tsf);
591
592 iwl3945_rt->rt_dbmsignal = signal;
593 iwl3945_rt->rt_dbmnoise = noise;
594
595 /* Convert the channel frequency and set the flags */
596 put_unaligned_le16(stats->freq, &iwl3945_rt->rt_channelMHz);
597 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
598 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
599 &iwl3945_rt->rt_chbitmask);
600 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
601 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
602 &iwl3945_rt->rt_chbitmask);
603 else /* 802.11g */
604 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
605 &iwl3945_rt->rt_chbitmask);
606
607 if (rate == -1)
608 iwl3945_rt->rt_rate = 0;
609 else {
610 if (stats->band == IEEE80211_BAND_5GHZ)
611 rate += IWL_FIRST_OFDM_RATE;
612
613 iwl3945_rt->rt_rate = iwl3945_rates[rate].ieee;
614 }
615
616 /* antenna number */
617 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
618 iwl3945_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
619
620 /* set the preamble flag if we have it */
621 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
622 iwl3945_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
623
624 stats->flag |= RX_FLAG_RADIOTAP;
625}
626
627static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv, 534static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
628 struct iwl3945_rx_mem_buffer *rxb, 535 struct iwl3945_rx_mem_buffer *rxb,
629 struct ieee80211_rx_status *stats) 536 struct ieee80211_rx_status *stats)
@@ -657,9 +564,6 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
657 iwl3945_set_decrypted_flag(priv, rxb->skb, 564 iwl3945_set_decrypted_flag(priv, rxb->skb,
658 le32_to_cpu(rx_end->status), stats); 565 le32_to_cpu(rx_end->status), stats);
659 566
660 if (priv->add_radiotap)
661 iwl3945_add_radiotap(priv, rxb->skb, rx_hdr, stats);
662
663#ifdef CONFIG_IWL3945_LEDS 567#ifdef CONFIG_IWL3945_LEDS
664 if (ieee80211_is_data(hdr->frame_control)) 568 if (ieee80211_is_data(hdr->frame_control))
665 priv->rxtxpackets += len; 569 priv->rxtxpackets += len;
@@ -684,7 +588,6 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
684 u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff); 588 u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
685 u8 network_packet; 589 u8 network_packet;
686 590
687 rx_status.antenna = 0;
688 rx_status.flag = 0; 591 rx_status.flag = 0;
689 rx_status.mactime = le64_to_cpu(rx_end->timestamp); 592 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
690 rx_status.freq = 593 rx_status.freq =
@@ -696,6 +599,13 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
696 if (rx_status.band == IEEE80211_BAND_5GHZ) 599 if (rx_status.band == IEEE80211_BAND_5GHZ)
697 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; 600 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
698 601
602 rx_status.antenna = le16_to_cpu(rx_hdr->phy_flags &
603 RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
604
605 /* set the preamble flag if appropriate */
606 if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
607 rx_status.flag |= RX_FLAG_SHORTPRE;
608
699 if ((unlikely(rx_stats->phy_count > 20))) { 609 if ((unlikely(rx_stats->phy_count > 20))) {
700 IWL_DEBUG_DROP 610 IWL_DEBUG_DROP
701 ("dsp size out of range [0,20]: " 611 ("dsp size out of range [0,20]: "
@@ -771,100 +681,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
771 priv->last_rx_noise = rx_status.noise; 681 priv->last_rx_noise = rx_status.noise;
772 } 682 }
773 683
774 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { 684 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
775 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
776 return;
777 }
778
779 switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) {
780 case IEEE80211_FTYPE_MGMT:
781 switch (le16_to_cpu(header->frame_control) &
782 IEEE80211_FCTL_STYPE) {
783 case IEEE80211_STYPE_PROBE_RESP:
784 case IEEE80211_STYPE_BEACON:{
785 /* If this is a beacon or probe response for
786 * our network then cache the beacon
787 * timestamp */
788 if ((((priv->iw_mode == IEEE80211_IF_TYPE_STA)
789 && !compare_ether_addr(header->addr2,
790 priv->bssid)) ||
791 ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
792 && !compare_ether_addr(header->addr3,
793 priv->bssid)))) {
794 struct ieee80211_mgmt *mgmt =
795 (struct ieee80211_mgmt *)header;
796 __le32 *pos;
797 pos = (__le32 *)&mgmt->u.beacon.
798 timestamp;
799 priv->timestamp0 = le32_to_cpu(pos[0]);
800 priv->timestamp1 = le32_to_cpu(pos[1]);
801 priv->beacon_int = le16_to_cpu(
802 mgmt->u.beacon.beacon_int);
803 if (priv->call_post_assoc_from_beacon &&
804 (priv->iw_mode ==
805 IEEE80211_IF_TYPE_STA))
806 queue_work(priv->workqueue,
807 &priv->post_associate.work);
808
809 priv->call_post_assoc_from_beacon = 0;
810 }
811
812 break;
813 }
814
815 case IEEE80211_STYPE_ACTION:
816 /* TODO: Parse 802.11h frames for CSA... */
817 break;
818
819 /*
820 * TODO: Use the new callback function from
821 * mac80211 instead of sniffing these packets.
822 */
823 case IEEE80211_STYPE_ASSOC_RESP:
824 case IEEE80211_STYPE_REASSOC_RESP:{
825 struct ieee80211_mgmt *mgnt =
826 (struct ieee80211_mgmt *)header;
827
828 /* We have just associated, give some
829 * time for the 4-way handshake if
830 * any. Don't start scan too early. */
831 priv->next_scan_jiffies = jiffies +
832 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
833
834 priv->assoc_id = (~((1 << 15) | (1 << 14)) &
835 le16_to_cpu(mgnt->u.
836 assoc_resp.aid));
837 priv->assoc_capability =
838 le16_to_cpu(mgnt->u.assoc_resp.capab_info);
839 if (priv->beacon_int)
840 queue_work(priv->workqueue,
841 &priv->post_associate.work);
842 else
843 priv->call_post_assoc_from_beacon = 1;
844 break;
845 }
846
847 case IEEE80211_STYPE_PROBE_REQ:{
848 DECLARE_MAC_BUF(mac1);
849 DECLARE_MAC_BUF(mac2);
850 DECLARE_MAC_BUF(mac3);
851 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
852 IWL_DEBUG_DROP
853 ("Dropping (non network): %s"
854 ", %s, %s\n",
855 print_mac(mac1, header->addr1),
856 print_mac(mac2, header->addr2),
857 print_mac(mac3, header->addr3));
858 return;
859 }
860 }
861
862 case IEEE80211_FTYPE_DATA:
863 /* fall through */
864 default:
865 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
866 break;
867 }
868} 685}
869 686
870int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl3945_priv *priv, void *ptr, 687int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl3945_priv *priv, void *ptr,
@@ -990,7 +807,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
990 807
991 priv->stations[sta_id].current_rate.rate_n_flags = rate; 808 priv->stations[sta_id].current_rate.rate_n_flags = rate;
992 809
993 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 810 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
994 (sta_id != priv->hw_setting.bcast_sta_id) && 811 (sta_id != priv->hw_setting.bcast_sta_id) &&
995 (sta_id != IWL_MULTICAST_ID)) 812 (sta_id != IWL_MULTICAST_ID))
996 priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate; 813 priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index fa81ba1af3d3..2a4933b5fb64 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -707,7 +707,6 @@ struct iwl3945_priv {
707 707
708 enum ieee80211_band band; 708 enum ieee80211_band band;
709 int alloc_rxb_skb; 709 int alloc_rxb_skb;
710 bool add_radiotap;
711 710
712 void (*rx_handlers[REPLY_MAX])(struct iwl3945_priv *priv, 711 void (*rx_handlers[REPLY_MAX])(struct iwl3945_priv *priv,
713 struct iwl3945_rx_mem_buffer *rxb); 712 struct iwl3945_rx_mem_buffer *rxb);
@@ -852,7 +851,7 @@ struct iwl3945_priv {
852 /* eeprom */ 851 /* eeprom */
853 struct iwl3945_eeprom eeprom; 852 struct iwl3945_eeprom eeprom;
854 853
855 enum ieee80211_if_types iw_mode; 854 enum nl80211_iftype iw_mode;
856 855
857 struct sk_buff *ibss_beacon; 856 struct sk_buff *ibss_beacon;
858 857
@@ -895,7 +894,6 @@ struct iwl3945_priv {
895 struct delayed_work thermal_periodic; 894 struct delayed_work thermal_periodic;
896 struct delayed_work gather_stats; 895 struct delayed_work gather_stats;
897 struct delayed_work scan_check; 896 struct delayed_work scan_check;
898 struct delayed_work post_associate;
899 897
900#define IWL_DEFAULT_TX_POWER 0x0F 898#define IWL_DEFAULT_TX_POWER 0x0F
901 s8 user_txpower_limit; 899 s8 user_txpower_limit;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index fce950f4163c..f4793a609443 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -98,16 +98,17 @@
98#define IWL_RSSI_OFFSET 44 98#define IWL_RSSI_OFFSET 44
99 99
100 100
101#include "iwl-commands.h"
102 101
103/* PCI registers */ 102/* PCI registers */
104#define PCI_LINK_CTRL 0x0F0 /* 1 byte */ 103#define PCI_CFG_RETRY_TIMEOUT 0x041
105#define PCI_POWER_SOURCE 0x0C8 104#define PCI_CFG_POWER_SOURCE 0x0C8
106#define PCI_REG_WUM8 0x0E8 105#define PCI_REG_WUM8 0x0E8
106#define PCI_CFG_LINK_CTRL 0x0F0
107 107
108/* PCI register values */ 108/* PCI register values */
109#define PCI_LINK_VAL_L0S_EN 0x01 109#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
110#define PCI_LINK_VAL_L1_EN 0x02 110#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
111#define PCI_CFG_CMD_REG_INT_DIS_MSK 0x04
111#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 112#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
112 113
113#define TFD_QUEUE_SIZE_MAX (256) 114#define TFD_QUEUE_SIZE_MAX (256)
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index e2581229d8b2..9838de5f4369 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -399,7 +399,7 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
399 unsigned long flags; 399 unsigned long flags;
400 u32 val; 400 u32 val;
401 u16 radio_cfg; 401 u16 radio_cfg;
402 u8 val_link; 402 u16 link;
403 403
404 spin_lock_irqsave(&priv->lock, flags); 404 spin_lock_irqsave(&priv->lock, flags);
405 405
@@ -410,10 +410,10 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
410 val & ~(1 << 11)); 410 val & ~(1 << 11));
411 } 411 }
412 412
413 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); 413 pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &link);
414 414
415 /* L1 is enabled by BIOS */ 415 /* L1 is enabled by BIOS */
416 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN) 416 if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
417 /* diable L0S disabled L1A enabled */ 417 /* diable L0S disabled L1A enabled */
418 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 418 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
419 else 419 else
@@ -474,8 +474,8 @@ static void iwl4965_apm_stop(struct iwl_priv *priv)
474 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 474 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
475 475
476 udelay(10); 476 udelay(10);
477 477 /* clear "init complete" move adapter D0A* --> D0U state */
478 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 478 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
479 spin_unlock_irqrestore(&priv->lock, flags); 479 spin_unlock_irqrestore(&priv->lock, flags);
480} 480}
481 481
@@ -1607,8 +1607,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1607 return ret; 1607 return ret;
1608} 1608}
1609 1609
1610 1610#ifdef IEEE80211_CONF_CHANNEL_SWITCH
1611int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) 1611static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1612{ 1612{
1613 int rc; 1613 int rc;
1614 u8 band = 0; 1614 u8 band = 0;
@@ -1648,6 +1648,7 @@ int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1648 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1648 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1649 return rc; 1649 return rc;
1650} 1650}
1651#endif
1651 1652
1652static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv) 1653static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
1653{ 1654{
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 17d4f31c5934..c479ee211c5c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -129,6 +129,13 @@ struct iwl5000_shared {
129 __le32 padding2; 129 __le32 padding2;
130} __attribute__ ((packed)); 130} __attribute__ ((packed));
131 131
132/* calibrations defined for 5000 */
133/* defines the order in which results should be sent to the runtime uCode */
134enum iwl5000_calib {
135 IWL5000_CALIB_LO,
136 IWL5000_CALIB_TX_IQ,
137 IWL5000_CALIB_TX_IQ_PERD,
138};
132 139
133#endif /* __iwl_5000_hw_h__ */ 140#endif /* __iwl_5000_hw_h__ */
134 141
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index cbc01a00eaf4..f6003e7996af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -145,7 +145,8 @@ static void iwl5000_apm_stop(struct iwl_priv *priv)
145 145
146 udelay(10); 146 udelay(10);
147 147
148 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 148 /* clear "init complete" move adapter D0A* --> D0U state */
149 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
149 150
150 spin_unlock_irqrestore(&priv->lock, flags); 151 spin_unlock_irqrestore(&priv->lock, flags);
151} 152}
@@ -208,14 +209,14 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
208{ 209{
209 unsigned long flags; 210 unsigned long flags;
210 u16 radio_cfg; 211 u16 radio_cfg;
211 u8 val_link; 212 u16 link;
212 213
213 spin_lock_irqsave(&priv->lock, flags); 214 spin_lock_irqsave(&priv->lock, flags);
214 215
215 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); 216 pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &link);
216 217
217 /* L1 is enabled by BIOS */ 218 /* L1 is enabled by BIOS */
218 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN) 219 if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
219 /* diable L0S disabled L1A enabled */ 220 /* diable L0S disabled L1A enabled */
220 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 221 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
221 else 222 else
@@ -444,48 +445,6 @@ static int iwl5000_send_Xtal_calib(struct iwl_priv *priv)
444 sizeof(cal_cmd), &cal_cmd); 445 sizeof(cal_cmd), &cal_cmd);
445} 446}
446 447
447static int iwl5000_send_calib_results(struct iwl_priv *priv)
448{
449 int ret = 0;
450
451 struct iwl_host_cmd hcmd = {
452 .id = REPLY_PHY_CALIBRATION_CMD,
453 .meta.flags = CMD_SIZE_HUGE,
454 };
455
456 if (priv->calib_results.lo_res) {
457 hcmd.len = priv->calib_results.lo_res_len;
458 hcmd.data = priv->calib_results.lo_res;
459 ret = iwl_send_cmd_sync(priv, &hcmd);
460
461 if (ret)
462 goto err;
463 }
464
465 if (priv->calib_results.tx_iq_res) {
466 hcmd.len = priv->calib_results.tx_iq_res_len;
467 hcmd.data = priv->calib_results.tx_iq_res;
468 ret = iwl_send_cmd_sync(priv, &hcmd);
469
470 if (ret)
471 goto err;
472 }
473
474 if (priv->calib_results.tx_iq_perd_res) {
475 hcmd.len = priv->calib_results.tx_iq_perd_res_len;
476 hcmd.data = priv->calib_results.tx_iq_perd_res;
477 ret = iwl_send_cmd_sync(priv, &hcmd);
478
479 if (ret)
480 goto err;
481 }
482
483 return 0;
484err:
485 IWL_ERROR("Error %d\n", ret);
486 return ret;
487}
488
489static int iwl5000_send_calib_cfg(struct iwl_priv *priv) 448static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
490{ 449{
491 struct iwl5000_calib_cfg_cmd calib_cfg_cmd; 450 struct iwl5000_calib_cfg_cmd calib_cfg_cmd;
@@ -510,33 +469,30 @@ static void iwl5000_rx_calib_result(struct iwl_priv *priv,
510 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 469 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
511 struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw; 470 struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw;
512 int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK; 471 int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK;
513 472 int index;
514 iwl_free_calib_results(priv);
515 473
516 /* reduce the size of the length field itself */ 474 /* reduce the size of the length field itself */
517 len -= 4; 475 len -= 4;
518 476
477 /* Define the order in which the results will be sent to the runtime
478 * uCode. iwl_send_calib_results sends them in a row according to their
479 * index. We sort them here */
519 switch (hdr->op_code) { 480 switch (hdr->op_code) {
520 case IWL5000_PHY_CALIBRATE_LO_CMD: 481 case IWL5000_PHY_CALIBRATE_LO_CMD:
521 priv->calib_results.lo_res = kzalloc(len, GFP_ATOMIC); 482 index = IWL5000_CALIB_LO;
522 priv->calib_results.lo_res_len = len;
523 memcpy(priv->calib_results.lo_res, pkt->u.raw, len);
524 break; 483 break;
525 case IWL5000_PHY_CALIBRATE_TX_IQ_CMD: 484 case IWL5000_PHY_CALIBRATE_TX_IQ_CMD:
526 priv->calib_results.tx_iq_res = kzalloc(len, GFP_ATOMIC); 485 index = IWL5000_CALIB_TX_IQ;
527 priv->calib_results.tx_iq_res_len = len;
528 memcpy(priv->calib_results.tx_iq_res, pkt->u.raw, len);
529 break; 486 break;
530 case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD: 487 case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD:
531 priv->calib_results.tx_iq_perd_res = kzalloc(len, GFP_ATOMIC); 488 index = IWL5000_CALIB_TX_IQ_PERD;
532 priv->calib_results.tx_iq_perd_res_len = len;
533 memcpy(priv->calib_results.tx_iq_perd_res, pkt->u.raw, len);
534 break; 489 break;
535 default: 490 default:
536 IWL_ERROR("Unknown calibration notification %d\n", 491 IWL_ERROR("Unknown calibration notification %d\n",
537 hdr->op_code); 492 hdr->op_code);
538 return; 493 return;
539 } 494 }
495 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
540} 496}
541 497
542static void iwl5000_rx_calib_complete(struct iwl_priv *priv, 498static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
@@ -577,14 +533,11 @@ static int iwl5000_load_section(struct iwl_priv *priv,
577 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 533 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
578 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 534 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
579 535
580 /* FIME: write the MSB of the phy_addr in CTRL1
581 * iwl_write_direct32(priv,
582 IWL_FH_TFDIB_CTRL1_REG(IWL_FH_SRVC_CHNL),
583 ((phy_addr & MSB_MSK)
584 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_count);
585 */
586 iwl_write_direct32(priv, 536 iwl_write_direct32(priv,
587 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), byte_cnt); 537 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
538 (iwl_get_dma_hi_address(phy_addr)
539 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
540
588 iwl_write_direct32(priv, 541 iwl_write_direct32(priv,
589 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 542 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
590 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 543 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
@@ -834,7 +787,7 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)
834 iwl5000_send_Xtal_calib(priv); 787 iwl5000_send_Xtal_calib(priv);
835 788
836 if (priv->ucode_type == UCODE_RT) 789 if (priv->ucode_type == UCODE_RT)
837 iwl5000_send_calib_results(priv); 790 iwl_send_calib_results(priv);
838 791
839 return 0; 792 return 0;
840} 793}
@@ -1616,6 +1569,8 @@ struct iwl_cfg iwl5350_agn_cfg = {
1616 .mod_params = &iwl50_mod_params, 1569 .mod_params = &iwl50_mod_params,
1617}; 1570};
1618 1571
1572MODULE_FIRMWARE("iwlwifi-5000" IWL5000_UCODE_API ".ucode");
1573
1619module_param_named(disable50, iwl50_mod_params.disable, int, 0444); 1574module_param_named(disable50, iwl50_mod_params.disable, int, 0444);
1620MODULE_PARM_DESC(disable50, 1575MODULE_PARM_DESC(disable50,
1621 "manually disable the 50XX radio (default 0 [radio on])"); 1576 "manually disable the 50XX radio (default 0 [radio on])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 754fef5b592f..8b57b390c8ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -163,6 +163,9 @@ struct iwl_lq_sta {
163 u32 dbg_fixed_rate; 163 u32 dbg_fixed_rate;
164#endif 164#endif
165 struct iwl_priv *drv; 165 struct iwl_priv *drv;
166
167 /* used to be in sta_info */
168 int last_txrate_idx;
166}; 169};
167 170
168static void rs_rate_scale_perform(struct iwl_priv *priv, 171static void rs_rate_scale_perform(struct iwl_priv *priv,
@@ -356,18 +359,12 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
356 struct iwl_lq_sta *lq_data, u8 tid, 359 struct iwl_lq_sta *lq_data, u8 tid,
357 struct sta_info *sta) 360 struct sta_info *sta)
358{ 361{
359 unsigned long state;
360 DECLARE_MAC_BUF(mac); 362 DECLARE_MAC_BUF(mac);
361 363
362 spin_lock_bh(&sta->lock); 364 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
363 state = sta->ampdu_mlme.tid_state_tx[tid];
364 spin_unlock_bh(&sta->lock);
365
366 if (state == HT_AGG_STATE_IDLE &&
367 rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
368 IWL_DEBUG_HT("Starting Tx agg: STA: %s tid: %d\n", 365 IWL_DEBUG_HT("Starting Tx agg: STA: %s tid: %d\n",
369 print_mac(mac, sta->addr), tid); 366 print_mac(mac, sta->sta.addr), tid);
370 ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid); 367 ieee80211_start_tx_ba_session(priv->hw, sta->sta.addr, tid);
371 } 368 }
372} 369}
373 370
@@ -436,7 +433,7 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
436 /* Shift bitmap by one frame (throw away oldest history), 433 /* Shift bitmap by one frame (throw away oldest history),
437 * OR in "1", and increment "success" if this 434 * OR in "1", and increment "success" if this
438 * frame was successful. */ 435 * frame was successful. */
439 window->data <<= 1;; 436 window->data <<= 1;
440 if (successes > 0) { 437 if (successes > 0) {
441 window->success_counter++; 438 window->success_counter++;
442 window->data |= 0x1; 439 window->data |= 0x1;
@@ -821,7 +818,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
821 818
822 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv; 819 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
823 820
824 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 821 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
825 !lq_sta->ibss_sta_added) 822 !lq_sta->ibss_sta_added)
826 goto out; 823 goto out;
827 824
@@ -1128,6 +1125,7 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1128 1125
1129 /* Higher rate not available, use the original */ 1126 /* Higher rate not available, use the original */
1130 } else { 1127 } else {
1128 new_rate = rate;
1131 break; 1129 break;
1132 } 1130 }
1133 } 1131 }
@@ -1150,10 +1148,11 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1150 s8 is_green = lq_sta->is_green; 1148 s8 is_green = lq_sta->is_green;
1151 1149
1152 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) || 1150 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) ||
1153 !sta->ht_info.ht_supported) 1151 !sta->sta.ht_info.ht_supported)
1154 return -1; 1152 return -1;
1155 1153
1156 if (priv->current_ht_config.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC) 1154 if (((sta->sta.ht_info.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1155 == WLAN_HT_CAP_SM_PS_STATIC)
1157 return -1; 1156 return -1;
1158 1157
1159 /* Need both Tx chains/antennas to support MIMO */ 1158 /* Need both Tx chains/antennas to support MIMO */
@@ -1217,7 +1216,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1217 s32 rate; 1216 s32 rate;
1218 1217
1219 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) || 1218 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) ||
1220 !sta->ht_info.ht_supported) 1219 !sta->sta.ht_info.ht_supported)
1221 return -1; 1220 return -1;
1222 1221
1223 IWL_DEBUG_RATE("LQ: try to switch to SISO\n"); 1222 IWL_DEBUG_RATE("LQ: try to switch to SISO\n");
@@ -1280,15 +1279,23 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1280 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1279 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1281 u8 start_action = tbl->action; 1280 u8 start_action = tbl->action;
1282 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1281 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1282 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1283 int ret = 0; 1283 int ret = 0;
1284 1284
1285 for (; ;) { 1285 for (; ;) {
1286 switch (tbl->action) { 1286 switch (tbl->action) {
1287 case IWL_LEGACY_SWITCH_ANTENNA: 1287 case IWL_LEGACY_SWITCH_ANTENNA1:
1288 case IWL_LEGACY_SWITCH_ANTENNA2:
1288 IWL_DEBUG_RATE("LQ: Legacy toggle Antenna\n"); 1289 IWL_DEBUG_RATE("LQ: Legacy toggle Antenna\n");
1289 1290
1290 lq_sta->action_counter++; 1291 lq_sta->action_counter++;
1291 1292
1293 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
1294 tx_chains_num <= 1) ||
1295 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1296 tx_chains_num <= 2))
1297 break;
1298
1292 /* Don't change antenna if success has been great */ 1299 /* Don't change antenna if success has been great */
1293 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1300 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1294 break; 1301 break;
@@ -1298,7 +1305,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1298 1305
1299 if (rs_toggle_antenna(valid_tx_ant, 1306 if (rs_toggle_antenna(valid_tx_ant,
1300 &search_tbl->current_rate, search_tbl)) { 1307 &search_tbl->current_rate, search_tbl)) {
1301 lq_sta->search_better_tbl = 1; 1308 rs_set_expected_tpt_table(lq_sta, search_tbl);
1302 goto out; 1309 goto out;
1303 } 1310 }
1304 break; 1311 break;
@@ -1311,43 +1318,54 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1311 ret = rs_switch_to_siso(priv, lq_sta, conf, sta, 1318 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1312 search_tbl, index); 1319 search_tbl, index);
1313 if (!ret) { 1320 if (!ret) {
1314 lq_sta->search_better_tbl = 1;
1315 lq_sta->action_counter = 0; 1321 lq_sta->action_counter = 0;
1316 goto out; 1322 goto out;
1317 } 1323 }
1318 1324
1319 break; 1325 break;
1320 case IWL_LEGACY_SWITCH_MIMO2: 1326 case IWL_LEGACY_SWITCH_MIMO2_AB:
1327 case IWL_LEGACY_SWITCH_MIMO2_AC:
1328 case IWL_LEGACY_SWITCH_MIMO2_BC:
1321 IWL_DEBUG_RATE("LQ: Legacy switch to MIMO2\n"); 1329 IWL_DEBUG_RATE("LQ: Legacy switch to MIMO2\n");
1322 1330
1323 /* Set up search table to try MIMO */ 1331 /* Set up search table to try MIMO */
1324 memcpy(search_tbl, tbl, sz); 1332 memcpy(search_tbl, tbl, sz);
1325 search_tbl->is_SGI = 0; 1333 search_tbl->is_SGI = 0;
1326 search_tbl->ant_type = ANT_AB;/*FIXME:RS*/ 1334
1327 /*FIXME:RS:need to check ant validity*/ 1335 if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
1336 search_tbl->ant_type = ANT_AB;
1337 else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
1338 search_tbl->ant_type = ANT_AC;
1339 else
1340 search_tbl->ant_type = ANT_BC;
1341
1342 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1343 break;
1344
1328 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, 1345 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1329 search_tbl, index); 1346 search_tbl, index);
1330 if (!ret) { 1347 if (!ret) {
1331 lq_sta->search_better_tbl = 1;
1332 lq_sta->action_counter = 0; 1348 lq_sta->action_counter = 0;
1333 goto out; 1349 goto out;
1334 } 1350 }
1335 break; 1351 break;
1336 } 1352 }
1337 tbl->action++; 1353 tbl->action++;
1338 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2) 1354 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1339 tbl->action = IWL_LEGACY_SWITCH_ANTENNA; 1355 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1340 1356
1341 if (tbl->action == start_action) 1357 if (tbl->action == start_action)
1342 break; 1358 break;
1343 1359
1344 } 1360 }
1361 search_tbl->lq_type = LQ_NONE;
1345 return 0; 1362 return 0;
1346 1363
1347 out: 1364out:
1365 lq_sta->search_better_tbl = 1;
1348 tbl->action++; 1366 tbl->action++;
1349 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2) 1367 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1350 tbl->action = IWL_LEGACY_SWITCH_ANTENNA; 1368 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1351 return 0; 1369 return 0;
1352 1370
1353} 1371}
@@ -1369,34 +1387,51 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1369 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1387 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1370 u8 start_action = tbl->action; 1388 u8 start_action = tbl->action;
1371 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1389 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1390 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1372 int ret; 1391 int ret;
1373 1392
1374 for (;;) { 1393 for (;;) {
1375 lq_sta->action_counter++; 1394 lq_sta->action_counter++;
1376 switch (tbl->action) { 1395 switch (tbl->action) {
1377 case IWL_SISO_SWITCH_ANTENNA: 1396 case IWL_SISO_SWITCH_ANTENNA1:
1397 case IWL_SISO_SWITCH_ANTENNA2:
1378 IWL_DEBUG_RATE("LQ: SISO toggle Antenna\n"); 1398 IWL_DEBUG_RATE("LQ: SISO toggle Antenna\n");
1399
1400 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1401 tx_chains_num <= 1) ||
1402 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1403 tx_chains_num <= 2))
1404 break;
1405
1379 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1406 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1380 break; 1407 break;
1381 1408
1382 memcpy(search_tbl, tbl, sz); 1409 memcpy(search_tbl, tbl, sz);
1383 if (rs_toggle_antenna(valid_tx_ant, 1410 if (rs_toggle_antenna(valid_tx_ant,
1384 &search_tbl->current_rate, search_tbl)) { 1411 &search_tbl->current_rate, search_tbl))
1385 lq_sta->search_better_tbl = 1;
1386 goto out; 1412 goto out;
1387 }
1388 break; 1413 break;
1389 case IWL_SISO_SWITCH_MIMO2: 1414 case IWL_SISO_SWITCH_MIMO2_AB:
1415 case IWL_SISO_SWITCH_MIMO2_AC:
1416 case IWL_SISO_SWITCH_MIMO2_BC:
1390 IWL_DEBUG_RATE("LQ: SISO switch to MIMO2\n"); 1417 IWL_DEBUG_RATE("LQ: SISO switch to MIMO2\n");
1391 memcpy(search_tbl, tbl, sz); 1418 memcpy(search_tbl, tbl, sz);
1392 search_tbl->is_SGI = 0; 1419 search_tbl->is_SGI = 0;
1393 search_tbl->ant_type = ANT_AB; /*FIXME:RS*/ 1420
1421 if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
1422 search_tbl->ant_type = ANT_AB;
1423 else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
1424 search_tbl->ant_type = ANT_AC;
1425 else
1426 search_tbl->ant_type = ANT_BC;
1427
1428 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1429 break;
1430
1394 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, 1431 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1395 search_tbl, index); 1432 search_tbl, index);
1396 if (!ret) { 1433 if (!ret)
1397 lq_sta->search_better_tbl = 1;
1398 goto out; 1434 goto out;
1399 }
1400 break; 1435 break;
1401 case IWL_SISO_SWITCH_GI: 1436 case IWL_SISO_SWITCH_GI:
1402 if (!tbl->is_fat && 1437 if (!tbl->is_fat &&
@@ -1426,22 +1461,23 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1426 } 1461 }
1427 search_tbl->current_rate = rate_n_flags_from_tbl( 1462 search_tbl->current_rate = rate_n_flags_from_tbl(
1428 search_tbl, index, is_green); 1463 search_tbl, index, is_green);
1429 lq_sta->search_better_tbl = 1;
1430 goto out; 1464 goto out;
1431 } 1465 }
1432 tbl->action++; 1466 tbl->action++;
1433 if (tbl->action > IWL_SISO_SWITCH_GI) 1467 if (tbl->action > IWL_SISO_SWITCH_GI)
1434 tbl->action = IWL_SISO_SWITCH_ANTENNA; 1468 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1435 1469
1436 if (tbl->action == start_action) 1470 if (tbl->action == start_action)
1437 break; 1471 break;
1438 } 1472 }
1473 search_tbl->lq_type = LQ_NONE;
1439 return 0; 1474 return 0;
1440 1475
1441 out: 1476 out:
1477 lq_sta->search_better_tbl = 1;
1442 tbl->action++; 1478 tbl->action++;
1443 if (tbl->action > IWL_SISO_SWITCH_GI) 1479 if (tbl->action > IWL_SISO_SWITCH_GI)
1444 tbl->action = IWL_SISO_SWITCH_ANTENNA; 1480 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1445 return 0; 1481 return 0;
1446} 1482}
1447 1483
@@ -1457,37 +1493,58 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1457 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 1493 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1458 struct iwl_scale_tbl_info *search_tbl = 1494 struct iwl_scale_tbl_info *search_tbl =
1459 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1495 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1496 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1460 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1497 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1461 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1498 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1462 u8 start_action = tbl->action; 1499 u8 start_action = tbl->action;
1463 /*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/ 1500 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1501 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1464 int ret; 1502 int ret;
1465 1503
1466 for (;;) { 1504 for (;;) {
1467 lq_sta->action_counter++; 1505 lq_sta->action_counter++;
1468 switch (tbl->action) { 1506 switch (tbl->action) {
1469 case IWL_MIMO_SWITCH_ANTENNA_A: 1507 case IWL_MIMO2_SWITCH_ANTENNA1:
1470 case IWL_MIMO_SWITCH_ANTENNA_B: 1508 case IWL_MIMO2_SWITCH_ANTENNA2:
1509 IWL_DEBUG_RATE("LQ: MIMO toggle Antennas\n");
1510
1511 if (tx_chains_num <= 2)
1512 break;
1513
1514 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1515 break;
1516
1517 memcpy(search_tbl, tbl, sz);
1518 if (rs_toggle_antenna(valid_tx_ant,
1519 &search_tbl->current_rate, search_tbl))
1520 goto out;
1521 break;
1522 case IWL_MIMO2_SWITCH_SISO_A:
1523 case IWL_MIMO2_SWITCH_SISO_B:
1524 case IWL_MIMO2_SWITCH_SISO_C:
1471 IWL_DEBUG_RATE("LQ: MIMO2 switch to SISO\n"); 1525 IWL_DEBUG_RATE("LQ: MIMO2 switch to SISO\n");
1472 1526
1473 /* Set up new search table for SISO */ 1527 /* Set up new search table for SISO */
1474 memcpy(search_tbl, tbl, sz); 1528 memcpy(search_tbl, tbl, sz);
1475 1529
1476 /*FIXME:RS:need to check ant validity + C*/ 1530 if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
1477 if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A)
1478 search_tbl->ant_type = ANT_A; 1531 search_tbl->ant_type = ANT_A;
1479 else 1532 else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1480 search_tbl->ant_type = ANT_B; 1533 search_tbl->ant_type = ANT_B;
1534 else
1535 search_tbl->ant_type = ANT_C;
1536
1537 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1538 break;
1481 1539
1482 ret = rs_switch_to_siso(priv, lq_sta, conf, sta, 1540 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1483 search_tbl, index); 1541 search_tbl, index);
1484 if (!ret) { 1542 if (!ret)
1485 lq_sta->search_better_tbl = 1;
1486 goto out; 1543 goto out;
1487 } 1544
1488 break; 1545 break;
1489 1546
1490 case IWL_MIMO_SWITCH_GI: 1547 case IWL_MIMO2_SWITCH_GI:
1491 if (!tbl->is_fat && 1548 if (!tbl->is_fat &&
1492 !(priv->current_ht_config.sgf & 1549 !(priv->current_ht_config.sgf &
1493 HT_SHORT_GI_20MHZ)) 1550 HT_SHORT_GI_20MHZ))
@@ -1516,23 +1573,23 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1516 } 1573 }
1517 search_tbl->current_rate = rate_n_flags_from_tbl( 1574 search_tbl->current_rate = rate_n_flags_from_tbl(
1518 search_tbl, index, is_green); 1575 search_tbl, index, is_green);
1519 lq_sta->search_better_tbl = 1;
1520 goto out; 1576 goto out;
1521 1577
1522 } 1578 }
1523 tbl->action++; 1579 tbl->action++;
1524 if (tbl->action > IWL_MIMO_SWITCH_GI) 1580 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1525 tbl->action = IWL_MIMO_SWITCH_ANTENNA_A; 1581 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1526 1582
1527 if (tbl->action == start_action) 1583 if (tbl->action == start_action)
1528 break; 1584 break;
1529 } 1585 }
1530 1586 search_tbl->lq_type = LQ_NONE;
1531 return 0; 1587 return 0;
1532 out: 1588 out:
1589 lq_sta->search_better_tbl = 1;
1533 tbl->action++; 1590 tbl->action++;
1534 if (tbl->action > IWL_MIMO_SWITCH_GI) 1591 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1535 tbl->action = IWL_MIMO_SWITCH_ANTENNA_A; 1592 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1536 return 0; 1593 return 0;
1537 1594
1538} 1595}
@@ -1668,6 +1725,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1668 return; 1725 return;
1669 1726
1670 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv; 1727 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
1728 lq_sta->supp_rates = sta->sta.supp_rates[lq_sta->band];
1671 1729
1672 tid = rs_tl_add_packet(lq_sta, hdr); 1730 tid = rs_tl_add_packet(lq_sta, hdr);
1673 1731
@@ -1685,7 +1743,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1685 is_green = lq_sta->is_green; 1743 is_green = lq_sta->is_green;
1686 1744
1687 /* current tx rate */ 1745 /* current tx rate */
1688 index = sta->last_txrate_idx; 1746 index = lq_sta->last_txrate_idx;
1689 1747
1690 IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index, 1748 IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index,
1691 tbl->lq_type); 1749 tbl->lq_type);
@@ -1746,19 +1804,13 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1746 rs_stay_in_table(lq_sta); 1804 rs_stay_in_table(lq_sta);
1747 1805
1748 goto out; 1806 goto out;
1807 }
1749 1808
1750 /* Else we have enough samples; calculate estimate of 1809 /* Else we have enough samples; calculate estimate of
1751 * actual average throughput */ 1810 * actual average throughput */
1752 } else { 1811
1753 /*FIXME:RS remove this else if we don't get this error*/ 1812 BUG_ON(window->average_tpt != ((window->success_ratio *
1754 if (window->average_tpt != ((window->success_ratio * 1813 tbl->expected_tpt[index] + 64) / 128));
1755 tbl->expected_tpt[index] + 64) / 128)) {
1756 IWL_ERROR("expected_tpt should have been calculated"
1757 " by now\n");
1758 window->average_tpt = ((window->success_ratio *
1759 tbl->expected_tpt[index] + 64) / 128);
1760 }
1761 }
1762 1814
1763 /* If we are searching for better modulation mode, check success. */ 1815 /* If we are searching for better modulation mode, check success. */
1764 if (lq_sta->search_better_tbl) { 1816 if (lq_sta->search_better_tbl) {
@@ -1768,7 +1820,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1768 * continuing to use the setup that we've been trying. */ 1820 * continuing to use the setup that we've been trying. */
1769 if (window->average_tpt > lq_sta->last_tpt) { 1821 if (window->average_tpt > lq_sta->last_tpt) {
1770 1822
1771 IWL_DEBUG_RATE("LQ: SWITCHING TO CURRENT TABLE " 1823 IWL_DEBUG_RATE("LQ: SWITCHING TO NEW TABLE "
1772 "suc=%d cur-tpt=%d old-tpt=%d\n", 1824 "suc=%d cur-tpt=%d old-tpt=%d\n",
1773 window->success_ratio, 1825 window->success_ratio,
1774 window->average_tpt, 1826 window->average_tpt,
@@ -2004,15 +2056,7 @@ lq_update:
2004out: 2056out:
2005 tbl->current_rate = rate_n_flags_from_tbl(tbl, index, is_green); 2057 tbl->current_rate = rate_n_flags_from_tbl(tbl, index, is_green);
2006 i = index; 2058 i = index;
2007 sta->last_txrate_idx = i; 2059 lq_sta->last_txrate_idx = i;
2008
2009 /* sta->txrate_idx is an index to A mode rates which start
2010 * at IWL_FIRST_OFDM_RATE
2011 */
2012 if (lq_sta->band == IEEE80211_BAND_5GHZ)
2013 sta->txrate_idx = i - IWL_FIRST_OFDM_RATE;
2014 else
2015 sta->txrate_idx = i;
2016 2060
2017 return; 2061 return;
2018} 2062}
@@ -2035,10 +2079,10 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2035 goto out; 2079 goto out;
2036 2080
2037 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv; 2081 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
2038 i = sta->last_txrate_idx; 2082 i = lq_sta->last_txrate_idx;
2039 2083
2040 if ((lq_sta->lq.sta_id == 0xff) && 2084 if ((lq_sta->lq.sta_id == 0xff) &&
2041 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) 2085 (priv->iw_mode == NL80211_IFTYPE_ADHOC))
2042 goto out; 2086 goto out;
2043 2087
2044 valid_tx_ant = priv->hw_params.valid_tx_ant; 2088 valid_tx_ant = priv->hw_params.valid_tx_ant;
@@ -2106,9 +2150,9 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2106 } 2150 }
2107 2151
2108 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv; 2152 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
2109 i = sta->last_txrate_idx; 2153 i = lq_sta->last_txrate_idx;
2110 2154
2111 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 2155 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
2112 !lq_sta->ibss_sta_added) { 2156 !lq_sta->ibss_sta_added) {
2113 u8 sta_id = iwl_find_station(priv, hdr->addr1); 2157 u8 sta_id = iwl_find_station(priv, hdr->addr1);
2114 DECLARE_MAC_BUF(mac); 2158 DECLARE_MAC_BUF(mac);
@@ -2175,31 +2219,30 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2175 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 2219 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2176 2220
2177 lq_sta->flush_timer = 0; 2221 lq_sta->flush_timer = 0;
2178 lq_sta->supp_rates = sta->supp_rates[sband->band]; 2222 lq_sta->supp_rates = sta->sta.supp_rates[sband->band];
2179 sta->txrate_idx = 3;
2180 for (j = 0; j < LQ_SIZE; j++) 2223 for (j = 0; j < LQ_SIZE; j++)
2181 for (i = 0; i < IWL_RATE_COUNT; i++) 2224 for (i = 0; i < IWL_RATE_COUNT; i++)
2182 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); 2225 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2183 2226
2184 IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n"); 2227 IWL_DEBUG_RATE("LQ: *** rate scale station global init ***\n");
2185 /* TODO: what is a good starting rate for STA? About middle? Maybe not 2228 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2186 * the lowest or the highest rate.. Could consider using RSSI from 2229 * the lowest or the highest rate.. Could consider using RSSI from
2187 * previous packets? Need to have IEEE 802.1X auth succeed immediately 2230 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2188 * after assoc.. */ 2231 * after assoc.. */
2189 2232
2190 lq_sta->ibss_sta_added = 0; 2233 lq_sta->ibss_sta_added = 0;
2191 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2234 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2192 u8 sta_id = iwl_find_station(priv, sta->addr); 2235 u8 sta_id = iwl_find_station(priv, sta->sta.addr);
2193 DECLARE_MAC_BUF(mac); 2236 DECLARE_MAC_BUF(mac);
2194 2237
2195 /* for IBSS the call are from tasklet */ 2238 /* for IBSS the call are from tasklet */
2196 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2239 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2197 print_mac(mac, sta->addr)); 2240 print_mac(mac, sta->sta.addr));
2198 2241
2199 if (sta_id == IWL_INVALID_STATION) { 2242 if (sta_id == IWL_INVALID_STATION) {
2200 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2243 IWL_DEBUG_RATE("LQ: ADD station %s\n",
2201 print_mac(mac, sta->addr)); 2244 print_mac(mac, sta->sta.addr));
2202 sta_id = iwl_add_station_flags(priv, sta->addr, 2245 sta_id = iwl_add_station_flags(priv, sta->sta.addr,
2203 0, CMD_ASYNC, NULL); 2246 0, CMD_ASYNC, NULL);
2204 } 2247 }
2205 if ((sta_id != IWL_INVALID_STATION)) { 2248 if ((sta_id != IWL_INVALID_STATION)) {
@@ -2211,15 +2254,14 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2211 } 2254 }
2212 2255
2213 /* Find highest tx rate supported by hardware and destination station */ 2256 /* Find highest tx rate supported by hardware and destination station */
2257 lq_sta->last_txrate_idx = 3;
2214 for (i = 0; i < sband->n_bitrates; i++) 2258 for (i = 0; i < sband->n_bitrates; i++)
2215 if (sta->supp_rates[sband->band] & BIT(i)) 2259 if (sta->sta.supp_rates[sband->band] & BIT(i))
2216 sta->txrate_idx = i; 2260 lq_sta->last_txrate_idx = i;
2217 2261
2218 sta->last_txrate_idx = sta->txrate_idx; 2262 /* For MODE_IEEE80211A, skip over cck rates in global rate table */
2219 /* WTF is with this bogus comment? A doesn't have cck rates */
2220 /* For MODE_IEEE80211A, cck rates are at end of rate table */
2221 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) 2263 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
2222 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 2264 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2223 2265
2224 lq_sta->is_dup = 0; 2266 lq_sta->is_dup = 0;
2225 lq_sta->is_green = rs_use_green(priv, conf); 2267 lq_sta->is_green = rs_use_green(priv, conf);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 84d4d1e33755..d148d73635eb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -206,21 +206,28 @@ enum {
206#define IWL_RATE_DECREASE_TH 1920 /* 15% */ 206#define IWL_RATE_DECREASE_TH 1920 /* 15% */
207 207
208/* possible actions when in legacy mode */ 208/* possible actions when in legacy mode */
209#define IWL_LEGACY_SWITCH_ANTENNA 0 209#define IWL_LEGACY_SWITCH_ANTENNA1 0
210#define IWL_LEGACY_SWITCH_SISO 1 210#define IWL_LEGACY_SWITCH_ANTENNA2 1
211#define IWL_LEGACY_SWITCH_MIMO2 2 211#define IWL_LEGACY_SWITCH_SISO 2
212#define IWL_LEGACY_SWITCH_MIMO2_AB 3
213#define IWL_LEGACY_SWITCH_MIMO2_AC 4
214#define IWL_LEGACY_SWITCH_MIMO2_BC 5
212 215
213/* possible actions when in siso mode */ 216/* possible actions when in siso mode */
214#define IWL_SISO_SWITCH_ANTENNA 0 217#define IWL_SISO_SWITCH_ANTENNA1 0
215#define IWL_SISO_SWITCH_MIMO2 1 218#define IWL_SISO_SWITCH_ANTENNA2 1
216#define IWL_SISO_SWITCH_GI 2 219#define IWL_SISO_SWITCH_MIMO2_AB 2
220#define IWL_SISO_SWITCH_MIMO2_AC 3
221#define IWL_SISO_SWITCH_MIMO2_BC 4
222#define IWL_SISO_SWITCH_GI 5
217 223
218/* possible actions when in mimo mode */ 224/* possible actions when in mimo mode */
219#define IWL_MIMO_SWITCH_ANTENNA_A 0 225#define IWL_MIMO2_SWITCH_ANTENNA1 0
220#define IWL_MIMO_SWITCH_ANTENNA_B 1 226#define IWL_MIMO2_SWITCH_ANTENNA2 1
221#define IWL_MIMO_SWITCH_GI 2 227#define IWL_MIMO2_SWITCH_SISO_A 2
222 228#define IWL_MIMO2_SWITCH_SISO_B 3
223/*FIXME:RS:separate MIMO2/3 transitions*/ 229#define IWL_MIMO2_SWITCH_SISO_C 4
230#define IWL_MIMO2_SWITCH_GI 5
224 231
225/*FIXME:RS:add posible acctions for MIMO3*/ 232/*FIXME:RS:add posible acctions for MIMO3*/
226 233
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 061ffba9c884..273762769767 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -181,14 +181,14 @@ static int iwl4965_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
181} 181}
182 182
183/** 183/**
184 * iwl4965_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed 184 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
185 * @priv: staging_rxon is compared to active_rxon 185 * @priv: staging_rxon is compared to active_rxon
186 * 186 *
187 * If the RXON structure is changing enough to require a new tune, 187 * If the RXON structure is changing enough to require a new tune,
188 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that 188 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
189 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 189 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
190 */ 190 */
191static int iwl4965_full_rxon_required(struct iwl_priv *priv) 191static int iwl_full_rxon_required(struct iwl_priv *priv)
192{ 192{
193 193
194 /* These items are only settable from the full RXON command */ 194 /* These items are only settable from the full RXON command */
@@ -207,7 +207,6 @@ static int iwl4965_full_rxon_required(struct iwl_priv *priv)
207 priv->active_rxon.ofdm_ht_single_stream_basic_rates) || 207 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
208 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates != 208 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
209 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) || 209 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
210 (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
211 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id)) 210 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
212 return 1; 211 return 1;
213 212
@@ -263,7 +262,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
263 /* If we don't need to send a full RXON, we can use 262 /* If we don't need to send a full RXON, we can use
264 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter 263 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
265 * and other flags for the current radio configuration. */ 264 * and other flags for the current radio configuration. */
266 if (!iwl4965_full_rxon_required(priv)) { 265 if (!iwl_full_rxon_required(priv)) {
267 ret = iwl_send_rxon_assoc(priv); 266 ret = iwl_send_rxon_assoc(priv);
268 if (ret) { 267 if (ret) {
269 IWL_ERROR("Error setting RXON_ASSOC (%d)\n", ret); 268 IWL_ERROR("Error setting RXON_ASSOC (%d)\n", ret);
@@ -338,7 +337,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
338 /* If we have set the ASSOC_MSK and we are in BSS mode then 337 /* If we have set the ASSOC_MSK and we are in BSS mode then
339 * add the IWL_AP_ID to the station rate table */ 338 * add the IWL_AP_ID to the station rate table */
340 if (new_assoc) { 339 if (new_assoc) {
341 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { 340 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
342 ret = iwl_rxon_add_station(priv, 341 ret = iwl_rxon_add_station(priv,
343 priv->active_rxon.bssid_addr, 1); 342 priv->active_rxon.bssid_addr, 1);
344 if (ret == IWL_INVALID_STATION) { 343 if (ret == IWL_INVALID_STATION) {
@@ -449,8 +448,8 @@ static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
449 const u8 *dest, int left) 448 const u8 *dest, int left)
450{ 449{
451 if (!iwl_is_associated(priv) || !priv->ibss_beacon || 450 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
452 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && 451 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
453 (priv->iw_mode != IEEE80211_IF_TYPE_AP))) 452 (priv->iw_mode != NL80211_IFTYPE_AP)))
454 return 0; 453 return 0;
455 454
456 if (priv->ibss_beacon->len > left) 455 if (priv->ibss_beacon->len > left)
@@ -486,7 +485,7 @@ static u8 iwl4965_rate_get_lowest_plcp(struct iwl_priv *priv)
486 return IWL_RATE_6M_PLCP; 485 return IWL_RATE_6M_PLCP;
487} 486}
488 487
489unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, 488static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
490 struct iwl_frame *frame, u8 rate) 489 struct iwl_frame *frame, u8 rate)
491{ 490{
492 struct iwl_tx_beacon_cmd *tx_beacon_cmd; 491 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
@@ -565,8 +564,6 @@ static void iwl4965_ht_conf(struct iwl_priv *priv,
565 if (!iwl_conf->is_ht) 564 if (!iwl_conf->is_ht)
566 return; 565 return;
567 566
568 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
569
570 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20) 567 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
571 iwl_conf->sgf |= HT_SHORT_GI_20MHZ; 568 iwl_conf->sgf |= HT_SHORT_GI_20MHZ;
572 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40) 569 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
@@ -587,8 +584,8 @@ static void iwl4965_ht_conf(struct iwl_priv *priv,
587 iwl_conf->supported_chan_width = 0; 584 iwl_conf->supported_chan_width = 0;
588 } 585 }
589 586
590 iwl_conf->tx_mimo_ps_mode = 587 iwl_conf->sm_ps = (u8)((ht_conf->cap & IEEE80211_HT_CAP_SM_PS) >> 2);
591 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2); 588
592 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16); 589 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
593 590
594 iwl_conf->control_channel = ht_bss_conf->primary_channel; 591 iwl_conf->control_channel = ht_bss_conf->primary_channel;
@@ -675,7 +672,7 @@ static void iwl4965_setup_rxon_timing(struct iwl_priv *priv)
675 beacon_int = priv->beacon_int; 672 beacon_int = priv->beacon_int;
676 spin_unlock_irqrestore(&priv->lock, flags); 673 spin_unlock_irqrestore(&priv->lock, flags);
677 674
678 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { 675 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
679 if (beacon_int == 0) { 676 if (beacon_int == 0) {
680 priv->rxon_timing.beacon_interval = cpu_to_le16(100); 677 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
681 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400); 678 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
@@ -724,7 +721,7 @@ static void iwl_set_flags_for_band(struct iwl_priv *priv,
724 else 721 else
725 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 722 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
726 723
727 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 724 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
728 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 725 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
729 726
730 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; 727 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
@@ -743,23 +740,23 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
743 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); 740 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
744 741
745 switch (priv->iw_mode) { 742 switch (priv->iw_mode) {
746 case IEEE80211_IF_TYPE_AP: 743 case NL80211_IFTYPE_AP:
747 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; 744 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
748 break; 745 break;
749 746
750 case IEEE80211_IF_TYPE_STA: 747 case NL80211_IFTYPE_STATION:
751 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; 748 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
752 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; 749 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
753 break; 750 break;
754 751
755 case IEEE80211_IF_TYPE_IBSS: 752 case NL80211_IFTYPE_ADHOC:
756 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS; 753 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
757 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; 754 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
758 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | 755 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
759 RXON_FILTER_ACCEPT_GRP_MSK; 756 RXON_FILTER_ACCEPT_GRP_MSK;
760 break; 757 break;
761 758
762 case IEEE80211_IF_TYPE_MNTR: 759 case NL80211_IFTYPE_MONITOR:
763 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER; 760 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
764 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | 761 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
765 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 762 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
@@ -788,7 +785,7 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
788 * in some case A channels are all non IBSS 785 * in some case A channels are all non IBSS
789 * in this case force B/G channel 786 * in this case force B/G channel
790 */ 787 */
791 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 788 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
792 !(is_channel_ibss(ch_info))) 789 !(is_channel_ibss(ch_info)))
793 ch_info = &priv->channel_info[0]; 790 ch_info = &priv->channel_info[0];
794 791
@@ -1185,7 +1182,7 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
1185 le32_to_cpu(beacon->low_tsf), rate); 1182 le32_to_cpu(beacon->low_tsf), rate);
1186#endif 1183#endif
1187 1184
1188 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 1185 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
1189 (!test_bit(STATUS_EXIT_PENDING, &priv->status))) 1186 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
1190 queue_work(priv->workqueue, &priv->beacon_update); 1187 queue_work(priv->workqueue, &priv->beacon_update);
1191} 1188}
@@ -1273,7 +1270,7 @@ int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
1273 1270
1274 if (src == IWL_PWR_SRC_VAUX) { 1271 if (src == IWL_PWR_SRC_VAUX) {
1275 u32 val; 1272 u32 val;
1276 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE, 1273 ret = pci_read_config_dword(priv->pci_dev, PCI_CFG_POWER_SOURCE,
1277 &val); 1274 &val);
1278 1275
1279 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) 1276 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
@@ -2190,7 +2187,10 @@ static void __iwl4965_down(struct iwl_priv *priv)
2190 udelay(5); 2187 udelay(5);
2191 2188
2192 /* FIXME: apm_ops.suspend(priv) */ 2189 /* FIXME: apm_ops.suspend(priv) */
2193 priv->cfg->ops->lib->apm_ops.reset(priv); 2190 if (exit_pending || test_bit(STATUS_IN_SUSPEND, &priv->status))
2191 priv->cfg->ops->lib->apm_ops.stop(priv);
2192 else
2193 priv->cfg->ops->lib->apm_ops.reset(priv);
2194 priv->cfg->ops->lib->free_shared_mem(priv); 2194 priv->cfg->ops->lib->free_shared_mem(priv);
2195 2195
2196 exit: 2196 exit:
@@ -2388,7 +2388,7 @@ static void iwl4965_bg_set_monitor(struct work_struct *work)
2388 2388
2389 mutex_lock(&priv->mutex); 2389 mutex_lock(&priv->mutex);
2390 2390
2391 ret = iwl4965_set_mode(priv, IEEE80211_IF_TYPE_MNTR); 2391 ret = iwl4965_set_mode(priv, NL80211_IFTYPE_MONITOR);
2392 2392
2393 if (ret) { 2393 if (ret) {
2394 if (ret == -EAGAIN) 2394 if (ret == -EAGAIN)
@@ -2469,7 +2469,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2469 DECLARE_MAC_BUF(mac); 2469 DECLARE_MAC_BUF(mac);
2470 unsigned long flags; 2470 unsigned long flags;
2471 2471
2472 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2472 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2473 IWL_ERROR("%s Should not be called in AP mode\n", __func__); 2473 IWL_ERROR("%s Should not be called in AP mode\n", __func__);
2474 return; 2474 return;
2475 } 2475 }
@@ -2486,6 +2486,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2486 if (!priv->vif || !priv->is_open) 2486 if (!priv->vif || !priv->is_open)
2487 return; 2487 return;
2488 2488
2489 iwl_power_cancel_timeout(priv);
2489 iwl_scan_cancel_timeout(priv, 200); 2490 iwl_scan_cancel_timeout(priv, 200);
2490 2491
2491 conf = ieee80211_get_hw_conf(priv->hw); 2492 conf = ieee80211_get_hw_conf(priv->hw);
@@ -2523,7 +2524,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2523 else 2524 else
2524 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2525 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2525 2526
2526 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 2527 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2527 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2528 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2528 2529
2529 } 2530 }
@@ -2531,10 +2532,10 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2531 iwl4965_commit_rxon(priv); 2532 iwl4965_commit_rxon(priv);
2532 2533
2533 switch (priv->iw_mode) { 2534 switch (priv->iw_mode) {
2534 case IEEE80211_IF_TYPE_STA: 2535 case NL80211_IFTYPE_STATION:
2535 break; 2536 break;
2536 2537
2537 case IEEE80211_IF_TYPE_IBSS: 2538 case NL80211_IFTYPE_ADHOC:
2538 2539
2539 /* assume default assoc id */ 2540 /* assume default assoc id */
2540 priv->assoc_id = 1; 2541 priv->assoc_id = 1;
@@ -2550,18 +2551,23 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2550 break; 2551 break;
2551 } 2552 }
2552 2553
2553 /* Enable Rx differential gain and sensitivity calibrations */ 2554 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2554 iwl_chain_noise_reset(priv);
2555 priv->start_calib = 1;
2556
2557 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2558 priv->assoc_station_added = 1; 2555 priv->assoc_station_added = 1;
2559 2556
2560 spin_lock_irqsave(&priv->lock, flags); 2557 spin_lock_irqsave(&priv->lock, flags);
2561 iwl_activate_qos(priv, 0); 2558 iwl_activate_qos(priv, 0);
2562 spin_unlock_irqrestore(&priv->lock, flags); 2559 spin_unlock_irqrestore(&priv->lock, flags);
2563 2560
2564 iwl_power_update_mode(priv, 0); 2561 /* the chain noise calibration will enabled PM upon completion
2562 * If chain noise has already been run, then we need to enable
2563 * power management here */
2564 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
2565 iwl_power_enable_management(priv);
2566
2567 /* Enable Rx differential gain and sensitivity calibrations */
2568 iwl_chain_noise_reset(priv);
2569 priv->start_calib = 1;
2570
2565 /* we have just associated, don't start scan too early */ 2571 /* we have just associated, don't start scan too early */
2566 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 2572 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
2567} 2573}
@@ -2602,6 +2608,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2602{ 2608{
2603 struct iwl_priv *priv = hw->priv; 2609 struct iwl_priv *priv = hw->priv;
2604 int ret; 2610 int ret;
2611 u16 pci_cmd;
2605 2612
2606 IWL_DEBUG_MAC80211("enter\n"); 2613 IWL_DEBUG_MAC80211("enter\n");
2607 2614
@@ -2612,6 +2619,13 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2612 pci_restore_state(priv->pci_dev); 2619 pci_restore_state(priv->pci_dev);
2613 pci_enable_msi(priv->pci_dev); 2620 pci_enable_msi(priv->pci_dev);
2614 2621
2622 /* enable interrupts if needed: hw bug w/a */
2623 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
2624 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
2625 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
2626 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
2627 }
2628
2615 ret = request_irq(priv->pci_dev->irq, iwl4965_isr, IRQF_SHARED, 2629 ret = request_irq(priv->pci_dev->irq, iwl4965_isr, IRQF_SHARED,
2616 DRV_NAME, priv); 2630 DRV_NAME, priv);
2617 if (ret) { 2631 if (ret) {
@@ -2720,12 +2734,6 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2720 2734
2721 IWL_DEBUG_MACDUMP("enter\n"); 2735 IWL_DEBUG_MACDUMP("enter\n");
2722 2736
2723 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
2724 IWL_DEBUG_MAC80211("leave - monitor\n");
2725 dev_kfree_skb_any(skb);
2726 return 0;
2727 }
2728
2729 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 2737 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2730 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 2738 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2731 2739
@@ -2790,8 +2798,6 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2790 mutex_lock(&priv->mutex); 2798 mutex_lock(&priv->mutex);
2791 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value); 2799 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
2792 2800
2793 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
2794
2795 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) { 2801 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) {
2796 IWL_DEBUG_MAC80211("leave - RF-KILL - waiting for uCode\n"); 2802 IWL_DEBUG_MAC80211("leave - RF-KILL - waiting for uCode\n");
2797 goto out; 2803 goto out;
@@ -2822,7 +2828,7 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2822 goto out; 2828 goto out;
2823 } 2829 }
2824 2830
2825 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS && 2831 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2826 !is_channel_ibss(ch_info)) { 2832 !is_channel_ibss(ch_info)) {
2827 IWL_ERROR("channel %d in band %d not IBSS channel\n", 2833 IWL_ERROR("channel %d in band %d not IBSS channel\n",
2828 conf->channel->hw_value, conf->channel->band); 2834 conf->channel->hw_value, conf->channel->band);
@@ -2843,7 +2849,7 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2843 ) 2849 )
2844 priv->staging_rxon.flags = 0; 2850 priv->staging_rxon.flags = 0;
2845 2851
2846 iwl_set_rxon_channel(priv, conf->channel->band, channel); 2852 iwl_set_rxon_channel(priv, conf->channel);
2847 2853
2848 iwl_set_flags_for_band(priv, conf->channel->band); 2854 iwl_set_flags_for_band(priv, conf->channel->band);
2849 2855
@@ -2937,7 +2943,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2937 priv->staging_rxon.flags &= 2943 priv->staging_rxon.flags &=
2938 ~RXON_FLG_SHORT_SLOT_MSK; 2944 ~RXON_FLG_SHORT_SLOT_MSK;
2939 2945
2940 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 2946 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2941 priv->staging_rxon.flags &= 2947 priv->staging_rxon.flags &=
2942 ~RXON_FLG_SHORT_SLOT_MSK; 2948 ~RXON_FLG_SHORT_SLOT_MSK;
2943 } 2949 }
@@ -2976,7 +2982,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
2976 return 0; 2982 return 0;
2977 } 2983 }
2978 2984
2979 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS && 2985 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2980 conf->changed & IEEE80211_IFCC_BEACON) { 2986 conf->changed & IEEE80211_IFCC_BEACON) {
2981 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 2987 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2982 if (!beacon) 2988 if (!beacon)
@@ -2986,7 +2992,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
2986 return rc; 2992 return rc;
2987 } 2993 }
2988 2994
2989 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 2995 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
2990 (!conf->ssid_len)) { 2996 (!conf->ssid_len)) {
2991 IWL_DEBUG_MAC80211 2997 IWL_DEBUG_MAC80211
2992 ("Leaving in AP mode because HostAPD is not ready.\n"); 2998 ("Leaving in AP mode because HostAPD is not ready.\n");
@@ -3009,7 +3015,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
3009 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { 3015 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
3010 */ 3016 */
3011 3017
3012 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 3018 if (priv->iw_mode == NL80211_IFTYPE_AP) {
3013 if (!conf->bssid) { 3019 if (!conf->bssid) {
3014 conf->bssid = priv->mac_addr; 3020 conf->bssid = priv->mac_addr;
3015 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); 3021 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
@@ -3044,11 +3050,11 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
3044 * to verify) - jpk */ 3050 * to verify) - jpk */
3045 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 3051 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
3046 3052
3047 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 3053 if (priv->iw_mode == NL80211_IFTYPE_AP)
3048 iwl4965_config_ap(priv); 3054 iwl4965_config_ap(priv);
3049 else { 3055 else {
3050 rc = iwl4965_commit_rxon(priv); 3056 rc = iwl4965_commit_rxon(priv);
3051 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) 3057 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
3052 iwl_rxon_add_station( 3058 iwl_rxon_add_station(
3053 priv, priv->active_rxon.bssid_addr, 1); 3059 priv, priv->active_rxon.bssid_addr, 1);
3054 } 3060 }
@@ -3084,7 +3090,7 @@ static void iwl4965_configure_filter(struct ieee80211_hw *hw,
3084 3090
3085 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) { 3091 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) {
3086 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n", 3092 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
3087 IEEE80211_IF_TYPE_MNTR, 3093 NL80211_IFTYPE_MONITOR,
3088 changed_flags, *total_flags); 3094 changed_flags, *total_flags);
3089 /* queue work 'cuz mac80211 is holding a lock which 3095 /* queue work 'cuz mac80211 is holding a lock which
3090 * prevents us from issuing (synchronous) f/w cmds */ 3096 * prevents us from issuing (synchronous) f/w cmds */
@@ -3181,9 +3187,9 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
3181 3187
3182} 3188}
3183 3189
3184static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) 3190static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t ssid_len)
3185{ 3191{
3186 int rc = 0; 3192 int ret;
3187 unsigned long flags; 3193 unsigned long flags;
3188 struct iwl_priv *priv = hw->priv; 3194 struct iwl_priv *priv = hw->priv;
3189 3195
@@ -3193,41 +3199,40 @@ static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
3193 spin_lock_irqsave(&priv->lock, flags); 3199 spin_lock_irqsave(&priv->lock, flags);
3194 3200
3195 if (!iwl_is_ready_rf(priv)) { 3201 if (!iwl_is_ready_rf(priv)) {
3196 rc = -EIO; 3202 ret = -EIO;
3197 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); 3203 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
3198 goto out_unlock; 3204 goto out_unlock;
3199 } 3205 }
3200 3206
3201 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */ 3207 if (priv->iw_mode == NL80211_IFTYPE_AP) { /* APs don't scan */
3202 rc = -EIO; 3208 ret = -EIO;
3203 IWL_ERROR("ERROR: APs don't scan\n"); 3209 IWL_ERROR("ERROR: APs don't scan\n");
3204 goto out_unlock; 3210 goto out_unlock;
3205 } 3211 }
3206 3212
3207 /* we don't schedule scan within next_scan_jiffies period */ 3213 /* we don't schedule scan within next_scan_jiffies period */
3208 if (priv->next_scan_jiffies && 3214 if (priv->next_scan_jiffies &&
3209 time_after(priv->next_scan_jiffies, jiffies)) { 3215 time_after(priv->next_scan_jiffies, jiffies)) {
3210 rc = -EAGAIN; 3216 IWL_DEBUG_SCAN("scan rejected: within next scan period\n");
3217 ret = -EAGAIN;
3211 goto out_unlock; 3218 goto out_unlock;
3212 } 3219 }
3213 /* if we just finished scan ask for delay */ 3220 /* if we just finished scan ask for delay */
3214 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies + 3221 if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
3215 IWL_DELAY_NEXT_SCAN, jiffies)) { 3222 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
3216 rc = -EAGAIN; 3223 IWL_DEBUG_SCAN("scan rejected: within previous scan period\n");
3224 ret = -EAGAIN;
3217 goto out_unlock; 3225 goto out_unlock;
3218 } 3226 }
3219 if (len) { 3227 if (ssid_len) {
3220 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
3221 iwl_escape_essid(ssid, len), (int)len);
3222
3223 priv->one_direct_scan = 1; 3228 priv->one_direct_scan = 1;
3224 priv->direct_ssid_len = (u8) 3229 priv->direct_ssid_len = min_t(u8, ssid_len, IW_ESSID_MAX_SIZE);
3225 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
3226 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); 3230 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
3227 } else 3231 } else {
3228 priv->one_direct_scan = 0; 3232 priv->one_direct_scan = 0;
3233 }
3229 3234
3230 rc = iwl_scan_initiate(priv); 3235 ret = iwl_scan_initiate(priv);
3231 3236
3232 IWL_DEBUG_MAC80211("leave\n"); 3237 IWL_DEBUG_MAC80211("leave\n");
3233 3238
@@ -3235,7 +3240,7 @@ out_unlock:
3235 spin_unlock_irqrestore(&priv->lock, flags); 3240 spin_unlock_irqrestore(&priv->lock, flags);
3236 mutex_unlock(&priv->mutex); 3241 mutex_unlock(&priv->mutex);
3237 3242
3238 return rc; 3243 return ret;
3239} 3244}
3240 3245
3241static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, 3246static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -3324,7 +3329,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3324 * in 1X mode. 3329 * in 1X mode.
3325 * In legacy wep mode, we use another host command to the uCode */ 3330 * In legacy wep mode, we use another host command to the uCode */
3326 if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id && 3331 if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id &&
3327 priv->iw_mode != IEEE80211_IF_TYPE_AP) { 3332 priv->iw_mode != NL80211_IFTYPE_AP) {
3328 if (cmd == SET_KEY) 3333 if (cmd == SET_KEY)
3329 is_default_wep_key = !priv->key_mapping_key; 3334 is_default_wep_key = !priv->key_mapping_key;
3330 else 3335 else
@@ -3395,7 +3400,7 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3395 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; 3400 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
3396 priv->qos_data.qos_active = 1; 3401 priv->qos_data.qos_active = 1;
3397 3402
3398 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 3403 if (priv->iw_mode == NL80211_IFTYPE_AP)
3399 iwl_activate_qos(priv, 1); 3404 iwl_activate_qos(priv, 1);
3400 else if (priv->assoc_id && iwl_is_associated(priv)) 3405 else if (priv->assoc_id && iwl_is_associated(priv))
3401 iwl_activate_qos(priv, 0); 3406 iwl_activate_qos(priv, 0);
@@ -3408,13 +3413,13 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3408 3413
3409static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 3414static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
3410 enum ieee80211_ampdu_mlme_action action, 3415 enum ieee80211_ampdu_mlme_action action,
3411 const u8 *addr, u16 tid, u16 *ssn) 3416 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3412{ 3417{
3413 struct iwl_priv *priv = hw->priv; 3418 struct iwl_priv *priv = hw->priv;
3414 DECLARE_MAC_BUF(mac); 3419 DECLARE_MAC_BUF(mac);
3415 3420
3416 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n", 3421 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
3417 print_mac(mac, addr), tid); 3422 print_mac(mac, sta->addr), tid);
3418 3423
3419 if (!(priv->cfg->sku & IWL_SKU_N)) 3424 if (!(priv->cfg->sku & IWL_SKU_N))
3420 return -EACCES; 3425 return -EACCES;
@@ -3422,16 +3427,16 @@ static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
3422 switch (action) { 3427 switch (action) {
3423 case IEEE80211_AMPDU_RX_START: 3428 case IEEE80211_AMPDU_RX_START:
3424 IWL_DEBUG_HT("start Rx\n"); 3429 IWL_DEBUG_HT("start Rx\n");
3425 return iwl_rx_agg_start(priv, addr, tid, *ssn); 3430 return iwl_rx_agg_start(priv, sta->addr, tid, *ssn);
3426 case IEEE80211_AMPDU_RX_STOP: 3431 case IEEE80211_AMPDU_RX_STOP:
3427 IWL_DEBUG_HT("stop Rx\n"); 3432 IWL_DEBUG_HT("stop Rx\n");
3428 return iwl_rx_agg_stop(priv, addr, tid); 3433 return iwl_rx_agg_stop(priv, sta->addr, tid);
3429 case IEEE80211_AMPDU_TX_START: 3434 case IEEE80211_AMPDU_TX_START:
3430 IWL_DEBUG_HT("start Tx\n"); 3435 IWL_DEBUG_HT("start Tx\n");
3431 return iwl_tx_agg_start(priv, addr, tid, ssn); 3436 return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
3432 case IEEE80211_AMPDU_TX_STOP: 3437 case IEEE80211_AMPDU_TX_STOP:
3433 IWL_DEBUG_HT("stop Tx\n"); 3438 IWL_DEBUG_HT("stop Tx\n");
3434 return iwl_tx_agg_stop(priv, addr, tid); 3439 return iwl_tx_agg_stop(priv, sta->addr, tid);
3435 default: 3440 default:
3436 IWL_DEBUG_HT("unknown\n"); 3441 IWL_DEBUG_HT("unknown\n");
3437 return -EINVAL; 3442 return -EINVAL;
@@ -3513,7 +3518,7 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
3513 3518
3514 priv->beacon_int = priv->hw->conf.beacon_int; 3519 priv->beacon_int = priv->hw->conf.beacon_int;
3515 priv->timestamp = 0; 3520 priv->timestamp = 0;
3516 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA)) 3521 if ((priv->iw_mode == NL80211_IFTYPE_STATION))
3517 priv->beacon_int = 0; 3522 priv->beacon_int = 0;
3518 3523
3519 spin_unlock_irqrestore(&priv->lock, flags); 3524 spin_unlock_irqrestore(&priv->lock, flags);
@@ -3527,7 +3532,7 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
3527 /* we are restarting association process 3532 /* we are restarting association process
3528 * clear RXON_FILTER_ASSOC_MSK bit 3533 * clear RXON_FILTER_ASSOC_MSK bit
3529 */ 3534 */
3530 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { 3535 if (priv->iw_mode != NL80211_IFTYPE_AP) {
3531 iwl_scan_cancel_timeout(priv, 100); 3536 iwl_scan_cancel_timeout(priv, 100);
3532 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3537 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3533 iwl4965_commit_rxon(priv); 3538 iwl4965_commit_rxon(priv);
@@ -3536,7 +3541,17 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
3536 iwl_power_update_mode(priv, 0); 3541 iwl_power_update_mode(priv, 0);
3537 3542
3538 /* Per mac80211.h: This is only used in IBSS mode... */ 3543 /* Per mac80211.h: This is only used in IBSS mode... */
3539 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 3544 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
3545
3546 /* switch to CAM during association period.
3547 * the ucode will block any association/authentication
3548 * frome during assiciation period if it can not hear
3549 * the AP because of PM. the timer enable PM back is
3550 * association do not complete
3551 */
3552 if (priv->hw->conf.channel->flags & (IEEE80211_CHAN_PASSIVE_SCAN |
3553 IEEE80211_CHAN_RADAR))
3554 iwl_power_disable_management(priv, 3000);
3540 3555
3541 IWL_DEBUG_MAC80211("leave - not in IBSS\n"); 3556 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
3542 mutex_unlock(&priv->mutex); 3557 mutex_unlock(&priv->mutex);
@@ -3565,7 +3580,7 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
3565 return -EIO; 3580 return -EIO;
3566 } 3581 }
3567 3582
3568 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 3583 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
3569 IWL_DEBUG_MAC80211("leave - not IBSS\n"); 3584 IWL_DEBUG_MAC80211("leave - not IBSS\n");
3570 mutex_unlock(&priv->mutex); 3585 mutex_unlock(&priv->mutex);
3571 return -EIO; 3586 return -EIO;
@@ -3580,7 +3595,7 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
3580 3595
3581 priv->assoc_id = 0; 3596 priv->assoc_id = 0;
3582 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 3597 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
3583 priv->timestamp = le64_to_cpu(timestamp) + (priv->beacon_int * 1000); 3598 priv->timestamp = le64_to_cpu(timestamp);
3584 3599
3585 IWL_DEBUG_MAC80211("leave\n"); 3600 IWL_DEBUG_MAC80211("leave\n");
3586 spin_unlock_irqrestore(&priv->lock, flags); 3601 spin_unlock_irqrestore(&priv->lock, flags);
@@ -3622,11 +3637,11 @@ static ssize_t store_debug_level(struct device *d,
3622 const char *buf, size_t count) 3637 const char *buf, size_t count)
3623{ 3638{
3624 struct iwl_priv *priv = d->driver_data; 3639 struct iwl_priv *priv = d->driver_data;
3625 char *p = (char *)buf; 3640 unsigned long val;
3626 u32 val; 3641 int ret;
3627 3642
3628 val = simple_strtoul(p, &p, 0); 3643 ret = strict_strtoul(buf, 0, &val);
3629 if (p == buf) 3644 if (ret)
3630 printk(KERN_INFO DRV_NAME 3645 printk(KERN_INFO DRV_NAME
3631 ": %s is not in hex or decimal form.\n", buf); 3646 ": %s is not in hex or decimal form.\n", buf);
3632 else 3647 else
@@ -3698,11 +3713,11 @@ static ssize_t store_tx_power(struct device *d,
3698 const char *buf, size_t count) 3713 const char *buf, size_t count)
3699{ 3714{
3700 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; 3715 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
3701 char *p = (char *)buf; 3716 unsigned long val;
3702 u32 val; 3717 int ret;
3703 3718
3704 val = simple_strtoul(p, &p, 10); 3719 ret = strict_strtoul(buf, 10, &val);
3705 if (p == buf) 3720 if (ret)
3706 printk(KERN_INFO DRV_NAME 3721 printk(KERN_INFO DRV_NAME
3707 ": %s is not in decimal form.\n", buf); 3722 ": %s is not in decimal form.\n", buf);
3708 else 3723 else
@@ -3726,7 +3741,12 @@ static ssize_t store_flags(struct device *d,
3726 const char *buf, size_t count) 3741 const char *buf, size_t count)
3727{ 3742{
3728 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; 3743 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
3729 u32 flags = simple_strtoul(buf, NULL, 0); 3744 unsigned long val;
3745 u32 flags;
3746 int ret = strict_strtoul(buf, 0, &val);
3747 if (ret)
3748 return ret;
3749 flags = (u32)val;
3730 3750
3731 mutex_lock(&priv->mutex); 3751 mutex_lock(&priv->mutex);
3732 if (le32_to_cpu(priv->staging_rxon.flags) != flags) { 3752 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
@@ -3734,8 +3754,7 @@ static ssize_t store_flags(struct device *d,
3734 if (iwl_scan_cancel_timeout(priv, 100)) 3754 if (iwl_scan_cancel_timeout(priv, 100))
3735 IWL_WARNING("Could not cancel scan.\n"); 3755 IWL_WARNING("Could not cancel scan.\n");
3736 else { 3756 else {
3737 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n", 3757 IWL_DEBUG_INFO("Commit rxon.flags = 0x%04X\n", flags);
3738 flags);
3739 priv->staging_rxon.flags = cpu_to_le32(flags); 3758 priv->staging_rxon.flags = cpu_to_le32(flags);
3740 iwl4965_commit_rxon(priv); 3759 iwl4965_commit_rxon(priv);
3741 } 3760 }
@@ -3761,7 +3780,12 @@ static ssize_t store_filter_flags(struct device *d,
3761 const char *buf, size_t count) 3780 const char *buf, size_t count)
3762{ 3781{
3763 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; 3782 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
3764 u32 filter_flags = simple_strtoul(buf, NULL, 0); 3783 unsigned long val;
3784 u32 filter_flags;
3785 int ret = strict_strtoul(buf, 0, &val);
3786 if (ret)
3787 return ret;
3788 filter_flags = (u32)val;
3765 3789
3766 mutex_lock(&priv->mutex); 3790 mutex_lock(&priv->mutex);
3767 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { 3791 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
@@ -3862,10 +3886,12 @@ static ssize_t store_retry_rate(struct device *d,
3862 const char *buf, size_t count) 3886 const char *buf, size_t count)
3863{ 3887{
3864 struct iwl_priv *priv = dev_get_drvdata(d); 3888 struct iwl_priv *priv = dev_get_drvdata(d);
3889 long val;
3890 int ret = strict_strtol(buf, 10, &val);
3891 if (!ret)
3892 return ret;
3865 3893
3866 priv->retry_rate = simple_strtoul(buf, NULL, 0); 3894 priv->retry_rate = (val > 0) ? val : 1;
3867 if (priv->retry_rate <= 0)
3868 priv->retry_rate = 1;
3869 3895
3870 return count; 3896 return count;
3871} 3897}
@@ -3886,9 +3912,9 @@ static ssize_t store_power_level(struct device *d,
3886{ 3912{
3887 struct iwl_priv *priv = dev_get_drvdata(d); 3913 struct iwl_priv *priv = dev_get_drvdata(d);
3888 int ret; 3914 int ret;
3889 int mode; 3915 unsigned long mode;
3916
3890 3917
3891 mode = simple_strtoul(buf, NULL, 0);
3892 mutex_lock(&priv->mutex); 3918 mutex_lock(&priv->mutex);
3893 3919
3894 if (!iwl_is_ready(priv)) { 3920 if (!iwl_is_ready(priv)) {
@@ -3896,6 +3922,10 @@ static ssize_t store_power_level(struct device *d,
3896 goto out; 3922 goto out;
3897 } 3923 }
3898 3924
3925 ret = strict_strtoul(buf, 10, &mode);
3926 if (ret)
3927 goto out;
3928
3899 ret = iwl_power_set_user_mode(priv, mode); 3929 ret = iwl_power_set_user_mode(priv, mode);
3900 if (ret) { 3930 if (ret) {
3901 IWL_DEBUG_MAC80211("failed setting power mode.\n"); 3931 IWL_DEBUG_MAC80211("failed setting power mode.\n");
@@ -4075,6 +4105,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
4075 /* FIXME : remove when resolved PENDING */ 4105 /* FIXME : remove when resolved PENDING */
4076 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 4106 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
4077 iwl_setup_scan_deferred_work(priv); 4107 iwl_setup_scan_deferred_work(priv);
4108 iwl_setup_power_deferred_work(priv);
4078 4109
4079 if (priv->cfg->ops->lib->setup_deferred_work) 4110 if (priv->cfg->ops->lib->setup_deferred_work)
4080 priv->cfg->ops->lib->setup_deferred_work(priv); 4111 priv->cfg->ops->lib->setup_deferred_work(priv);
@@ -4094,6 +4125,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
4094 4125
4095 cancel_delayed_work_sync(&priv->init_alive_start); 4126 cancel_delayed_work_sync(&priv->init_alive_start);
4096 cancel_delayed_work(&priv->scan_check); 4127 cancel_delayed_work(&priv->scan_check);
4128 cancel_delayed_work_sync(&priv->set_power_save);
4097 cancel_delayed_work(&priv->alive_start); 4129 cancel_delayed_work(&priv->alive_start);
4098 cancel_work_sync(&priv->beacon_update); 4130 cancel_work_sync(&priv->beacon_update);
4099 del_timer_sync(&priv->statistics_periodic); 4131 del_timer_sync(&priv->statistics_periodic);
@@ -4142,7 +4174,7 @@ static struct ieee80211_ops iwl4965_hw_ops = {
4142 .reset_tsf = iwl4965_mac_reset_tsf, 4174 .reset_tsf = iwl4965_mac_reset_tsf,
4143 .bss_info_changed = iwl4965_bss_info_changed, 4175 .bss_info_changed = iwl4965_bss_info_changed,
4144 .ampdu_action = iwl4965_mac_ampdu_action, 4176 .ampdu_action = iwl4965_mac_ampdu_action,
4145 .hw_scan = iwl4965_mac_hw_scan 4177 .hw_scan = iwl_mac_hw_scan
4146}; 4178};
4147 4179
4148static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 4180static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -4217,9 +4249,6 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4217 4249
4218 pci_set_drvdata(pdev, priv); 4250 pci_set_drvdata(pdev, priv);
4219 4251
4220 /* We disable the RETRY_TIMEOUT register (0x41) to keep
4221 * PCI Tx retries from interfering with C3 CPU state */
4222 pci_write_config_byte(pdev, 0x41, 0x00);
4223 4252
4224 /*********************** 4253 /***********************
4225 * 3. Read REV register 4254 * 3. Read REV register
@@ -4239,6 +4268,10 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4239 ": Detected Intel Wireless WiFi Link %s REV=0x%X\n", 4268 ": Detected Intel Wireless WiFi Link %s REV=0x%X\n",
4240 priv->cfg->name, priv->hw_rev); 4269 priv->cfg->name, priv->hw_rev);
4241 4270
4271 /* We disable the RETRY_TIMEOUT register (0x41) to keep
4272 * PCI Tx retries from interfering with C3 CPU state */
4273 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
4274
4242 /* amp init */ 4275 /* amp init */
4243 err = priv->cfg->ops->lib->apm_ops.init(priv); 4276 err = priv->cfg->ops->lib->apm_ops.init(priv);
4244 if (err < 0) { 4277 if (err < 0) {
@@ -4364,15 +4397,18 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
4364 iwl_dbgfs_unregister(priv); 4397 iwl_dbgfs_unregister(priv);
4365 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group); 4398 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
4366 4399
4400 /* ieee80211_unregister_hw call wil cause iwl4965_mac_stop to
4401 * to be called and iwl4965_down since we are removing the device
4402 * we need to set STATUS_EXIT_PENDING bit.
4403 */
4404 set_bit(STATUS_EXIT_PENDING, &priv->status);
4367 if (priv->mac80211_registered) { 4405 if (priv->mac80211_registered) {
4368 ieee80211_unregister_hw(priv->hw); 4406 ieee80211_unregister_hw(priv->hw);
4369 priv->mac80211_registered = 0; 4407 priv->mac80211_registered = 0;
4408 } else {
4409 iwl4965_down(priv);
4370 } 4410 }
4371 4411
4372 set_bit(STATUS_EXIT_PENDING, &priv->status);
4373
4374 iwl4965_down(priv);
4375
4376 /* make sure we flush any pending irq or 4412 /* make sure we flush any pending irq or
4377 * tasklet for the driver 4413 * tasklet for the driver
4378 */ 4414 */
@@ -4470,7 +4506,10 @@ static struct pci_device_id iwl_hw_card_ids[] = {
4470 {IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)}, 4506 {IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)},
4471 {IWL_PCI_DEVICE(0x4236, PCI_ANY_ID, iwl5300_agn_cfg)}, 4507 {IWL_PCI_DEVICE(0x4236, PCI_ANY_ID, iwl5300_agn_cfg)},
4472 {IWL_PCI_DEVICE(0x4237, PCI_ANY_ID, iwl5100_agn_cfg)}, 4508 {IWL_PCI_DEVICE(0x4237, PCI_ANY_ID, iwl5100_agn_cfg)},
4473 {IWL_PCI_DEVICE(0x423A, PCI_ANY_ID, iwl5350_agn_cfg)}, 4509/* 5350 WiFi/WiMax */
4510 {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)},
4511 {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)},
4512 {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)},
4474#endif /* CONFIG_IWL5000 */ 4513#endif /* CONFIG_IWL5000 */
4475 {0} 4514 {0}
4476}; 4515};
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index ef49440bd7f6..72fbf47229db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -66,6 +66,66 @@
66#include "iwl-core.h" 66#include "iwl-core.h"
67#include "iwl-calib.h" 67#include "iwl-calib.h"
68 68
69/*****************************************************************************
70 * INIT calibrations framework
71 *****************************************************************************/
72
73 int iwl_send_calib_results(struct iwl_priv *priv)
74{
75 int ret = 0;
76 int i = 0;
77
78 struct iwl_host_cmd hcmd = {
79 .id = REPLY_PHY_CALIBRATION_CMD,
80 .meta.flags = CMD_SIZE_HUGE,
81 };
82
83 for (i = 0; i < IWL_CALIB_MAX; i++)
84 if (priv->calib_results[i].buf) {
85 hcmd.len = priv->calib_results[i].buf_len;
86 hcmd.data = priv->calib_results[i].buf;
87 ret = iwl_send_cmd_sync(priv, &hcmd);
88 if (ret)
89 goto err;
90 }
91
92 return 0;
93err:
94 IWL_ERROR("Error %d iteration %d\n", ret, i);
95 return ret;
96}
97EXPORT_SYMBOL(iwl_send_calib_results);
98
99int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
100{
101 if (res->buf_len != len) {
102 kfree(res->buf);
103 res->buf = kzalloc(len, GFP_ATOMIC);
104 }
105 if (unlikely(res->buf == NULL))
106 return -ENOMEM;
107
108 res->buf_len = len;
109 memcpy(res->buf, buf, len);
110 return 0;
111}
112EXPORT_SYMBOL(iwl_calib_set);
113
114void iwl_calib_free_results(struct iwl_priv *priv)
115{
116 int i;
117
118 for (i = 0; i < IWL_CALIB_MAX; i++) {
119 kfree(priv->calib_results[i].buf);
120 priv->calib_results[i].buf = NULL;
121 priv->calib_results[i].buf_len = 0;
122 }
123}
124
125/*****************************************************************************
126 * RUNTIME calibrations framework
127 *****************************************************************************/
128
69/* "false alarms" are signals that our DSP tries to lock onto, 129/* "false alarms" are signals that our DSP tries to lock onto,
70 * but then determines that they are either noise, or transmissions 130 * but then determines that they are either noise, or transmissions
71 * from a distant wireless network (also "noise", really) that get 131 * from a distant wireless network (also "noise", really) that get
@@ -748,13 +808,11 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
748 } 808 }
749 } 809 }
750 810
811 /* Save for use within RXON, TX, SCAN commands, etc. */
812 priv->chain_noise_data.active_chains = active_chains;
751 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n", 813 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
752 active_chains); 814 active_chains);
753 815
754 /* Save for use within RXON, TX, SCAN commands, etc. */
755 /*priv->valid_antenna = active_chains;*/
756 /*FIXME: should be reflected in RX chains in RXON */
757
758 /* Analyze noise for rx balance */ 816 /* Analyze noise for rx balance */
759 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS); 817 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
760 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS); 818 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
@@ -779,6 +837,15 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
779 837
780 priv->cfg->ops->utils->gain_computation(priv, average_noise, 838 priv->cfg->ops->utils->gain_computation(priv, average_noise,
781 min_average_noise_antenna_i, min_average_noise); 839 min_average_noise_antenna_i, min_average_noise);
840
841 /* Some power changes may have been made during the calibration.
842 * Update and commit the RXON
843 */
844 if (priv->cfg->ops->lib->update_chain_flags)
845 priv->cfg->ops->lib->update_chain_flags(priv);
846
847 data->state = IWL_CHAIN_NOISE_DONE;
848 iwl_power_enable_management(priv);
782} 849}
783EXPORT_SYMBOL(iwl_chain_noise_calibration); 850EXPORT_SYMBOL(iwl_chain_noise_calibration);
784 851
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 28b5b09996ed..8d04e966ad48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -163,6 +163,13 @@ enum {
163/* iwl_cmd_header flags value */ 163/* iwl_cmd_header flags value */
164#define IWL_CMD_FAILED_MSK 0x40 164#define IWL_CMD_FAILED_MSK 0x40
165 165
166#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
167#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
168#define SEQ_TO_INDEX(s) ((s) & 0xff)
169#define INDEX_TO_SEQ(i) ((i) & 0xff)
170#define SEQ_HUGE_FRAME __constant_cpu_to_le16(0x4000)
171#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000)
172
166/** 173/**
167 * struct iwl_cmd_header 174 * struct iwl_cmd_header
168 * 175 *
@@ -171,7 +178,7 @@ enum {
171 */ 178 */
172struct iwl_cmd_header { 179struct iwl_cmd_header {
173 u8 cmd; /* Command ID: REPLY_RXON, etc. */ 180 u8 cmd; /* Command ID: REPLY_RXON, etc. */
174 u8 flags; /* IWL_CMD_* */ 181 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
175 /* 182 /*
176 * The driver sets up the sequence number to values of its chosing. 183 * The driver sets up the sequence number to values of its chosing.
177 * uCode does not use this value, but passes it back to the driver 184 * uCode does not use this value, but passes it back to the driver
@@ -187,11 +194,12 @@ struct iwl_cmd_header {
187 * 194 *
188 * The Linux driver uses the following format: 195 * The Linux driver uses the following format:
189 * 196 *
190 * 0:7 index/position within Tx queue 197 * 0:7 tfd index - position within TX queue
191 * 8:13 Tx queue selection 198 * 8:12 TX queue id
192 * 14:14 driver sets this to indicate command is in the 'huge' 199 * 13 reserved
193 * storage at the end of the command buffers, i.e. scan cmd 200 * 14 huge - driver sets this to indicate command is in the
194 * 15:15 uCode sets this in uCode-originated response/notification 201 * 'huge' storage at the end of the command buffers
202 * 15 unsolicited RX or uCode-originated notification
195 */ 203 */
196 __le16 sequence; 204 __le16 sequence;
197 205
@@ -2026,8 +2034,8 @@ struct iwl4965_spectrum_notification {
2026 * bit 2 - '0' PM have to walk up every DTIM 2034 * bit 2 - '0' PM have to walk up every DTIM
2027 * '1' PM could sleep over DTIM till listen Interval. 2035 * '1' PM could sleep over DTIM till listen Interval.
2028 * PCI power managed 2036 * PCI power managed
2029 * bit 3 - '0' (PCI_LINK_CTRL & 0x1) 2037 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2030 * '1' !(PCI_LINK_CTRL & 0x1) 2038 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2031 * Force sleep Modes 2039 * Force sleep Modes
2032 * bit 31/30- '00' use both mac/xtal sleeps 2040 * bit 31/30- '00' use both mac/xtal sleeps
2033 * '01' force Mac sleep 2041 * '01' force Mac sleep
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index c72f72579bea..d80184ee911c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -306,14 +306,14 @@ void iwl_reset_qos(struct iwl_priv *priv)
306 spin_lock_irqsave(&priv->lock, flags); 306 spin_lock_irqsave(&priv->lock, flags);
307 priv->qos_data.qos_active = 0; 307 priv->qos_data.qos_active = 0;
308 308
309 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { 309 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) {
310 if (priv->qos_data.qos_enable) 310 if (priv->qos_data.qos_enable)
311 priv->qos_data.qos_active = 1; 311 priv->qos_data.qos_active = 1;
312 if (!(priv->active_rate & 0xfff0)) { 312 if (!(priv->active_rate & 0xfff0)) {
313 cw_min = 31; 313 cw_min = 31;
314 is_legacy = 1; 314 is_legacy = 1;
315 } 315 }
316 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 316 } else if (priv->iw_mode == NL80211_IFTYPE_AP) {
317 if (priv->qos_data.qos_enable) 317 if (priv->qos_data.qos_enable)
318 priv->qos_data.qos_active = 1; 318 priv->qos_data.qos_active = 1;
319 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { 319 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
@@ -399,8 +399,8 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
399 399
400 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD; 400 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
401 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20; 401 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
402 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS & 402 ht_info->cap |= (u16)(IEEE80211_HT_CAP_SM_PS &
403 (IWL_MIMO_PS_NONE << 2)); 403 (WLAN_HT_CAP_SM_PS_DISABLED << 2));
404 404
405 max_bit_rate = MAX_BIT_RATE_20_MHZ; 405 max_bit_rate = MAX_BIT_RATE_20_MHZ;
406 if (priv->hw_params.fat_channel & BIT(band)) { 406 if (priv->hw_params.fat_channel & BIT(band)) {
@@ -592,12 +592,11 @@ static void iwlcore_free_geos(struct iwl_priv *priv)
592 clear_bit(STATUS_GEO_CONFIGURED, &priv->status); 592 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
593} 593}
594 594
595static u8 is_single_rx_stream(struct iwl_priv *priv) 595static bool is_single_rx_stream(struct iwl_priv *priv)
596{ 596{
597 return !priv->current_ht_config.is_ht || 597 return !priv->current_ht_config.is_ht ||
598 ((priv->current_ht_config.supp_mcs_set[1] == 0) && 598 ((priv->current_ht_config.supp_mcs_set[1] == 0) &&
599 (priv->current_ht_config.supp_mcs_set[2] == 0)) || 599 (priv->current_ht_config.supp_mcs_set[2] == 0));
600 priv->ps_mode == IWL_MIMO_PS_STATIC;
601} 600}
602 601
603static u8 iwl_is_channel_extension(struct iwl_priv *priv, 602static u8 iwl_is_channel_extension(struct iwl_priv *priv,
@@ -704,33 +703,52 @@ EXPORT_SYMBOL(iwl_set_rxon_ht);
704 * MIMO (dual stream) requires at least 2, but works better with 3. 703 * MIMO (dual stream) requires at least 2, but works better with 3.
705 * This does not determine *which* chains to use, just how many. 704 * This does not determine *which* chains to use, just how many.
706 */ 705 */
707static int iwlcore_get_rx_chain_counter(struct iwl_priv *priv, 706static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
708 u8 *idle_state, u8 *rx_state)
709{ 707{
710 u8 is_single = is_single_rx_stream(priv); 708 bool is_single = is_single_rx_stream(priv);
711 u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1; 709 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
712 710
713 /* # of Rx chains to use when expecting MIMO. */ 711 /* # of Rx chains to use when expecting MIMO. */
714 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC))) 712 if (is_single || (!is_cam && (priv->current_ht_config.sm_ps ==
715 *rx_state = 2; 713 WLAN_HT_CAP_SM_PS_STATIC)))
714 return 2;
716 else 715 else
717 *rx_state = 3; 716 return 3;
717}
718 718
719static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
720{
721 int idle_cnt;
722 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
719 /* # Rx chains when idling and maybe trying to save power */ 723 /* # Rx chains when idling and maybe trying to save power */
720 switch (priv->ps_mode) { 724 switch (priv->current_ht_config.sm_ps) {
721 case IWL_MIMO_PS_STATIC: 725 case WLAN_HT_CAP_SM_PS_STATIC:
722 case IWL_MIMO_PS_DYNAMIC: 726 case WLAN_HT_CAP_SM_PS_DYNAMIC:
723 *idle_state = (is_cam) ? 2 : 1; 727 idle_cnt = (is_cam) ? 2 : 1;
724 break; 728 break;
725 case IWL_MIMO_PS_NONE: 729 case WLAN_HT_CAP_SM_PS_DISABLED:
726 *idle_state = (is_cam) ? *rx_state : 1; 730 idle_cnt = (is_cam) ? active_cnt : 1;
727 break; 731 break;
732 case WLAN_HT_CAP_SM_PS_INVALID:
728 default: 733 default:
729 *idle_state = 1; 734 IWL_ERROR("invalide mimo ps mode %d\n",
735 priv->current_ht_config.sm_ps);
736 WARN_ON(1);
737 idle_cnt = -1;
730 break; 738 break;
731 } 739 }
740 return idle_cnt;
741}
732 742
733 return 0; 743/* up to 4 chains */
744static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
745{
746 u8 res;
747 res = (chain_bitmap & BIT(0)) >> 0;
748 res += (chain_bitmap & BIT(1)) >> 1;
749 res += (chain_bitmap & BIT(2)) >> 2;
750 res += (chain_bitmap & BIT(4)) >> 4;
751 return res;
734} 752}
735 753
736/** 754/**
@@ -741,39 +759,59 @@ static int iwlcore_get_rx_chain_counter(struct iwl_priv *priv,
741 */ 759 */
742void iwl_set_rxon_chain(struct iwl_priv *priv) 760void iwl_set_rxon_chain(struct iwl_priv *priv)
743{ 761{
744 u8 is_single = is_single_rx_stream(priv); 762 bool is_single = is_single_rx_stream(priv);
745 u8 idle_state, rx_state; 763 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
746 764 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
747 priv->staging_rxon.rx_chain = 0; 765 u32 active_chains;
748 rx_state = idle_state = 3; 766 u16 rx_chain;
749 767
750 /* Tell uCode which antennas are actually connected. 768 /* Tell uCode which antennas are actually connected.
751 * Before first association, we assume all antennas are connected. 769 * Before first association, we assume all antennas are connected.
752 * Just after first association, iwl_chain_noise_calibration() 770 * Just after first association, iwl_chain_noise_calibration()
753 * checks which antennas actually *are* connected. */ 771 * checks which antennas actually *are* connected. */
754 priv->staging_rxon.rx_chain |= 772 if (priv->chain_noise_data.active_chains)
755 cpu_to_le16(priv->hw_params.valid_rx_ant << 773 active_chains = priv->chain_noise_data.active_chains;
756 RXON_RX_CHAIN_VALID_POS); 774 else
775 active_chains = priv->hw_params.valid_rx_ant;
776
777 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
757 778
758 /* How many receivers should we use? */ 779 /* How many receivers should we use? */
759 iwlcore_get_rx_chain_counter(priv, &idle_state, &rx_state); 780 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
760 priv->staging_rxon.rx_chain |= 781 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
761 cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS); 782
762 priv->staging_rxon.rx_chain |= 783
763 cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS); 784 /* correct rx chain count according hw settings
764 785 * and chain noise calibration
765 if (!is_single && (rx_state >= 2) && 786 */
766 !test_bit(STATUS_POWER_PMI, &priv->status)) 787 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
788 if (valid_rx_cnt < active_rx_cnt)
789 active_rx_cnt = valid_rx_cnt;
790
791 if (valid_rx_cnt < idle_rx_cnt)
792 idle_rx_cnt = valid_rx_cnt;
793
794 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
795 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
796
797 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
798
799 if (!is_single && (active_rx_cnt >= 2) && is_cam)
767 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; 800 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
768 else 801 else
769 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; 802 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
770 803
771 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain); 804 IWL_DEBUG_ASSOC("rx_chain=0x%X active=%d idle=%d\n",
805 priv->staging_rxon.rx_chain,
806 active_rx_cnt, idle_rx_cnt);
807
808 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
809 active_rx_cnt < idle_rx_cnt);
772} 810}
773EXPORT_SYMBOL(iwl_set_rxon_chain); 811EXPORT_SYMBOL(iwl_set_rxon_chain);
774 812
775/** 813/**
776 * iwlcore_set_rxon_channel - Set the phymode and channel values in staging RXON 814 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
777 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz 815 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
778 * @channel: Any channel valid for the requested phymode 816 * @channel: Any channel valid for the requested phymode
779 817
@@ -782,10 +820,11 @@ EXPORT_SYMBOL(iwl_set_rxon_chain);
782 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 820 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
783 * in the staging RXON flag structure based on the phymode 821 * in the staging RXON flag structure based on the phymode
784 */ 822 */
785int iwl_set_rxon_channel(struct iwl_priv *priv, 823int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
786 enum ieee80211_band band,
787 u16 channel)
788{ 824{
825 enum ieee80211_band band = ch->band;
826 u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
827
789 if (!iwl_get_channel_info(priv, band, channel)) { 828 if (!iwl_get_channel_info(priv, band, channel)) {
790 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", 829 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
791 channel, band); 830 channel, band);
@@ -819,6 +858,10 @@ int iwl_setup_mac(struct iwl_priv *priv)
819 /* Tell mac80211 our characteristics */ 858 /* Tell mac80211 our characteristics */
820 hw->flags = IEEE80211_HW_SIGNAL_DBM | 859 hw->flags = IEEE80211_HW_SIGNAL_DBM |
821 IEEE80211_HW_NOISE_DBM; 860 IEEE80211_HW_NOISE_DBM;
861 hw->wiphy->interface_modes =
862 BIT(NL80211_IFTYPE_AP) |
863 BIT(NL80211_IFTYPE_STATION) |
864 BIT(NL80211_IFTYPE_ADHOC);
822 /* Default value; 4 EDCA QOS priorities */ 865 /* Default value; 4 EDCA QOS priorities */
823 hw->queues = 4; 866 hw->queues = 4;
824 /* queues to support 11n aggregation */ 867 /* queues to support 11n aggregation */
@@ -876,7 +919,6 @@ int iwl_init_drv(struct iwl_priv *priv)
876 spin_lock_init(&priv->power_data.lock); 919 spin_lock_init(&priv->power_data.lock);
877 spin_lock_init(&priv->sta_lock); 920 spin_lock_init(&priv->sta_lock);
878 spin_lock_init(&priv->hcmd_lock); 921 spin_lock_init(&priv->hcmd_lock);
879 spin_lock_init(&priv->lq_mngr.lock);
880 922
881 INIT_LIST_HEAD(&priv->free_frames); 923 INIT_LIST_HEAD(&priv->free_frames);
882 924
@@ -890,10 +932,10 @@ int iwl_init_drv(struct iwl_priv *priv)
890 priv->ieee_rates = NULL; 932 priv->ieee_rates = NULL;
891 priv->band = IEEE80211_BAND_2GHZ; 933 priv->band = IEEE80211_BAND_2GHZ;
892 934
893 priv->iw_mode = IEEE80211_IF_TYPE_STA; 935 priv->iw_mode = NL80211_IFTYPE_STATION;
894 936
895 priv->use_ant_b_for_management_frame = 1; /* start with ant B */ 937 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
896 priv->ps_mode = IWL_MIMO_PS_NONE; 938 priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
897 939
898 /* Choose which receivers/antennas to use */ 940 /* Choose which receivers/antennas to use */
899 iwl_set_rxon_chain(priv); 941 iwl_set_rxon_chain(priv);
@@ -907,8 +949,6 @@ int iwl_init_drv(struct iwl_priv *priv)
907 priv->qos_data.qos_active = 0; 949 priv->qos_data.qos_active = 0;
908 priv->qos_data.qos_cap.val = 0; 950 priv->qos_data.qos_cap.val = 0;
909 951
910 iwl_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
911
912 priv->rates_mask = IWL_RATES_MASK; 952 priv->rates_mask = IWL_RATES_MASK;
913 /* If power management is turned on, default to AC mode */ 953 /* If power management is turned on, default to AC mode */
914 priv->power_mode = IWL_POWER_AC; 954 priv->power_mode = IWL_POWER_AC;
@@ -935,22 +975,6 @@ err:
935} 975}
936EXPORT_SYMBOL(iwl_init_drv); 976EXPORT_SYMBOL(iwl_init_drv);
937 977
938void iwl_free_calib_results(struct iwl_priv *priv)
939{
940 kfree(priv->calib_results.lo_res);
941 priv->calib_results.lo_res = NULL;
942 priv->calib_results.lo_res_len = 0;
943
944 kfree(priv->calib_results.tx_iq_res);
945 priv->calib_results.tx_iq_res = NULL;
946 priv->calib_results.tx_iq_res_len = 0;
947
948 kfree(priv->calib_results.tx_iq_perd_res);
949 priv->calib_results.tx_iq_perd_res = NULL;
950 priv->calib_results.tx_iq_perd_res_len = 0;
951}
952EXPORT_SYMBOL(iwl_free_calib_results);
953
954int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) 978int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
955{ 979{
956 int ret = 0; 980 int ret = 0;
@@ -978,10 +1002,9 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
978} 1002}
979EXPORT_SYMBOL(iwl_set_tx_power); 1003EXPORT_SYMBOL(iwl_set_tx_power);
980 1004
981
982void iwl_uninit_drv(struct iwl_priv *priv) 1005void iwl_uninit_drv(struct iwl_priv *priv)
983{ 1006{
984 iwl_free_calib_results(priv); 1007 iwl_calib_free_results(priv);
985 iwlcore_free_geos(priv); 1008 iwlcore_free_geos(priv);
986 iwl_free_channel_map(priv); 1009 iwl_free_channel_map(priv);
987 kfree(priv->scan); 1010 kfree(priv->scan);
@@ -1135,7 +1158,6 @@ int iwl_verify_ucode(struct iwl_priv *priv)
1135} 1158}
1136EXPORT_SYMBOL(iwl_verify_ucode); 1159EXPORT_SYMBOL(iwl_verify_ucode);
1137 1160
1138
1139static const char *desc_lookup(int i) 1161static const char *desc_lookup(int i)
1140{ 1162{
1141 switch (i) { 1163 switch (i) {
@@ -1216,9 +1238,9 @@ EXPORT_SYMBOL(iwl_dump_nic_error_log);
1216/** 1238/**
1217 * iwl_print_event_log - Dump error event log to syslog 1239 * iwl_print_event_log - Dump error event log to syslog
1218 * 1240 *
1219 * NOTE: Must be called with iwl4965_grab_nic_access() already obtained! 1241 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
1220 */ 1242 */
1221void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, 1243static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1222 u32 num_events, u32 mode) 1244 u32 num_events, u32 mode)
1223{ 1245{
1224 u32 i; 1246 u32 i;
@@ -1259,8 +1281,6 @@ void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1259 } 1281 }
1260 } 1282 }
1261} 1283}
1262EXPORT_SYMBOL(iwl_print_event_log);
1263
1264 1284
1265void iwl_dump_nic_event_log(struct iwl_priv *priv) 1285void iwl_dump_nic_event_log(struct iwl_priv *priv)
1266{ 1286{
@@ -1376,7 +1396,7 @@ void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv)
1376 1396
1377 iwl_scan_cancel(priv); 1397 iwl_scan_cancel(priv);
1378 /* FIXME: This is a workaround for AP */ 1398 /* FIXME: This is a workaround for AP */
1379 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { 1399 if (priv->iw_mode != NL80211_IFTYPE_AP) {
1380 spin_lock_irqsave(&priv->lock, flags); 1400 spin_lock_irqsave(&priv->lock, flags);
1381 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 1401 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
1382 CSR_UCODE_SW_BIT_RFKILL); 1402 CSR_UCODE_SW_BIT_RFKILL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 64f139e97444..55a4b584ce07 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -184,14 +184,10 @@ struct iwl_cfg {
184struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 184struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
185 struct ieee80211_ops *hw_ops); 185 struct ieee80211_ops *hw_ops);
186void iwl_hw_detect(struct iwl_priv *priv); 186void iwl_hw_detect(struct iwl_priv *priv);
187
188void iwl_clear_stations_table(struct iwl_priv *priv); 187void iwl_clear_stations_table(struct iwl_priv *priv);
189void iwl_free_calib_results(struct iwl_priv *priv);
190void iwl_reset_qos(struct iwl_priv *priv); 188void iwl_reset_qos(struct iwl_priv *priv);
191void iwl_set_rxon_chain(struct iwl_priv *priv); 189void iwl_set_rxon_chain(struct iwl_priv *priv);
192int iwl_set_rxon_channel(struct iwl_priv *priv, 190int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
193 enum ieee80211_band band,
194 u16 channel);
195void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info); 191void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info);
196u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv, 192u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
197 struct ieee80211_ht_info *sta_ht_inf); 193 struct ieee80211_ht_info *sta_ht_inf);
@@ -218,7 +214,6 @@ void iwl_rx_replenish(struct iwl_priv *priv);
218int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 214int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
219int iwl_rx_agg_start(struct iwl_priv *priv, const u8 *addr, int tid, u16 ssn); 215int iwl_rx_agg_start(struct iwl_priv *priv, const u8 *addr, int tid, u16 ssn);
220int iwl_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid); 216int iwl_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
221/* FIXME: remove when TX is moved to iwl core */
222int iwl_rx_queue_restock(struct iwl_priv *priv); 217int iwl_rx_queue_restock(struct iwl_priv *priv);
223int iwl_rx_queue_space(const struct iwl_rx_queue *q); 218int iwl_rx_queue_space(const struct iwl_rx_queue *q);
224void iwl_rx_allocate(struct iwl_priv *priv); 219void iwl_rx_allocate(struct iwl_priv *priv);
@@ -237,11 +232,7 @@ void iwl_rx_statistics(struct iwl_priv *priv,
237******************************************************/ 232******************************************************/
238int iwl_txq_ctx_reset(struct iwl_priv *priv); 233int iwl_txq_ctx_reset(struct iwl_priv *priv);
239int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); 234int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
240/* FIXME: remove when free Tx is fully merged into iwlcore */
241int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
242void iwl_hw_txq_ctx_free(struct iwl_priv *priv); 235void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
243int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
244 dma_addr_t addr, u16 len);
245int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 236int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
246int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); 237int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
247int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); 238int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
@@ -256,6 +247,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
256 * RF -Kill - here and not in iwl-rfkill.h to be available when 247 * RF -Kill - here and not in iwl-rfkill.h to be available when
257 * RF-kill subsystem is not compiled. 248 * RF-kill subsystem is not compiled.
258 ****************************************************/ 249 ****************************************************/
250void iwl_rf_kill(struct iwl_priv *priv);
259void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv); 251void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv);
260int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv); 252int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv);
261 253
@@ -286,11 +278,17 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
286void iwl_init_scan_params(struct iwl_priv *priv); 278void iwl_init_scan_params(struct iwl_priv *priv);
287int iwl_scan_cancel(struct iwl_priv *priv); 279int iwl_scan_cancel(struct iwl_priv *priv);
288int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); 280int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
289const char *iwl_escape_essid(const char *essid, u8 essid_len);
290int iwl_scan_initiate(struct iwl_priv *priv); 281int iwl_scan_initiate(struct iwl_priv *priv);
291void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); 282void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
292void iwl_setup_scan_deferred_work(struct iwl_priv *priv); 283void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
293 284
285/*******************************************************************************
286 * Calibrations - implemented in iwl-calib.c
287 ******************************************************************************/
288int iwl_send_calib_results(struct iwl_priv *priv);
289int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
290void iwl_calib_free_results(struct iwl_priv *priv);
291
294/***************************************************** 292/*****************************************************
295 * S e n d i n g H o s t C o m m a n d s * 293 * S e n d i n g H o s t C o m m a n d s *
296 *****************************************************/ 294 *****************************************************/
@@ -312,8 +310,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
312/***************************************************** 310/*****************************************************
313* Error Handling Debugging 311* Error Handling Debugging
314******************************************************/ 312******************************************************/
315void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
316 u32 num_events, u32 mode);
317void iwl_dump_nic_error_log(struct iwl_priv *priv); 313void iwl_dump_nic_error_log(struct iwl_priv *priv);
318void iwl_dump_nic_event_log(struct iwl_priv *priv); 314void iwl_dump_nic_event_log(struct iwl_priv *priv);
319 315
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d2daa174df22..e548d67f87fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -110,11 +110,12 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
110 * 110 *
111 */ 111 */
112 112
113#define IWL_DL_INFO (1 << 0) 113#define IWL_DL_INFO (1 << 0)
114#define IWL_DL_MAC80211 (1 << 1) 114#define IWL_DL_MAC80211 (1 << 1)
115#define IWL_DL_HOST_COMMAND (1 << 2) 115#define IWL_DL_HCMD (1 << 2)
116#define IWL_DL_STATE (1 << 3) 116#define IWL_DL_STATE (1 << 3)
117#define IWL_DL_MACDUMP (1 << 4) 117#define IWL_DL_MACDUMP (1 << 4)
118#define IWL_DL_HCMD_DUMP (1 << 5)
118#define IWL_DL_RADIO (1 << 7) 119#define IWL_DL_RADIO (1 << 7)
119#define IWL_DL_POWER (1 << 8) 120#define IWL_DL_POWER (1 << 8)
120#define IWL_DL_TEMP (1 << 9) 121#define IWL_DL_TEMP (1 << 9)
@@ -162,7 +163,8 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
162#define IWL_DEBUG_ISR(f, a...) IWL_DEBUG(IWL_DL_ISR, f, ## a) 163#define IWL_DEBUG_ISR(f, a...) IWL_DEBUG(IWL_DL_ISR, f, ## a)
163#define IWL_DEBUG_LED(f, a...) IWL_DEBUG(IWL_DL_LED, f, ## a) 164#define IWL_DEBUG_LED(f, a...) IWL_DEBUG(IWL_DL_LED, f, ## a)
164#define IWL_DEBUG_WEP(f, a...) IWL_DEBUG(IWL_DL_WEP, f, ## a) 165#define IWL_DEBUG_WEP(f, a...) IWL_DEBUG(IWL_DL_WEP, f, ## a)
165#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HOST_COMMAND, f, ## a) 166#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HCMD, f, ## a)
167#define IWL_DEBUG_HC_DUMP(f, a...) IWL_DEBUG(IWL_DL_HCMD_DUMP, f, ## a)
166#define IWL_DEBUG_CALIB(f, a...) IWL_DEBUG(IWL_DL_CALIB, f, ## a) 168#define IWL_DEBUG_CALIB(f, a...) IWL_DEBUG(IWL_DL_CALIB, f, ## a)
167#define IWL_DEBUG_FW(f, a...) IWL_DEBUG(IWL_DL_FW, f, ## a) 169#define IWL_DEBUG_FW(f, a...) IWL_DEBUG(IWL_DL_FW, f, ## a)
168#define IWL_DEBUG_RF_KILL(f, a...) IWL_DEBUG(IWL_DL_RF_KILL, f, ## a) 170#define IWL_DEBUG_RF_KILL(f, a...) IWL_DEBUG(IWL_DL_RF_KILL, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index c19db438306c..c018121085e9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -225,12 +225,6 @@ struct iwl_frame {
225 struct list_head list; 225 struct list_head list;
226}; 226};
227 227
228#define SEQ_TO_QUEUE(x) ((x >> 8) & 0xbf)
229#define QUEUE_TO_SEQ(x) ((x & 0xbf) << 8)
230#define SEQ_TO_INDEX(x) ((u8)(x & 0xff))
231#define INDEX_TO_SEQ(x) ((u8)(x & 0xff))
232#define SEQ_HUGE_FRAME (0x4000)
233#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000)
234#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) 228#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
235#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) 229#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
236#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) 230#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
@@ -412,7 +406,7 @@ struct iwl_ht_info {
412 /* self configuration data */ 406 /* self configuration data */
413 u8 is_ht; 407 u8 is_ht;
414 u8 supported_chan_width; 408 u8 supported_chan_width;
415 u16 tx_mimo_ps_mode; 409 u8 sm_ps;
416 u8 is_green_field; 410 u8 is_green_field;
417 u8 sgf; /* HT_SHORT_GI_* short guard interval */ 411 u8 sgf; /* HT_SHORT_GI_* short guard interval */
418 u8 max_amsdu_size; 412 u8 max_amsdu_size;
@@ -571,50 +565,31 @@ struct iwl_hw_params {
571#define IWL_RX_STATS(x) (&x->u.rx_frame.stats) 565#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
572#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload) 566#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
573 567
574
575/****************************************************************************** 568/******************************************************************************
576 * 569 *
577 * Functions implemented in iwl-base.c which are forward declared here 570 * Functions implemented in core module which are forward declared here
578 * for use by iwl-*.c 571 * for use by iwl-[4-5].c
579 * 572 *
580 *****************************************************************************/ 573 * NOTE: The implementation of these functions are not hardware specific
581struct iwl_addsta_cmd; 574 * which is why they are in the core module files.
582extern int iwl_send_add_sta(struct iwl_priv *priv,
583 struct iwl_addsta_cmd *sta, u8 flags);
584u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
585 u8 flags, struct ieee80211_ht_info *ht_info);
586extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
587 struct ieee80211_hdr *hdr,
588 const u8 *dest, int left);
589extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
590int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
591extern int iwl4965_set_power(struct iwl_priv *priv, void *cmd);
592
593extern const u8 iwl_bcast_addr[ETH_ALEN];
594
595/******************************************************************************
596 *
597 * Functions implemented in iwl-[34]*.c which are forward declared here
598 * for use by iwl-base.c
599 *
600 * NOTE: The implementation of these functions are hardware specific
601 * which is why they are in the hardware specific files (vs. iwl-base.c)
602 * 575 *
603 * Naming convention -- 576 * Naming convention --
604 * iwl4965_ <-- Its part of iwlwifi (should be changed to iwl4965_) 577 * iwl_ <-- Is part of iwlwifi
605 * iwl4965_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
606 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) 578 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
607 * iwl4965_bg_ <-- Called from work queue context 579 * iwl4965_bg_ <-- Called from work queue context
608 * iwl4965_mac_ <-- mac80211 callback 580 * iwl4965_mac_ <-- mac80211 callback
609 * 581 *
610 ****************************************************************************/ 582 ****************************************************************************/
583struct iwl_addsta_cmd;
584extern int iwl_send_add_sta(struct iwl_priv *priv,
585 struct iwl_addsta_cmd *sta, u8 flags);
586extern u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr,
587 int is_ap, u8 flags, struct ieee80211_ht_info *ht_info);
588extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
589extern int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
590extern const u8 iwl_bcast_addr[ETH_ALEN];
611extern int iwl_rxq_stop(struct iwl_priv *priv); 591extern int iwl_rxq_stop(struct iwl_priv *priv);
612extern void iwl_txq_ctx_stop(struct iwl_priv *priv); 592extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
613extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
614 struct iwl_frame *frame, u8 rate);
615extern void iwl4965_disable_events(struct iwl_priv *priv);
616
617extern int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel);
618extern int iwl_queue_space(const struct iwl_queue *q); 593extern int iwl_queue_space(const struct iwl_queue *q);
619static inline int iwl_queue_used(const struct iwl_queue *q, int i) 594static inline int iwl_queue_used(const struct iwl_queue *q, int i)
620{ 595{
@@ -637,12 +612,6 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
637 612
638struct iwl_priv; 613struct iwl_priv;
639 614
640/*
641 * Forward declare iwl-4965.c functions for iwl-base.c
642 */
643extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
644int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
645 u8 tid, int txq_id);
646 615
647/* Structures, enum, and defines specific to the 4965 */ 616/* Structures, enum, and defines specific to the 4965 */
648 617
@@ -657,11 +626,6 @@ struct iwl_kw {
657#define IWL_CHANNEL_WIDTH_20MHZ 0 626#define IWL_CHANNEL_WIDTH_20MHZ 0
658#define IWL_CHANNEL_WIDTH_40MHZ 1 627#define IWL_CHANNEL_WIDTH_40MHZ 1
659 628
660#define IWL_MIMO_PS_STATIC 0
661#define IWL_MIMO_PS_NONE 3
662#define IWL_MIMO_PS_DYNAMIC 1
663#define IWL_MIMO_PS_INVALID 2
664
665#define IWL_OPERATION_MODE_AUTO 0 629#define IWL_OPERATION_MODE_AUTO 0
666#define IWL_OPERATION_MODE_HT_ONLY 1 630#define IWL_OPERATION_MODE_HT_ONLY 1
667#define IWL_OPERATION_MODE_MIXED 2 631#define IWL_OPERATION_MODE_MIXED 2
@@ -672,18 +636,6 @@ struct iwl_kw {
672 636
673#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 637#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
674 638
675struct iwl4965_lq_mngr {
676 spinlock_t lock;
677 s32 max_window_size;
678 s32 *expected_tpt;
679 u8 *next_higher_rate;
680 u8 *next_lower_rate;
681 unsigned long stamp;
682 unsigned long stamp_last;
683 u32 flush_time;
684 u32 tx_packets;
685};
686
687/* Sensitivity and chain noise calibration */ 639/* Sensitivity and chain noise calibration */
688#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1) 640#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1)
689#define INITIALIZATION_VALUE 0xFFFF 641#define INITIALIZATION_VALUE 0xFFFF
@@ -728,8 +680,9 @@ enum iwl4965_false_alarm_state {
728 680
729enum iwl4965_chain_noise_state { 681enum iwl4965_chain_noise_state {
730 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ 682 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
731 IWL_CHAIN_NOISE_ACCUMULATE = 1, 683 IWL_CHAIN_NOISE_ACCUMULATE,
732 IWL_CHAIN_NOISE_CALIBRATED = 2, 684 IWL_CHAIN_NOISE_CALIBRATED,
685 IWL_CHAIN_NOISE_DONE,
733}; 686};
734 687
735enum iwl4965_calib_enabled_state { 688enum iwl4965_calib_enabled_state {
@@ -746,13 +699,10 @@ struct statistics_general_data {
746 u32 beacon_energy_c; 699 u32 beacon_energy_c;
747}; 700};
748 701
749struct iwl_calib_results { 702/* Opaque calibration results */
750 void *tx_iq_res; 703struct iwl_calib_result {
751 void *tx_iq_perd_res; 704 void *buf;
752 void *lo_res; 705 size_t buf_len;
753 u32 tx_iq_res_len;
754 u32 tx_iq_perd_res_len;
755 u32 lo_res_len;
756}; 706};
757 707
758enum ucode_type { 708enum ucode_type {
@@ -790,17 +740,18 @@ struct iwl_sensitivity_data {
790 740
791/* Chain noise (differential Rx gain) calib data */ 741/* Chain noise (differential Rx gain) calib data */
792struct iwl_chain_noise_data { 742struct iwl_chain_noise_data {
793 u8 state; 743 u32 active_chains;
794 u16 beacon_count;
795 u32 chain_noise_a; 744 u32 chain_noise_a;
796 u32 chain_noise_b; 745 u32 chain_noise_b;
797 u32 chain_noise_c; 746 u32 chain_noise_c;
798 u32 chain_signal_a; 747 u32 chain_signal_a;
799 u32 chain_signal_b; 748 u32 chain_signal_b;
800 u32 chain_signal_c; 749 u32 chain_signal_c;
750 u16 beacon_count;
801 u8 disconn_array[NUM_RX_CHAINS]; 751 u8 disconn_array[NUM_RX_CHAINS];
802 u8 delta_gain_code[NUM_RX_CHAINS]; 752 u8 delta_gain_code[NUM_RX_CHAINS];
803 u8 radio_write; 753 u8 radio_write;
754 u8 state;
804}; 755};
805 756
806#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ 757#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
@@ -814,6 +765,7 @@ enum {
814 765
815 766
816#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */ 767#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
768#define IWL_CALIB_MAX 3
817 769
818struct iwl_priv { 770struct iwl_priv {
819 771
@@ -829,7 +781,6 @@ struct iwl_priv {
829 781
830 enum ieee80211_band band; 782 enum ieee80211_band band;
831 int alloc_rxb_skb; 783 int alloc_rxb_skb;
832 bool add_radiotap;
833 784
834 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 785 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
835 struct iwl_rx_mem_buffer *rxb); 786 struct iwl_rx_mem_buffer *rxb);
@@ -858,7 +809,7 @@ struct iwl_priv {
858 s32 last_temperature; 809 s32 last_temperature;
859 810
860 /* init calibration results */ 811 /* init calibration results */
861 struct iwl_calib_results calib_results; 812 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
862 813
863 /* Scan related variables */ 814 /* Scan related variables */
864 unsigned long last_scan_jiffies; 815 unsigned long last_scan_jiffies;
@@ -940,9 +891,6 @@ struct iwl_priv {
940 u8 last_phy_res[100]; 891 u8 last_phy_res[100];
941 892
942 /* Rate scaling data */ 893 /* Rate scaling data */
943 struct iwl4965_lq_mngr lq_mngr;
944
945 /* Rate scaling data */
946 s8 data_retry_limit; 894 s8 data_retry_limit;
947 u8 retry_rate; 895 u8 retry_rate;
948 896
@@ -1006,7 +954,7 @@ struct iwl_priv {
1006 u8 *eeprom; 954 u8 *eeprom;
1007 struct iwl_eeprom_calib_info *calib_info; 955 struct iwl_eeprom_calib_info *calib_info;
1008 956
1009 enum ieee80211_if_types iw_mode; 957 enum nl80211_iftype iw_mode;
1010 958
1011 struct sk_buff *ibss_beacon; 959 struct sk_buff *ibss_beacon;
1012 960
@@ -1026,7 +974,6 @@ struct iwl_priv {
1026 * hardware */ 974 * hardware */
1027 u16 assoc_id; 975 u16 assoc_id;
1028 u16 assoc_capability; 976 u16 assoc_capability;
1029 u8 ps_mode;
1030 977
1031 struct iwl_qos_info qos_data; 978 struct iwl_qos_info qos_data;
1032 979
@@ -1048,6 +995,7 @@ struct iwl_priv {
1048 995
1049 struct tasklet_struct irq_tasklet; 996 struct tasklet_struct irq_tasklet;
1050 997
998 struct delayed_work set_power_save;
1051 struct delayed_work init_alive_start; 999 struct delayed_work init_alive_start;
1052 struct delayed_work alive_start; 1000 struct delayed_work alive_start;
1053 struct delayed_work scan_check; 1001 struct delayed_work scan_check;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 944642450d3d..cd11c0ca2991 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -287,6 +287,7 @@
287 287
288#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) 288#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
289 289
290#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
290 291
291/** 292/**
292 * Transmit DMA Channel Control/Status Registers (TCSR) 293 * Transmit DMA Channel Control/Status Registers (TCSR)
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 2eb03eea1908..8300f3d00a06 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -120,8 +120,18 @@ static int iwl_generic_cmd_callback(struct iwl_priv *priv,
120 return 1; 120 return 1;
121 } 121 }
122 122
123 IWL_DEBUG_HC("back from %s (0x%08X)\n", 123#ifdef CONFIG_IWLWIFI_DEBUG
124 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 124 switch (cmd->hdr.cmd) {
125 case REPLY_TX_LINK_QUALITY_CMD:
126 case SENSITIVITY_CMD:
127 IWL_DEBUG_HC_DUMP("back from %s (0x%08X)\n",
128 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
129 break;
130 default:
131 IWL_DEBUG_HC("back from %s (0x%08X)\n",
132 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
133 }
134#endif
125 135
126 /* Let iwl_tx_complete free the response skb */ 136 /* Let iwl_tx_complete free the response skb */
127 return 1; 137 return 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 5bc3df432d2d..9740fcc1805e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -61,7 +61,7 @@
61 * 61 *
62 */ 62 */
63 63
64#define _iwl_write32(priv, ofs, val) writel((val), (priv)->hw_base + (ofs)) 64#define _iwl_write32(priv, ofs, val) iowrite32((val), (priv)->hw_base + (ofs))
65#ifdef CONFIG_IWLWIFI_DEBUG 65#ifdef CONFIG_IWLWIFI_DEBUG
66static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv, 66static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
67 u32 ofs, u32 val) 67 u32 ofs, u32 val)
@@ -75,7 +75,7 @@ static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
75#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val) 75#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val)
76#endif 76#endif
77 77
78#define _iwl_read32(priv, ofs) readl((priv)->hw_base + (ofs)) 78#define _iwl_read32(priv, ofs) ioread32((priv)->hw_base + (ofs))
79#ifdef CONFIG_IWLWIFI_DEBUG 79#ifdef CONFIG_IWLWIFI_DEBUG
80static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) 80static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
81{ 81{
@@ -155,28 +155,10 @@ static inline void __iwl_clear_bit(const char *f, u32 l,
155static inline int _iwl_grab_nic_access(struct iwl_priv *priv) 155static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
156{ 156{
157 int ret; 157 int ret;
158 u32 gp_ctl;
159
160#ifdef CONFIG_IWLWIFI_DEBUG 158#ifdef CONFIG_IWLWIFI_DEBUG
161 if (atomic_read(&priv->restrict_refcnt)) 159 if (atomic_read(&priv->restrict_refcnt))
162 return 0; 160 return 0;
163#endif 161#endif
164 if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
165 test_bit(STATUS_RF_KILL_SW, &priv->status)) {
166 IWL_WARNING("WARNING: Requesting MAC access during RFKILL "
167 "wakes up NIC\n");
168
169 /* 10 msec allows time for NIC to complete its data save */
170 gp_ctl = _iwl_read32(priv, CSR_GP_CNTRL);
171 if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
172 IWL_DEBUG_RF_KILL("Wait for complete power-down, "
173 "gpctl = 0x%08x\n", gp_ctl);
174 mdelay(10);
175 } else
176 IWL_DEBUG_RF_KILL("power-down complete, "
177 "gpctl = 0x%08x\n", gp_ctl);
178 }
179
180 /* this bit wakes up the NIC */ 162 /* this bit wakes up the NIC */
181 _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 163 _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
182 ret = _iwl_poll_bit(priv, CSR_GP_CNTRL, 164 ret = _iwl_poll_bit(priv, CSR_GP_CNTRL,
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index a099c9e30e55..60a03d2d2d0e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -152,9 +152,10 @@ static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
152/* initialize to default */ 152/* initialize to default */
153static int iwl_power_init_handle(struct iwl_priv *priv) 153static int iwl_power_init_handle(struct iwl_priv *priv)
154{ 154{
155 int ret = 0, i;
156 struct iwl_power_mgr *pow_data; 155 struct iwl_power_mgr *pow_data;
157 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX; 156 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
157 struct iwl_powertable_cmd *cmd;
158 int i;
158 u16 pci_pm; 159 u16 pci_pm;
159 160
160 IWL_DEBUG_POWER("Initialize power \n"); 161 IWL_DEBUG_POWER("Initialize power \n");
@@ -167,25 +168,19 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
167 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size); 168 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
168 memcpy(&pow_data->pwr_range_2[0], &range_2[0], size); 169 memcpy(&pow_data->pwr_range_2[0], &range_2[0], size);
169 170
170 ret = pci_read_config_word(priv->pci_dev, 171 pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &pci_pm);
171 PCI_LINK_CTRL, &pci_pm);
172 if (ret != 0)
173 return 0;
174 else {
175 struct iwl_powertable_cmd *cmd;
176 172
177 IWL_DEBUG_POWER("adjust power command flags\n"); 173 IWL_DEBUG_POWER("adjust power command flags\n");
178 174
179 for (i = 0; i < IWL_POWER_MAX; i++) { 175 for (i = 0; i < IWL_POWER_MAX; i++) {
180 cmd = &pow_data->pwr_range_0[i].cmd; 176 cmd = &pow_data->pwr_range_0[i].cmd;
181 177
182 if (pci_pm & 0x1) 178 if (pci_pm & PCI_CFG_LINK_CTRL_VAL_L0S_EN)
183 cmd->flags &= ~IWL_POWER_PCI_PM_MSK; 179 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
184 else 180 else
185 cmd->flags |= IWL_POWER_PCI_PM_MSK; 181 cmd->flags |= IWL_POWER_PCI_PM_MSK;
186 }
187 } 182 }
188 return ret; 183 return 0;
189} 184}
190 185
191/* adjust power command according to dtim period and power level*/ 186/* adjust power command according to dtim period and power level*/
@@ -255,17 +250,26 @@ static int iwl_update_power_command(struct iwl_priv *priv,
255 250
256 251
257/* 252/*
258 * calucaute the final power mode index 253 * compute the final power mode index
259 */ 254 */
260int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh) 255int iwl_power_update_mode(struct iwl_priv *priv, bool force)
261{ 256{
262 struct iwl_power_mgr *setting = &(priv->power_data); 257 struct iwl_power_mgr *setting = &(priv->power_data);
263 int ret = 0; 258 int ret = 0;
264 u16 uninitialized_var(final_mode); 259 u16 uninitialized_var(final_mode);
265 260
266 /* If on battery, set to 3, 261 /* Don't update the RX chain when chain noise calibration is running */
267 * if plugged into AC power, set to CAM ("continuously aware mode"), 262 if (priv->chain_noise_data.state != IWL_CHAIN_NOISE_DONE &&
268 * else user level */ 263 priv->chain_noise_data.state != IWL_CHAIN_NOISE_ALIVE) {
264 IWL_DEBUG_POWER("Cannot update the power, chain noise "
265 "calibration running: %d\n",
266 priv->chain_noise_data.state);
267 return -EAGAIN;
268 }
269
270 /* If on battery, set to 3,
271 * if plugged into AC power, set to CAM ("continuously aware mode"),
272 * else user level */
269 273
270 switch (setting->system_power_setting) { 274 switch (setting->system_power_setting) {
271 case IWL_POWER_SYS_AUTO: 275 case IWL_POWER_SYS_AUTO:
@@ -286,11 +290,11 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
286 final_mode = setting->critical_power_setting; 290 final_mode = setting->critical_power_setting;
287 291
288 /* driver only support CAM for non STA network */ 292 /* driver only support CAM for non STA network */
289 if (priv->iw_mode != IEEE80211_IF_TYPE_STA) 293 if (priv->iw_mode != NL80211_IFTYPE_STATION)
290 final_mode = IWL_POWER_MODE_CAM; 294 final_mode = IWL_POWER_MODE_CAM;
291 295
292 if (!iwl_is_rfkill(priv) && !setting->power_disabled && 296 if (!iwl_is_rfkill(priv) && !setting->power_disabled &&
293 ((setting->power_mode != final_mode) || refresh)) { 297 ((setting->power_mode != final_mode) || force)) {
294 struct iwl_powertable_cmd cmd; 298 struct iwl_powertable_cmd cmd;
295 299
296 if (final_mode != IWL_POWER_MODE_CAM) 300 if (final_mode != IWL_POWER_MODE_CAM)
@@ -324,7 +328,7 @@ EXPORT_SYMBOL(iwl_power_update_mode);
324 * this will be usefull for rate scale to disable PM during heavy 328 * this will be usefull for rate scale to disable PM during heavy
325 * Tx/Rx activities 329 * Tx/Rx activities
326 */ 330 */
327int iwl_power_disable_management(struct iwl_priv *priv) 331int iwl_power_disable_management(struct iwl_priv *priv, u32 ms)
328{ 332{
329 u16 prev_mode; 333 u16 prev_mode;
330 int ret = 0; 334 int ret = 0;
@@ -337,6 +341,11 @@ int iwl_power_disable_management(struct iwl_priv *priv)
337 ret = iwl_power_update_mode(priv, 0); 341 ret = iwl_power_update_mode(priv, 0);
338 priv->power_data.power_disabled = 1; 342 priv->power_data.power_disabled = 1;
339 priv->power_data.user_power_setting = prev_mode; 343 priv->power_data.user_power_setting = prev_mode;
344 cancel_delayed_work(&priv->set_power_save);
345 if (ms)
346 queue_delayed_work(priv->workqueue, &priv->set_power_save,
347 msecs_to_jiffies(ms));
348
340 349
341 return ret; 350 return ret;
342} 351}
@@ -359,35 +368,26 @@ EXPORT_SYMBOL(iwl_power_enable_management);
359/* set user_power_setting */ 368/* set user_power_setting */
360int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode) 369int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode)
361{ 370{
362 int ret = 0;
363
364 if (mode > IWL_POWER_LIMIT) 371 if (mode > IWL_POWER_LIMIT)
365 return -EINVAL; 372 return -EINVAL;
366 373
367 priv->power_data.user_power_setting = mode; 374 priv->power_data.user_power_setting = mode;
368 375
369 ret = iwl_power_update_mode(priv, 0); 376 return iwl_power_update_mode(priv, 0);
370
371 return ret;
372} 377}
373EXPORT_SYMBOL(iwl_power_set_user_mode); 378EXPORT_SYMBOL(iwl_power_set_user_mode);
374 379
375
376/* set system_power_setting. This should be set by over all 380/* set system_power_setting. This should be set by over all
377 * PM application. 381 * PM application.
378 */ 382 */
379int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode) 383int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode)
380{ 384{
381 int ret = 0;
382
383 if (mode > IWL_POWER_LIMIT) 385 if (mode > IWL_POWER_LIMIT)
384 return -EINVAL; 386 return -EINVAL;
385 387
386 priv->power_data.system_power_setting = mode; 388 priv->power_data.system_power_setting = mode;
387 389
388 ret = iwl_power_update_mode(priv, 0); 390 return iwl_power_update_mode(priv, 0);
389
390 return ret;
391} 391}
392EXPORT_SYMBOL(iwl_power_set_system_mode); 392EXPORT_SYMBOL(iwl_power_set_system_mode);
393 393
@@ -431,3 +431,35 @@ int iwl_power_temperature_change(struct iwl_priv *priv)
431 return ret; 431 return ret;
432} 432}
433EXPORT_SYMBOL(iwl_power_temperature_change); 433EXPORT_SYMBOL(iwl_power_temperature_change);
434
435static void iwl_bg_set_power_save(struct work_struct *work)
436{
437 struct iwl_priv *priv = container_of(work,
438 struct iwl_priv, set_power_save.work);
439 IWL_DEBUG(IWL_DL_STATE, "update power\n");
440
441 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
442 return;
443
444 mutex_lock(&priv->mutex);
445
446 /* on starting association we disable power managment
447 * until association, if association failed then this
448 * timer will expire and enable PM again.
449 */
450 if (!iwl_is_associated(priv))
451 iwl_power_enable_management(priv);
452
453 mutex_unlock(&priv->mutex);
454}
455void iwl_setup_power_deferred_work(struct iwl_priv *priv)
456{
457 INIT_DELAYED_WORK(&priv->set_power_save, iwl_bg_set_power_save);
458}
459EXPORT_SYMBOL(iwl_setup_power_deferred_work);
460
461void iwl_power_cancel_timeout(struct iwl_priv *priv)
462{
463 cancel_delayed_work(&priv->set_power_save);
464}
465EXPORT_SYMBOL(iwl_power_cancel_timeout);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index abcbbf96a84e..df484a90ae64 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -72,14 +72,16 @@ struct iwl_power_mgr {
72 /* final power level that used to calculate final power command */ 72 /* final power level that used to calculate final power command */
73 u8 power_mode; 73 u8 power_mode;
74 u8 user_power_setting; /* set by user through mac80211 or sysfs */ 74 u8 user_power_setting; /* set by user through mac80211 or sysfs */
75 u8 system_power_setting; /* set by kernel syatem tools */ 75 u8 system_power_setting; /* set by kernel system tools */
76 u8 critical_power_setting; /* set if driver over heated */ 76 u8 critical_power_setting; /* set if driver over heated */
77 u8 is_battery_active; /* DC/AC power */ 77 u8 is_battery_active; /* DC/AC power */
78 u8 power_disabled; /* flag to disable using power saving level */ 78 u8 power_disabled; /* flag to disable using power saving level */
79}; 79};
80 80
81int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh); 81void iwl_setup_power_deferred_work(struct iwl_priv *priv);
82int iwl_power_disable_management(struct iwl_priv *priv); 82void iwl_power_cancel_timeout(struct iwl_priv *priv);
83int iwl_power_update_mode(struct iwl_priv *priv, bool force);
84int iwl_power_disable_management(struct iwl_priv *priv, u32 ms);
83int iwl_power_enable_management(struct iwl_priv *priv); 85int iwl_power_enable_management(struct iwl_priv *priv);
84int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode); 86int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode);
85int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode); 87int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index f3f6ea49fdd2..38b2946b1d81 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -789,107 +789,6 @@ static inline void iwl_dbg_report_frame(struct iwl_priv *priv,
789} 789}
790#endif 790#endif
791 791
792static void iwl_add_radiotap(struct iwl_priv *priv,
793 struct sk_buff *skb,
794 struct iwl_rx_phy_res *rx_start,
795 struct ieee80211_rx_status *stats,
796 u32 ampdu_status)
797{
798 s8 signal = stats->signal;
799 s8 noise = 0;
800 int rate = stats->rate_idx;
801 u64 tsf = stats->mactime;
802 __le16 antenna;
803 __le16 phy_flags_hw = rx_start->phy_flags;
804 struct iwl4965_rt_rx_hdr {
805 struct ieee80211_radiotap_header rt_hdr;
806 __le64 rt_tsf; /* TSF */
807 u8 rt_flags; /* radiotap packet flags */
808 u8 rt_rate; /* rate in 500kb/s */
809 __le16 rt_channelMHz; /* channel in MHz */
810 __le16 rt_chbitmask; /* channel bitfield */
811 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
812 s8 rt_dbmnoise;
813 u8 rt_antenna; /* antenna number */
814 } __attribute__ ((packed)) *iwl4965_rt;
815
816 /* TODO: We won't have enough headroom for HT frames. Fix it later. */
817 if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
818 if (net_ratelimit())
819 printk(KERN_ERR "not enough headroom [%d] for "
820 "radiotap head [%zd]\n",
821 skb_headroom(skb), sizeof(*iwl4965_rt));
822 return;
823 }
824
825 /* put radiotap header in front of 802.11 header and data */
826 iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
827
828 /* initialise radiotap header */
829 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
830 iwl4965_rt->rt_hdr.it_pad = 0;
831
832 /* total header + data */
833 put_unaligned_le16(sizeof(*iwl4965_rt), &iwl4965_rt->rt_hdr.it_len);
834
835 /* Indicate all the fields we add to the radiotap header */
836 put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) |
837 (1 << IEEE80211_RADIOTAP_FLAGS) |
838 (1 << IEEE80211_RADIOTAP_RATE) |
839 (1 << IEEE80211_RADIOTAP_CHANNEL) |
840 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
841 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
842 (1 << IEEE80211_RADIOTAP_ANTENNA),
843 &(iwl4965_rt->rt_hdr.it_present));
844
845 /* Zero the flags, we'll add to them as we go */
846 iwl4965_rt->rt_flags = 0;
847
848 put_unaligned_le64(tsf, &iwl4965_rt->rt_tsf);
849
850 iwl4965_rt->rt_dbmsignal = signal;
851 iwl4965_rt->rt_dbmnoise = noise;
852
853 /* Convert the channel frequency and set the flags */
854 put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
855 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
856 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
857 &iwl4965_rt->rt_chbitmask);
858 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
859 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
860 &iwl4965_rt->rt_chbitmask);
861 else /* 802.11g */
862 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
863 &iwl4965_rt->rt_chbitmask);
864
865 if (rate == -1)
866 iwl4965_rt->rt_rate = 0;
867 else
868 iwl4965_rt->rt_rate = iwl_rates[rate].ieee;
869
870 /*
871 * "antenna number"
872 *
873 * It seems that the antenna field in the phy flags value
874 * is actually a bitfield. This is undefined by radiotap,
875 * it wants an actual antenna number but I always get "7"
876 * for most legacy frames I receive indicating that the
877 * same frame was received on all three RX chains.
878 *
879 * I think this field should be removed in favour of a
880 * new 802.11n radiotap field "RX chains" that is defined
881 * as a bitmask.
882 */
883 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
884 iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
885
886 /* set the preamble flag if appropriate */
887 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
888 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
889
890 stats->flag |= RX_FLAG_RADIOTAP;
891}
892
893static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len) 792static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
894{ 793{
895 /* 0 - mgmt, 1 - cnt, 2 - data */ 794 /* 0 - mgmt, 1 - cnt, 2 - data */
@@ -1074,9 +973,6 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1074 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 973 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
1075 return; 974 return;
1076 975
1077 if (priv->add_radiotap)
1078 iwl_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
1079
1080 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len); 976 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
1081 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); 977 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
1082 priv->alloc_rxb_skb--; 978 priv->alloc_rxb_skb--;
@@ -1130,10 +1026,10 @@ static int iwl_is_network_packet(struct iwl_priv *priv,
1130 /* Filter incoming packets to determine if they are targeted toward 1026 /* Filter incoming packets to determine if they are targeted toward
1131 * this network, discarding packets coming from ourselves */ 1027 * this network, discarding packets coming from ourselves */
1132 switch (priv->iw_mode) { 1028 switch (priv->iw_mode) {
1133 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */ 1029 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
1134 /* packets to our IBSS update information */ 1030 /* packets to our IBSS update information */
1135 return !compare_ether_addr(header->addr3, priv->bssid); 1031 return !compare_ether_addr(header->addr3, priv->bssid);
1136 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */ 1032 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
1137 /* packets to our IBSS update information */ 1033 /* packets to our IBSS update information */
1138 return !compare_ether_addr(header->addr2, priv->bssid); 1034 return !compare_ether_addr(header->addr2, priv->bssid);
1139 default: 1035 default:
@@ -1171,9 +1067,11 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1171 if (rx_status.band == IEEE80211_BAND_5GHZ) 1067 if (rx_status.band == IEEE80211_BAND_5GHZ)
1172 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; 1068 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
1173 1069
1174 rx_status.antenna = 0;
1175 rx_status.flag = 0; 1070 rx_status.flag = 0;
1176 rx_status.flag |= RX_FLAG_TSFT; 1071
1072 /* TSF isn't reliable. In order to allow smooth user experience,
1073 * this W/A doesn't propagate it to the mac80211 */
1074 /*rx_status.flag |= RX_FLAG_TSFT;*/
1177 1075
1178 if ((unlikely(rx_start->cfg_phy_cnt > 20))) { 1076 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
1179 IWL_DEBUG_DROP("dsp size out of range [0,20]: %d/n", 1077 IWL_DEBUG_DROP("dsp size out of range [0,20]: %d/n",
@@ -1250,8 +1148,28 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1250 rx_status.signal, rx_status.noise, rx_status.signal, 1148 rx_status.signal, rx_status.noise, rx_status.signal,
1251 (unsigned long long)rx_status.mactime); 1149 (unsigned long long)rx_status.mactime);
1252 1150
1151 /*
1152 * "antenna number"
1153 *
1154 * It seems that the antenna field in the phy flags value
1155 * is actually a bitfield. This is undefined by radiotap,
1156 * it wants an actual antenna number but I always get "7"
1157 * for most legacy frames I receive indicating that the
1158 * same frame was received on all three RX chains.
1159 *
1160 * I think this field should be removed in favour of a
1161 * new 802.11n radiotap field "RX chains" that is defined
1162 * as a bitmask.
1163 */
1164 rx_status.antenna = le16_to_cpu(rx_start->phy_flags &
1165 RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
1166
1167 /* set the preamble flag if appropriate */
1168 if (rx_start->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1169 rx_status.flag |= RX_FLAG_SHORTPRE;
1170
1253 /* Take shortcut when only in monitor mode */ 1171 /* Take shortcut when only in monitor mode */
1254 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { 1172 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
1255 iwl_pass_packet_to_mac80211(priv, include_phy, 1173 iwl_pass_packet_to_mac80211(priv, include_phy,
1256 rxb, &rx_status); 1174 rxb, &rx_status);
1257 return; 1175 return;
@@ -1268,7 +1186,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1268 switch (fc & IEEE80211_FCTL_FTYPE) { 1186 switch (fc & IEEE80211_FCTL_FTYPE) {
1269 case IEEE80211_FTYPE_MGMT: 1187 case IEEE80211_FTYPE_MGMT:
1270 case IEEE80211_FTYPE_DATA: 1188 case IEEE80211_FTYPE_DATA:
1271 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 1189 if (priv->iw_mode == NL80211_IFTYPE_AP)
1272 iwl_update_ps_mode(priv, fc & IEEE80211_FCTL_PM, 1190 iwl_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
1273 header->addr2); 1191 header->addr2);
1274 /* fall through */ 1192 /* fall through */
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 9bb6adb28b73..09c264be0496 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -88,7 +88,7 @@ static int iwl_is_empty_essid(const char *essid, int essid_len)
88 88
89 89
90 90
91const char *iwl_escape_essid(const char *essid, u8 essid_len) 91static const char *iwl_escape_essid(const char *essid, u8 essid_len)
92{ 92{
93 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 93 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
94 const char *s = essid; 94 const char *s = essid;
@@ -111,7 +111,6 @@ const char *iwl_escape_essid(const char *essid, u8 essid_len)
111 *d = '\0'; 111 *d = '\0';
112 return escaped; 112 return escaped;
113} 113}
114EXPORT_SYMBOL(iwl_escape_essid);
115 114
116/** 115/**
117 * iwl_scan_cancel - Cancel any currently executing HW scan 116 * iwl_scan_cancel - Cancel any currently executing HW scan
@@ -421,7 +420,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
421 else 420 else
422 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; 421 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
423 422
424 if ((scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) && n_probes) 423 if (n_probes)
425 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); 424 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
426 425
427 scan_ch->active_dwell = cpu_to_le16(active_dwell); 426 scan_ch->active_dwell = cpu_to_le16(active_dwell);
@@ -464,7 +463,7 @@ void iwl_init_scan_params(struct iwl_priv *priv)
464 463
465int iwl_scan_initiate(struct iwl_priv *priv) 464int iwl_scan_initiate(struct iwl_priv *priv)
466{ 465{
467 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 466 if (priv->iw_mode == NL80211_IFTYPE_AP) {
468 IWL_ERROR("APs don't scan.\n"); 467 IWL_ERROR("APs don't scan.\n");
469 return 0; 468 return 0;
470 } 469 }
@@ -869,7 +868,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
869 868
870 scan->tx_cmd.len = cpu_to_le16(cmd_len); 869 scan->tx_cmd.len = cpu_to_le16(cmd_len);
871 870
872 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) 871 if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
873 scan->filter_flags = RXON_FILTER_PROMISC_MSK; 872 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
874 873
875 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | 874 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 6283a3a707f5..61797f3f8d5c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -47,8 +47,8 @@ u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
47 unsigned long flags; 47 unsigned long flags;
48 DECLARE_MAC_BUF(mac); 48 DECLARE_MAC_BUF(mac);
49 49
50 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) || 50 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
51 (priv->iw_mode == IEEE80211_IF_TYPE_AP)) 51 (priv->iw_mode == NL80211_IFTYPE_AP))
52 start = IWL_STA_ID; 52 start = IWL_STA_ID;
53 53
54 if (is_broadcast_ether_addr(addr)) 54 if (is_broadcast_ether_addr(addr))
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(iwl_find_station);
74 74
75int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) 75int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
76{ 76{
77 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { 77 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
78 return IWL_AP_ID; 78 return IWL_AP_ID;
79 } else { 79 } else {
80 u8 *da = ieee80211_get_DA(hdr); 80 u8 *da = ieee80211_get_DA(hdr);
@@ -191,20 +191,20 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
191 if (!sta_ht_inf || !sta_ht_inf->ht_supported) 191 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
192 goto done; 192 goto done;
193 193
194 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2; 194 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
195 195
196 sta_flags = priv->stations[index].sta.station_flags; 196 sta_flags = priv->stations[index].sta.station_flags;
197 197
198 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); 198 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
199 199
200 switch (mimo_ps_mode) { 200 switch (mimo_ps_mode) {
201 case WLAN_HT_CAP_MIMO_PS_STATIC: 201 case WLAN_HT_CAP_SM_PS_STATIC:
202 sta_flags |= STA_FLG_MIMO_DIS_MSK; 202 sta_flags |= STA_FLG_MIMO_DIS_MSK;
203 break; 203 break;
204 case WLAN_HT_CAP_MIMO_PS_DYNAMIC: 204 case WLAN_HT_CAP_SM_PS_DYNAMIC:
205 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; 205 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
206 break; 206 break;
207 case WLAN_HT_CAP_MIMO_PS_DISABLED: 207 case WLAN_HT_CAP_SM_PS_DISABLED:
208 break; 208 break;
209 default: 209 default:
210 IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode); 210 IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode);
@@ -286,7 +286,7 @@ u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
286 286
287 /* BCAST station and IBSS stations do not work in HT mode */ 287 /* BCAST station and IBSS stations do not work in HT mode */
288 if (sta_id != priv->hw_params.bcast_sta_id && 288 if (sta_id != priv->hw_params.bcast_sta_id &&
289 priv->iw_mode != IEEE80211_IF_TYPE_IBSS) 289 priv->iw_mode != NL80211_IFTYPE_ADHOC)
290 iwl_set_ht_add_station(priv, sta_id, ht_info); 290 iwl_set_ht_add_station(priv, sta_id, ht_info);
291 291
292 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 292 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
@@ -817,7 +817,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
817 }; 817 };
818 818
819 if ((lq->sta_id == 0xFF) && 819 if ((lq->sta_id == 0xFF) &&
820 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) 820 (priv->iw_mode == NL80211_IFTYPE_ADHOC))
821 return -EINVAL; 821 return -EINVAL;
822 822
823 if (lq->sta_id == 0xFF) 823 if (lq->sta_id == 0xFF)
@@ -904,7 +904,7 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
904 904
905 if ((is_ap) && 905 if ((is_ap) &&
906 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) && 906 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
907 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) 907 (priv->iw_mode == NL80211_IFTYPE_STATION))
908 sta_id = iwl_add_station_flags(priv, addr, is_ap, 908 sta_id = iwl_add_station_flags(priv, addr, is_ap,
909 0, cur_ht_config); 909 0, cur_ht_config);
910 else 910 else
@@ -938,11 +938,11 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
938 938
939 /* If we are a client station in a BSS network, use the special 939 /* If we are a client station in a BSS network, use the special
940 * AP station entry (that's the only station we communicate with) */ 940 * AP station entry (that's the only station we communicate with) */
941 case IEEE80211_IF_TYPE_STA: 941 case NL80211_IFTYPE_STATION:
942 return IWL_AP_ID; 942 return IWL_AP_ID;
943 943
944 /* If we are an AP, then find the station, or use BCAST */ 944 /* If we are an AP, then find the station, or use BCAST */
945 case IEEE80211_IF_TYPE_AP: 945 case NL80211_IFTYPE_AP:
946 sta_id = iwl_find_station(priv, hdr->addr1); 946 sta_id = iwl_find_station(priv, hdr->addr1);
947 if (sta_id != IWL_INVALID_STATION) 947 if (sta_id != IWL_INVALID_STATION)
948 return sta_id; 948 return sta_id;
@@ -950,7 +950,7 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
950 950
951 /* If this frame is going out to an IBSS network, find the station, 951 /* If this frame is going out to an IBSS network, find the station,
952 * or create a new station table entry */ 952 * or create a new station table entry */
953 case IEEE80211_IF_TYPE_IBSS: 953 case NL80211_IFTYPE_ADHOC:
954 sta_id = iwl_find_station(priv, hdr->addr1); 954 sta_id = iwl_find_station(priv, hdr->addr1);
955 if (sta_id != IWL_INVALID_STATION) 955 if (sta_id != IWL_INVALID_STATION)
956 return sta_id; 956 return sta_id;
@@ -968,6 +968,11 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
968 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); 968 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
969 return priv->hw_params.bcast_sta_id; 969 return priv->hw_params.bcast_sta_id;
970 970
971 /* If we are in monitor mode, use BCAST. This is required for
972 * packet injection. */
973 case NL80211_IFTYPE_MONITOR:
974 return priv->hw_params.bcast_sta_id;
975
971 default: 976 default:
972 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode); 977 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
973 return priv->hw_params.bcast_sta_id; 978 return priv->hw_params.bcast_sta_id;
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index d82823b5c8ab..e9feca4033f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -63,7 +63,7 @@ static const u16 default_tid_to_tx_fifo[] = {
63 * Does NOT advance any TFD circular buffer read/write indexes 63 * Does NOT advance any TFD circular buffer read/write indexes
64 * Does NOT free the TFD itself (which is within circular buffer) 64 * Does NOT free the TFD itself (which is within circular buffer)
65 */ 65 */
66int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) 66static int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
67{ 67{
68 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0]; 68 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
69 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr]; 69 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
@@ -115,10 +115,8 @@ int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
115 } 115 }
116 return 0; 116 return 0;
117} 117}
118EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
119 118
120 119static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
121int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
122 dma_addr_t addr, u16 len) 120 dma_addr_t addr, u16 len)
123{ 121{
124 int index, is_odd; 122 int index, is_odd;
@@ -126,7 +124,7 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
126 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); 124 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
127 125
128 /* Each TFD can point to a maximum 20 Tx buffers */ 126 /* Each TFD can point to a maximum 20 Tx buffers */
129 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) { 127 if (num_tbs >= MAX_NUM_OF_TBS) {
130 IWL_ERROR("Error can not send more than %d chunks\n", 128 IWL_ERROR("Error can not send more than %d chunks\n",
131 MAX_NUM_OF_TBS); 129 MAX_NUM_OF_TBS);
132 return -EINVAL; 130 return -EINVAL;
@@ -151,7 +149,6 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
151 149
152 return 0; 150 return 0;
153} 151}
154EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
155 152
156/** 153/**
157 * iwl_txq_update_write_ptr - Send new write index to hardware 154 * iwl_txq_update_write_ptr - Send new write index to hardware
@@ -402,12 +399,11 @@ static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
402/** 399/**
403 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue 400 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
404 */ 401 */
405static int iwl_tx_queue_init(struct iwl_priv *priv, 402static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
406 struct iwl_tx_queue *txq,
407 int slots_num, u32 txq_id) 403 int slots_num, u32 txq_id)
408{ 404{
409 int i, len; 405 int i, len;
410 int rc = 0; 406 int ret;
411 407
412 /* 408 /*
413 * Alloc buffer array for commands (Tx or other types of commands). 409 * Alloc buffer array for commands (Tx or other types of commands).
@@ -426,19 +422,16 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
426 continue; 422 continue;
427 } 423 }
428 424
429 txq->cmd[i] = kmalloc(len, GFP_KERNEL | GFP_DMA); 425 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
430 if (!txq->cmd[i]) 426 if (!txq->cmd[i])
431 return -ENOMEM; 427 goto err;
432 } 428 }
433 429
434 /* Alloc driver data array and TFD circular buffer */ 430 /* Alloc driver data array and TFD circular buffer */
435 rc = iwl_tx_queue_alloc(priv, txq, txq_id); 431 ret = iwl_tx_queue_alloc(priv, txq, txq_id);
436 if (rc) { 432 if (ret)
437 for (i = 0; i < slots_num; i++) 433 goto err;
438 kfree(txq->cmd[i]);
439 434
440 return -ENOMEM;
441 }
442 txq->need_update = 0; 435 txq->need_update = 0;
443 436
444 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 437 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
@@ -452,6 +445,17 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
452 iwl_hw_tx_queue_init(priv, txq); 445 iwl_hw_tx_queue_init(priv, txq);
453 446
454 return 0; 447 return 0;
448err:
449 for (i = 0; i < slots_num; i++) {
450 kfree(txq->cmd[i]);
451 txq->cmd[i] = NULL;
452 }
453
454 if (txq_id == IWL_CMD_QUEUE_NUM) {
455 kfree(txq->cmd[slots_num]);
456 txq->cmd[slots_num] = NULL;
457 }
458 return -ENOMEM;
455} 459}
456/** 460/**
457 * iwl_hw_txq_ctx_free - Free TXQ Context 461 * iwl_hw_txq_ctx_free - Free TXQ Context
@@ -471,7 +475,6 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
471} 475}
472EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 476EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
473 477
474
475/** 478/**
476 * iwl_txq_ctx_reset - Reset TX queue context 479 * iwl_txq_ctx_reset - Reset TX queue context
477 * Destroys all DMA structures and initialise them again 480 * Destroys all DMA structures and initialise them again
@@ -538,6 +541,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
538 error_kw: 541 error_kw:
539 return ret; 542 return ret;
540} 543}
544
541/** 545/**
542 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory 546 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
543 */ 547 */
@@ -789,11 +793,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
789 goto drop_unlock; 793 goto drop_unlock;
790 } 794 }
791 795
792 if (!priv->vif) {
793 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
794 goto drop_unlock;
795 }
796
797 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == 796 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
798 IWL_INVALID_RATE) { 797 IWL_INVALID_RATE) {
799 IWL_ERROR("ERROR: No TX rate available.\n"); 798 IWL_ERROR("ERROR: No TX rate available.\n");
@@ -815,16 +814,18 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
815 814
816 /* drop all data frame if we are not associated */ 815 /* drop all data frame if we are not associated */
817 if (ieee80211_is_data(fc) && 816 if (ieee80211_is_data(fc) &&
818 (!iwl_is_associated(priv) || 817 (priv->iw_mode != NL80211_IFTYPE_MONITOR ||
819 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) || 818 !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
820 !priv->assoc_station_added)) { 819 (!iwl_is_associated(priv) ||
820 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
821 !priv->assoc_station_added)) {
821 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n"); 822 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
822 goto drop_unlock; 823 goto drop_unlock;
823 } 824 }
824 825
825 spin_unlock_irqrestore(&priv->lock, flags); 826 spin_unlock_irqrestore(&priv->lock, flags);
826 827
827 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc)); 828 hdr_len = ieee80211_hdrlen(fc);
828 829
829 /* Find (or create) index into station table for destination station */ 830 /* Find (or create) index into station table for destination station */
830 sta_id = iwl_get_sta_id(priv, hdr); 831 sta_id = iwl_get_sta_id(priv, hdr);
@@ -842,7 +843,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
842 txq_id = swq_id; 843 txq_id = swq_id;
843 if (ieee80211_is_data_qos(fc)) { 844 if (ieee80211_is_data_qos(fc)) {
844 qc = ieee80211_get_qos_ctl(hdr); 845 qc = ieee80211_get_qos_ctl(hdr);
845 tid = qc[0] & 0xf; 846 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
846 seq_number = priv->stations[sta_id].tid[tid].seq_number; 847 seq_number = priv->stations[sta_id].tid[tid].seq_number;
847 seq_number &= IEEE80211_SCTL_SEQ; 848 seq_number &= IEEE80211_SCTL_SEQ;
848 hdr->seq_ctrl = hdr->seq_ctrl & 849 hdr->seq_ctrl = hdr->seq_ctrl &
@@ -1057,7 +1058,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1057 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | 1058 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
1058 INDEX_TO_SEQ(q->write_ptr)); 1059 INDEX_TO_SEQ(q->write_ptr));
1059 if (out_cmd->meta.flags & CMD_SIZE_HUGE) 1060 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
1060 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); 1061 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
1061 len = (idx == TFD_CMD_SLOTS) ? 1062 len = (idx == TFD_CMD_SLOTS) ?
1062 IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd); 1063 IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
1063 phys_addr = pci_map_single(priv->pci_dev, out_cmd, len, 1064 phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
@@ -1065,12 +1066,26 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1065 phys_addr += offsetof(struct iwl_cmd, hdr); 1066 phys_addr += offsetof(struct iwl_cmd, hdr);
1066 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); 1067 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
1067 1068
1068 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " 1069#ifdef CONFIG_IWLWIFI_DEBUG
1069 "%d bytes at %d[%d]:%d\n", 1070 switch (out_cmd->hdr.cmd) {
1070 get_cmd_string(out_cmd->hdr.cmd), 1071 case REPLY_TX_LINK_QUALITY_CMD:
1071 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), 1072 case SENSITIVITY_CMD:
1072 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM); 1073 IWL_DEBUG_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
1073 1074 "%d bytes at %d[%d]:%d\n",
1075 get_cmd_string(out_cmd->hdr.cmd),
1076 out_cmd->hdr.cmd,
1077 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
1078 q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
1079 break;
1080 default:
1081 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
1082 "%d bytes at %d[%d]:%d\n",
1083 get_cmd_string(out_cmd->hdr.cmd),
1084 out_cmd->hdr.cmd,
1085 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
1086 q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
1087 }
1088#endif
1074 txq->need_update = 1; 1089 txq->need_update = 1;
1075 1090
1076 /* Set up entry in queue's byte count circular buffer */ 1091 /* Set up entry in queue's byte count circular buffer */
@@ -1178,8 +1193,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1178 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1193 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1179 int txq_id = SEQ_TO_QUEUE(sequence); 1194 int txq_id = SEQ_TO_QUEUE(sequence);
1180 int index = SEQ_TO_INDEX(sequence); 1195 int index = SEQ_TO_INDEX(sequence);
1181 int huge = sequence & SEQ_HUGE_FRAME;
1182 int cmd_index; 1196 int cmd_index;
1197 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
1183 struct iwl_cmd *cmd; 1198 struct iwl_cmd *cmd;
1184 1199
1185 /* If a Tx command is being handled and it isn't in the actual 1200 /* If a Tx command is being handled and it isn't in the actual
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b775d5bab668..62b26befddc5 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1160,7 +1160,7 @@ static int iwl3945_commit_rxon(struct iwl3945_priv *priv)
1160 /* If we have set the ASSOC_MSK and we are in BSS mode then 1160 /* If we have set the ASSOC_MSK and we are in BSS mode then
1161 * add the IWL_AP_ID to the station rate table */ 1161 * add the IWL_AP_ID to the station rate table */
1162 if (iwl3945_is_associated(priv) && 1162 if (iwl3945_is_associated(priv) &&
1163 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) 1163 (priv->iw_mode == NL80211_IFTYPE_STATION))
1164 if (iwl3945_add_station(priv, priv->active_rxon.bssid_addr, 1, 0) 1164 if (iwl3945_add_station(priv, priv->active_rxon.bssid_addr, 1, 0)
1165 == IWL_INVALID_STATION) { 1165 == IWL_INVALID_STATION) {
1166 IWL_ERROR("Error adding AP address for transmit.\n"); 1166 IWL_ERROR("Error adding AP address for transmit.\n");
@@ -1447,8 +1447,8 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv,
1447{ 1447{
1448 1448
1449 if (!iwl3945_is_associated(priv) || !priv->ibss_beacon || 1449 if (!iwl3945_is_associated(priv) || !priv->ibss_beacon ||
1450 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && 1450 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
1451 (priv->iw_mode != IEEE80211_IF_TYPE_AP))) 1451 (priv->iw_mode != NL80211_IFTYPE_AP)))
1452 return 0; 1452 return 0;
1453 1453
1454 if (priv->ibss_beacon->len > left) 1454 if (priv->ibss_beacon->len > left)
@@ -1746,14 +1746,14 @@ static void iwl3945_reset_qos(struct iwl3945_priv *priv)
1746 spin_lock_irqsave(&priv->lock, flags); 1746 spin_lock_irqsave(&priv->lock, flags);
1747 priv->qos_data.qos_active = 0; 1747 priv->qos_data.qos_active = 0;
1748 1748
1749 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { 1749 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) {
1750 if (priv->qos_data.qos_enable) 1750 if (priv->qos_data.qos_enable)
1751 priv->qos_data.qos_active = 1; 1751 priv->qos_data.qos_active = 1;
1752 if (!(priv->active_rate & 0xfff0)) { 1752 if (!(priv->active_rate & 0xfff0)) {
1753 cw_min = 31; 1753 cw_min = 31;
1754 is_legacy = 1; 1754 is_legacy = 1;
1755 } 1755 }
1756 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 1756 } else if (priv->iw_mode == NL80211_IFTYPE_AP) {
1757 if (priv->qos_data.qos_enable) 1757 if (priv->qos_data.qos_enable)
1758 priv->qos_data.qos_active = 1; 1758 priv->qos_data.qos_active = 1;
1759 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { 1759 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
@@ -2120,7 +2120,7 @@ static void iwl3945_setup_rxon_timing(struct iwl3945_priv *priv)
2120 beacon_int = priv->beacon_int; 2120 beacon_int = priv->beacon_int;
2121 spin_unlock_irqrestore(&priv->lock, flags); 2121 spin_unlock_irqrestore(&priv->lock, flags);
2122 2122
2123 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { 2123 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
2124 if (beacon_int == 0) { 2124 if (beacon_int == 0) {
2125 priv->rxon_timing.beacon_interval = cpu_to_le16(100); 2125 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2126 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400); 2126 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
@@ -2156,7 +2156,7 @@ static void iwl3945_setup_rxon_timing(struct iwl3945_priv *priv)
2156 2156
2157static int iwl3945_scan_initiate(struct iwl3945_priv *priv) 2157static int iwl3945_scan_initiate(struct iwl3945_priv *priv)
2158{ 2158{
2159 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2159 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2160 IWL_ERROR("APs don't scan.\n"); 2160 IWL_ERROR("APs don't scan.\n");
2161 return 0; 2161 return 0;
2162 } 2162 }
@@ -2218,7 +2218,7 @@ static void iwl3945_set_flags_for_phymode(struct iwl3945_priv *priv,
2218 else 2218 else
2219 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2219 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2220 2220
2221 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 2221 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2222 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2222 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2223 2223
2224 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; 2224 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
@@ -2237,23 +2237,23 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2237 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); 2237 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2238 2238
2239 switch (priv->iw_mode) { 2239 switch (priv->iw_mode) {
2240 case IEEE80211_IF_TYPE_AP: 2240 case NL80211_IFTYPE_AP:
2241 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; 2241 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2242 break; 2242 break;
2243 2243
2244 case IEEE80211_IF_TYPE_STA: 2244 case NL80211_IFTYPE_STATION:
2245 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; 2245 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2246 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; 2246 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2247 break; 2247 break;
2248 2248
2249 case IEEE80211_IF_TYPE_IBSS: 2249 case NL80211_IFTYPE_ADHOC:
2250 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS; 2250 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2251 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; 2251 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2252 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | 2252 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2253 RXON_FILTER_ACCEPT_GRP_MSK; 2253 RXON_FILTER_ACCEPT_GRP_MSK;
2254 break; 2254 break;
2255 2255
2256 case IEEE80211_IF_TYPE_MNTR: 2256 case NL80211_IFTYPE_MONITOR:
2257 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER; 2257 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2258 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | 2258 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2259 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 2259 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
@@ -2282,7 +2282,7 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2282 * in some case A channels are all non IBSS 2282 * in some case A channels are all non IBSS
2283 * in this case force B/G channel 2283 * in this case force B/G channel
2284 */ 2284 */
2285 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 2285 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
2286 !(is_channel_ibss(ch_info))) 2286 !(is_channel_ibss(ch_info)))
2287 ch_info = &priv->channel_info[0]; 2287 ch_info = &priv->channel_info[0];
2288 2288
@@ -2302,7 +2302,7 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2302 2302
2303static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode) 2303static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
2304{ 2304{
2305 if (mode == IEEE80211_IF_TYPE_IBSS) { 2305 if (mode == NL80211_IFTYPE_ADHOC) {
2306 const struct iwl3945_channel_info *ch_info; 2306 const struct iwl3945_channel_info *ch_info;
2307 2307
2308 ch_info = iwl3945_get_channel_info(priv, 2308 ch_info = iwl3945_get_channel_info(priv,
@@ -2469,11 +2469,11 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2469 2469
2470 /* If we are a client station in a BSS network, use the special 2470 /* If we are a client station in a BSS network, use the special
2471 * AP station entry (that's the only station we communicate with) */ 2471 * AP station entry (that's the only station we communicate with) */
2472 case IEEE80211_IF_TYPE_STA: 2472 case NL80211_IFTYPE_STATION:
2473 return IWL_AP_ID; 2473 return IWL_AP_ID;
2474 2474
2475 /* If we are an AP, then find the station, or use BCAST */ 2475 /* If we are an AP, then find the station, or use BCAST */
2476 case IEEE80211_IF_TYPE_AP: 2476 case NL80211_IFTYPE_AP:
2477 sta_id = iwl3945_hw_find_station(priv, hdr->addr1); 2477 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
2478 if (sta_id != IWL_INVALID_STATION) 2478 if (sta_id != IWL_INVALID_STATION)
2479 return sta_id; 2479 return sta_id;
@@ -2481,7 +2481,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2481 2481
2482 /* If this frame is going out to an IBSS network, find the station, 2482 /* If this frame is going out to an IBSS network, find the station,
2483 * or create a new station table entry */ 2483 * or create a new station table entry */
2484 case IEEE80211_IF_TYPE_IBSS: { 2484 case NL80211_IFTYPE_ADHOC: {
2485 DECLARE_MAC_BUF(mac); 2485 DECLARE_MAC_BUF(mac);
2486 2486
2487 /* Create new station table entry */ 2487 /* Create new station table entry */
@@ -2502,7 +2502,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2502 } 2502 }
2503 /* If we are in monitor mode, use BCAST. This is required for 2503 /* If we are in monitor mode, use BCAST. This is required for
2504 * packet injection. */ 2504 * packet injection. */
2505 case IEEE80211_IF_TYPE_MNTR: 2505 case NL80211_IFTYPE_MONITOR:
2506 return priv->hw_setting.bcast_sta_id; 2506 return priv->hw_setting.bcast_sta_id;
2507 2507
2508 default: 2508 default:
@@ -2565,16 +2565,16 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2565 2565
2566 /* drop all data frame if we are not associated */ 2566 /* drop all data frame if we are not associated */
2567 if (ieee80211_is_data(fc) && 2567 if (ieee80211_is_data(fc) &&
2568 (priv->iw_mode != IEEE80211_IF_TYPE_MNTR) && /* packet injection */ 2568 (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
2569 (!iwl3945_is_associated(priv) || 2569 (!iwl3945_is_associated(priv) ||
2570 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id))) { 2570 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
2571 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n"); 2571 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
2572 goto drop_unlock; 2572 goto drop_unlock;
2573 } 2573 }
2574 2574
2575 spin_unlock_irqrestore(&priv->lock, flags); 2575 spin_unlock_irqrestore(&priv->lock, flags);
2576 2576
2577 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc)); 2577 hdr_len = ieee80211_hdrlen(fc);
2578 2578
2579 /* Find (or create) index into station table for destination station */ 2579 /* Find (or create) index into station table for destination station */
2580 sta_id = iwl3945_get_sta_id(priv, hdr); 2580 sta_id = iwl3945_get_sta_id(priv, hdr);
@@ -2590,7 +2590,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2590 2590
2591 if (ieee80211_is_data_qos(fc)) { 2591 if (ieee80211_is_data_qos(fc)) {
2592 qc = ieee80211_get_qos_ctl(hdr); 2592 qc = ieee80211_get_qos_ctl(hdr);
2593 tid = qc[0] & 0xf; 2593 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
2594 seq_number = priv->stations[sta_id].tid[tid].seq_number & 2594 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2595 IEEE80211_SCTL_SEQ; 2595 IEEE80211_SCTL_SEQ;
2596 hdr->seq_ctrl = cpu_to_le16(seq_number) | 2596 hdr->seq_ctrl = cpu_to_le16(seq_number) |
@@ -2709,7 +2709,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2709 sizeof(out_cmd->cmd.tx)); 2709 sizeof(out_cmd->cmd.tx));
2710 2710
2711 iwl3945_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, 2711 iwl3945_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2712 ieee80211_get_hdrlen(le16_to_cpu(fc))); 2712 ieee80211_hdrlen(fc));
2713 2713
2714 /* Tell device the write index *just past* this latest filled TFD */ 2714 /* Tell device the write index *just past* this latest filled TFD */
2715 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 2715 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -2806,7 +2806,7 @@ static void iwl3945_radio_kill_sw(struct iwl3945_priv *priv, int disable_radio)
2806 if (disable_radio) { 2806 if (disable_radio) {
2807 iwl3945_scan_cancel(priv); 2807 iwl3945_scan_cancel(priv);
2808 /* FIXME: This is a workaround for AP */ 2808 /* FIXME: This is a workaround for AP */
2809 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { 2809 if (priv->iw_mode != NL80211_IFTYPE_AP) {
2810 spin_lock_irqsave(&priv->lock, flags); 2810 spin_lock_irqsave(&priv->lock, flags);
2811 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_SET, 2811 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_SET,
2812 CSR_UCODE_SW_BIT_RFKILL); 2812 CSR_UCODE_SW_BIT_RFKILL);
@@ -3161,7 +3161,7 @@ static void iwl3945_rx_beacon_notif(struct iwl3945_priv *priv,
3161 le32_to_cpu(beacon->low_tsf), rate); 3161 le32_to_cpu(beacon->low_tsf), rate);
3162#endif 3162#endif
3163 3163
3164 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 3164 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
3165 (!test_bit(STATUS_EXIT_PENDING, &priv->status))) 3165 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3166 queue_work(priv->workqueue, &priv->beacon_update); 3166 queue_work(priv->workqueue, &priv->beacon_update);
3167} 3167}
@@ -4782,8 +4782,11 @@ static void iwl3945_free_channel_map(struct iwl3945_priv *priv)
4782/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after 4782/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
4783 * sending probe req. This should be set long enough to hear probe responses 4783 * sending probe req. This should be set long enough to hear probe responses
4784 * from more than one AP. */ 4784 * from more than one AP. */
4785#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */ 4785#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
4786#define IWL_ACTIVE_DWELL_TIME_52 (10) 4786#define IWL_ACTIVE_DWELL_TIME_52 (20)
4787
4788#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
4789#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
4787 4790
4788/* For faster active scanning, scan will move to the next channel if fewer than 4791/* For faster active scanning, scan will move to the next channel if fewer than
4789 * PLCP_QUIET_THRESH packets are heard on this channel within 4792 * PLCP_QUIET_THRESH packets are heard on this channel within
@@ -4792,7 +4795,7 @@ static void iwl3945_free_channel_map(struct iwl3945_priv *priv)
4792 * no other traffic). 4795 * no other traffic).
4793 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ 4796 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
4794#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */ 4797#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
4795#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */ 4798#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(10) /* msec */
4796 4799
4797/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. 4800/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
4798 * Must be set longer than active dwell time. 4801 * Must be set longer than active dwell time.
@@ -4802,19 +4805,23 @@ static void iwl3945_free_channel_map(struct iwl3945_priv *priv)
4802#define IWL_PASSIVE_DWELL_BASE (100) 4805#define IWL_PASSIVE_DWELL_BASE (100)
4803#define IWL_CHANNEL_TUNE_TIME 5 4806#define IWL_CHANNEL_TUNE_TIME 5
4804 4807
4808#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
4809
4805static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv, 4810static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv,
4806 enum ieee80211_band band) 4811 enum ieee80211_band band,
4812 u8 n_probes)
4807{ 4813{
4808 if (band == IEEE80211_BAND_5GHZ) 4814 if (band == IEEE80211_BAND_5GHZ)
4809 return IWL_ACTIVE_DWELL_TIME_52; 4815 return IWL_ACTIVE_DWELL_TIME_52 +
4816 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
4810 else 4817 else
4811 return IWL_ACTIVE_DWELL_TIME_24; 4818 return IWL_ACTIVE_DWELL_TIME_24 +
4819 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
4812} 4820}
4813 4821
4814static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv, 4822static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv,
4815 enum ieee80211_band band) 4823 enum ieee80211_band band)
4816{ 4824{
4817 u16 active = iwl3945_get_active_dwell_time(priv, band);
4818 u16 passive = (band == IEEE80211_BAND_2GHZ) ? 4825 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
4819 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : 4826 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
4820 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; 4827 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
@@ -4829,15 +4836,12 @@ static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv,
4829 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; 4836 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
4830 } 4837 }
4831 4838
4832 if (passive <= active)
4833 passive = active + 1;
4834
4835 return passive; 4839 return passive;
4836} 4840}
4837 4841
4838static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv, 4842static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4839 enum ieee80211_band band, 4843 enum ieee80211_band band,
4840 u8 is_active, u8 direct_mask, 4844 u8 is_active, u8 n_probes,
4841 struct iwl3945_scan_channel *scan_ch) 4845 struct iwl3945_scan_channel *scan_ch)
4842{ 4846{
4843 const struct ieee80211_channel *channels = NULL; 4847 const struct ieee80211_channel *channels = NULL;
@@ -4853,9 +4857,12 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4853 4857
4854 channels = sband->channels; 4858 channels = sband->channels;
4855 4859
4856 active_dwell = iwl3945_get_active_dwell_time(priv, band); 4860 active_dwell = iwl3945_get_active_dwell_time(priv, band, n_probes);
4857 passive_dwell = iwl3945_get_passive_dwell_time(priv, band); 4861 passive_dwell = iwl3945_get_passive_dwell_time(priv, band);
4858 4862
4863 if (passive_dwell <= active_dwell)
4864 passive_dwell = active_dwell + 1;
4865
4859 for (i = 0, added = 0; i < sband->n_channels; i++) { 4866 for (i = 0, added = 0; i < sband->n_channels; i++) {
4860 if (channels[i].flags & IEEE80211_CHAN_DISABLED) 4867 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4861 continue; 4868 continue;
@@ -4875,8 +4882,8 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4875 else 4882 else
4876 scan_ch->type = 1; /* active */ 4883 scan_ch->type = 1; /* active */
4877 4884
4878 if (scan_ch->type & 1) 4885 if ((scan_ch->type & 1) && n_probes)
4879 scan_ch->type |= (direct_mask << 1); 4886 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4880 4887
4881 scan_ch->active_dwell = cpu_to_le16(active_dwell); 4888 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4882 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 4889 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
@@ -6052,7 +6059,7 @@ static void iwl3945_bg_set_monitor(struct work_struct *work)
6052 if (!iwl3945_is_ready(priv)) 6059 if (!iwl3945_is_ready(priv))
6053 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n"); 6060 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
6054 else 6061 else
6055 if (iwl3945_set_mode(priv, IEEE80211_IF_TYPE_MNTR) != 0) 6062 if (iwl3945_set_mode(priv, NL80211_IFTYPE_MONITOR) != 0)
6056 IWL_ERROR("iwl3945_set_mode() failed\n"); 6063 IWL_ERROR("iwl3945_set_mode() failed\n");
6057 6064
6058 mutex_unlock(&priv->mutex); 6065 mutex_unlock(&priv->mutex);
@@ -6093,7 +6100,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6093 int rc = 0; 6100 int rc = 0;
6094 struct iwl3945_scan_cmd *scan; 6101 struct iwl3945_scan_cmd *scan;
6095 struct ieee80211_conf *conf = NULL; 6102 struct ieee80211_conf *conf = NULL;
6096 u8 direct_mask; 6103 u8 n_probes = 2;
6097 enum ieee80211_band band; 6104 enum ieee80211_band band;
6098 6105
6099 conf = ieee80211_get_hw_conf(priv->hw); 6106 conf = ieee80211_get_hw_conf(priv->hw);
@@ -6201,7 +6208,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6201 scan->direct_scan[0].len = priv->direct_ssid_len; 6208 scan->direct_scan[0].len = priv->direct_ssid_len;
6202 memcpy(scan->direct_scan[0].ssid, 6209 memcpy(scan->direct_scan[0].ssid,
6203 priv->direct_ssid, priv->direct_ssid_len); 6210 priv->direct_ssid, priv->direct_ssid_len);
6204 direct_mask = 1; 6211 n_probes++;
6205 } else if (!iwl3945_is_associated(priv) && priv->essid_len) { 6212 } else if (!iwl3945_is_associated(priv) && priv->essid_len) {
6206 IWL_DEBUG_SCAN 6213 IWL_DEBUG_SCAN
6207 ("Kicking off one direct scan for '%s' when not associated\n", 6214 ("Kicking off one direct scan for '%s' when not associated\n",
@@ -6209,11 +6216,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6209 scan->direct_scan[0].id = WLAN_EID_SSID; 6216 scan->direct_scan[0].id = WLAN_EID_SSID;
6210 scan->direct_scan[0].len = priv->essid_len; 6217 scan->direct_scan[0].len = priv->essid_len;
6211 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); 6218 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
6212 direct_mask = 1; 6219 n_probes++;
6213 } else { 6220 } else
6214 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n"); 6221 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
6215 direct_mask = 0;
6216 }
6217 6222
6218 /* We don't build a direct scan probe request; the uCode will do 6223 /* We don't build a direct scan probe request; the uCode will do
6219 * that based on the direct_mask added to each channel entry */ 6224 * that based on the direct_mask added to each channel entry */
@@ -6243,21 +6248,13 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6243 /* select Rx antennas */ 6248 /* select Rx antennas */
6244 scan->flags |= iwl3945_get_antenna_flags(priv); 6249 scan->flags |= iwl3945_get_antenna_flags(priv);
6245 6250
6246 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) 6251 if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
6247 scan->filter_flags = RXON_FILTER_PROMISC_MSK; 6252 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
6248 6253
6249 if (direct_mask) 6254 scan->channel_count =
6250 scan->channel_count = 6255 iwl3945_get_channels_for_scan(priv, band, 1, /* active */
6251 iwl3945_get_channels_for_scan( 6256 n_probes,
6252 priv, band, 1, /* active */ 6257 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6253 direct_mask,
6254 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6255 else
6256 scan->channel_count =
6257 iwl3945_get_channels_for_scan(
6258 priv, band, 0, /* passive */
6259 direct_mask,
6260 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6261 6258
6262 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 6259 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
6263 scan->channel_count * sizeof(struct iwl3945_scan_channel); 6260 scan->channel_count * sizeof(struct iwl3945_scan_channel);
@@ -6320,16 +6317,13 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
6320 6317
6321#define IWL_DELAY_NEXT_SCAN (HZ*2) 6318#define IWL_DELAY_NEXT_SCAN (HZ*2)
6322 6319
6323static void iwl3945_bg_post_associate(struct work_struct *data) 6320static void iwl3945_post_associate(struct iwl3945_priv *priv)
6324{ 6321{
6325 struct iwl3945_priv *priv = container_of(data, struct iwl3945_priv,
6326 post_associate.work);
6327
6328 int rc = 0; 6322 int rc = 0;
6329 struct ieee80211_conf *conf = NULL; 6323 struct ieee80211_conf *conf = NULL;
6330 DECLARE_MAC_BUF(mac); 6324 DECLARE_MAC_BUF(mac);
6331 6325
6332 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 6326 if (priv->iw_mode == NL80211_IFTYPE_AP) {
6333 IWL_ERROR("%s Should not be called in AP mode\n", __func__); 6327 IWL_ERROR("%s Should not be called in AP mode\n", __func__);
6334 return; 6328 return;
6335 } 6329 }
@@ -6342,12 +6336,9 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6342 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 6336 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6343 return; 6337 return;
6344 6338
6345 mutex_lock(&priv->mutex); 6339 if (!priv->vif || !priv->is_open)
6346
6347 if (!priv->vif || !priv->is_open) {
6348 mutex_unlock(&priv->mutex);
6349 return; 6340 return;
6350 } 6341
6351 iwl3945_scan_cancel_timeout(priv, 200); 6342 iwl3945_scan_cancel_timeout(priv, 200);
6352 6343
6353 conf = ieee80211_get_hw_conf(priv->hw); 6344 conf = ieee80211_get_hw_conf(priv->hw);
@@ -6381,7 +6372,7 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6381 else 6372 else
6382 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 6373 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6383 6374
6384 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 6375 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
6385 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 6376 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6386 6377
6387 } 6378 }
@@ -6389,11 +6380,11 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6389 iwl3945_commit_rxon(priv); 6380 iwl3945_commit_rxon(priv);
6390 6381
6391 switch (priv->iw_mode) { 6382 switch (priv->iw_mode) {
6392 case IEEE80211_IF_TYPE_STA: 6383 case NL80211_IFTYPE_STATION:
6393 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); 6384 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
6394 break; 6385 break;
6395 6386
6396 case IEEE80211_IF_TYPE_IBSS: 6387 case NL80211_IFTYPE_ADHOC:
6397 6388
6398 /* clear out the station table */ 6389 /* clear out the station table */
6399 iwl3945_clear_stations_table(priv); 6390 iwl3945_clear_stations_table(priv);
@@ -6419,7 +6410,6 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6419 6410
6420 /* we have just associated, don't start scan too early */ 6411 /* we have just associated, don't start scan too early */
6421 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 6412 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
6422 mutex_unlock(&priv->mutex);
6423} 6413}
6424 6414
6425static void iwl3945_bg_abort_scan(struct work_struct *work) 6415static void iwl3945_bg_abort_scan(struct work_struct *work)
@@ -6567,7 +6557,6 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
6567 */ 6557 */
6568 mutex_lock(&priv->mutex); 6558 mutex_lock(&priv->mutex);
6569 iwl3945_scan_cancel_timeout(priv, 100); 6559 iwl3945_scan_cancel_timeout(priv, 100);
6570 cancel_delayed_work(&priv->post_associate);
6571 mutex_unlock(&priv->mutex); 6560 mutex_unlock(&priv->mutex);
6572 } 6561 }
6573 6562
@@ -6650,8 +6639,6 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6650 mutex_lock(&priv->mutex); 6639 mutex_lock(&priv->mutex);
6651 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value); 6640 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
6652 6641
6653 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
6654
6655 if (!iwl3945_is_ready(priv)) { 6642 if (!iwl3945_is_ready(priv)) {
6656 IWL_DEBUG_MAC80211("leave - not ready\n"); 6643 IWL_DEBUG_MAC80211("leave - not ready\n");
6657 ret = -EIO; 6644 ret = -EIO;
@@ -6767,7 +6754,7 @@ static void iwl3945_config_ap(struct iwl3945_priv *priv)
6767 priv->staging_rxon.flags &= 6754 priv->staging_rxon.flags &=
6768 ~RXON_FLG_SHORT_SLOT_MSK; 6755 ~RXON_FLG_SHORT_SLOT_MSK;
6769 6756
6770 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 6757 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
6771 priv->staging_rxon.flags &= 6758 priv->staging_rxon.flags &=
6772 ~RXON_FLG_SHORT_SLOT_MSK; 6759 ~RXON_FLG_SHORT_SLOT_MSK;
6773 } 6760 }
@@ -6804,7 +6791,7 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6804 } 6791 }
6805 6792
6806 /* handle this temporarily here */ 6793 /* handle this temporarily here */
6807 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS && 6794 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
6808 conf->changed & IEEE80211_IFCC_BEACON) { 6795 conf->changed & IEEE80211_IFCC_BEACON) {
6809 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 6796 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
6810 if (!beacon) 6797 if (!beacon)
@@ -6816,7 +6803,7 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6816 6803
6817 /* XXX: this MUST use conf->mac_addr */ 6804 /* XXX: this MUST use conf->mac_addr */
6818 6805
6819 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 6806 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
6820 (!conf->ssid_len)) { 6807 (!conf->ssid_len)) {
6821 IWL_DEBUG_MAC80211 6808 IWL_DEBUG_MAC80211
6822 ("Leaving in AP mode because HostAPD is not ready.\n"); 6809 ("Leaving in AP mode because HostAPD is not ready.\n");
@@ -6839,7 +6826,7 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6839 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { 6826 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
6840 */ 6827 */
6841 6828
6842 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 6829 if (priv->iw_mode == NL80211_IFTYPE_AP) {
6843 if (!conf->bssid) { 6830 if (!conf->bssid) {
6844 conf->bssid = priv->mac_addr; 6831 conf->bssid = priv->mac_addr;
6845 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); 6832 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
@@ -6874,11 +6861,11 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6874 * to verify) - jpk */ 6861 * to verify) - jpk */
6875 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 6862 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
6876 6863
6877 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 6864 if (priv->iw_mode == NL80211_IFTYPE_AP)
6878 iwl3945_config_ap(priv); 6865 iwl3945_config_ap(priv);
6879 else { 6866 else {
6880 rc = iwl3945_commit_rxon(priv); 6867 rc = iwl3945_commit_rxon(priv);
6881 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) 6868 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
6882 iwl3945_add_station(priv, 6869 iwl3945_add_station(priv,
6883 priv->active_rxon.bssid_addr, 1, 0); 6870 priv->active_rxon.bssid_addr, 1, 0);
6884 } 6871 }
@@ -6914,7 +6901,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
6914 6901
6915 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) { 6902 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) {
6916 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n", 6903 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
6917 IEEE80211_IF_TYPE_MNTR, 6904 NL80211_IFTYPE_MONITOR,
6918 changed_flags, *total_flags); 6905 changed_flags, *total_flags);
6919 /* queue work 'cuz mac80211 is holding a lock which 6906 /* queue work 'cuz mac80211 is holding a lock which
6920 * prevents us from issuing (synchronous) f/w cmds */ 6907 * prevents us from issuing (synchronous) f/w cmds */
@@ -6935,7 +6922,6 @@ static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
6935 6922
6936 if (iwl3945_is_ready_rf(priv)) { 6923 if (iwl3945_is_ready_rf(priv)) {
6937 iwl3945_scan_cancel_timeout(priv, 100); 6924 iwl3945_scan_cancel_timeout(priv, 100);
6938 cancel_delayed_work(&priv->post_associate);
6939 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 6925 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6940 iwl3945_commit_rxon(priv); 6926 iwl3945_commit_rxon(priv);
6941 } 6927 }
@@ -6950,6 +6936,63 @@ static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
6950 IWL_DEBUG_MAC80211("leave\n"); 6936 IWL_DEBUG_MAC80211("leave\n");
6951} 6937}
6952 6938
6939#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
6940
6941static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
6942 struct ieee80211_vif *vif,
6943 struct ieee80211_bss_conf *bss_conf,
6944 u32 changes)
6945{
6946 struct iwl3945_priv *priv = hw->priv;
6947
6948 IWL_DEBUG_MAC80211("changes = 0x%X\n", changes);
6949
6950 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
6951 IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n",
6952 bss_conf->use_short_preamble);
6953 if (bss_conf->use_short_preamble)
6954 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6955 else
6956 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
6957 }
6958
6959 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
6960 IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
6961 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
6962 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
6963 else
6964 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
6965 }
6966
6967 if (changes & BSS_CHANGED_ASSOC) {
6968 IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc);
6969 /* This should never happen as this function should
6970 * never be called from interrupt context. */
6971 if (WARN_ON_ONCE(in_interrupt()))
6972 return;
6973 if (bss_conf->assoc) {
6974 priv->assoc_id = bss_conf->aid;
6975 priv->beacon_int = bss_conf->beacon_int;
6976 priv->timestamp0 = bss_conf->timestamp & 0xFFFFFFFF;
6977 priv->timestamp1 = (bss_conf->timestamp >> 32) &
6978 0xFFFFFFFF;
6979 priv->assoc_capability = bss_conf->assoc_capability;
6980 priv->next_scan_jiffies = jiffies +
6981 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
6982 mutex_lock(&priv->mutex);
6983 iwl3945_post_associate(priv);
6984 mutex_unlock(&priv->mutex);
6985 } else {
6986 priv->assoc_id = 0;
6987 IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc);
6988 }
6989 } else if (changes && iwl3945_is_associated(priv) && priv->assoc_id) {
6990 IWL_DEBUG_MAC80211("Associated Changes %d\n", changes);
6991 iwl3945_send_rxon_assoc(priv);
6992 }
6993
6994}
6995
6953static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) 6996static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
6954{ 6997{
6955 int rc = 0; 6998 int rc = 0;
@@ -6967,7 +7010,7 @@ static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
6967 goto out_unlock; 7010 goto out_unlock;
6968 } 7011 }
6969 7012
6970 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */ 7013 if (priv->iw_mode == NL80211_IFTYPE_AP) { /* APs don't scan */
6971 rc = -EIO; 7014 rc = -EIO;
6972 IWL_ERROR("ERROR: APs don't scan\n"); 7015 IWL_ERROR("ERROR: APs don't scan\n");
6973 goto out_unlock; 7016 goto out_unlock;
@@ -7109,7 +7152,7 @@ static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
7109 spin_unlock_irqrestore(&priv->lock, flags); 7152 spin_unlock_irqrestore(&priv->lock, flags);
7110 7153
7111 mutex_lock(&priv->mutex); 7154 mutex_lock(&priv->mutex);
7112 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 7155 if (priv->iw_mode == NL80211_IFTYPE_AP)
7113 iwl3945_activate_qos(priv, 1); 7156 iwl3945_activate_qos(priv, 1);
7114 else if (priv->assoc_id && iwl3945_is_associated(priv)) 7157 else if (priv->assoc_id && iwl3945_is_associated(priv))
7115 iwl3945_activate_qos(priv, 0); 7158 iwl3945_activate_qos(priv, 0);
@@ -7182,8 +7225,6 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7182 7225
7183 iwl3945_reset_qos(priv); 7226 iwl3945_reset_qos(priv);
7184 7227
7185 cancel_delayed_work(&priv->post_associate);
7186
7187 spin_lock_irqsave(&priv->lock, flags); 7228 spin_lock_irqsave(&priv->lock, flags);
7188 priv->assoc_id = 0; 7229 priv->assoc_id = 0;
7189 priv->assoc_capability = 0; 7230 priv->assoc_capability = 0;
@@ -7198,7 +7239,7 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7198 priv->beacon_int = priv->hw->conf.beacon_int; 7239 priv->beacon_int = priv->hw->conf.beacon_int;
7199 priv->timestamp1 = 0; 7240 priv->timestamp1 = 0;
7200 priv->timestamp0 = 0; 7241 priv->timestamp0 = 0;
7201 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA)) 7242 if ((priv->iw_mode == NL80211_IFTYPE_STATION))
7202 priv->beacon_int = 0; 7243 priv->beacon_int = 0;
7203 7244
7204 spin_unlock_irqrestore(&priv->lock, flags); 7245 spin_unlock_irqrestore(&priv->lock, flags);
@@ -7212,14 +7253,14 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7212 /* we are restarting association process 7253 /* we are restarting association process
7213 * clear RXON_FILTER_ASSOC_MSK bit 7254 * clear RXON_FILTER_ASSOC_MSK bit
7214 */ 7255 */
7215 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { 7256 if (priv->iw_mode != NL80211_IFTYPE_AP) {
7216 iwl3945_scan_cancel_timeout(priv, 100); 7257 iwl3945_scan_cancel_timeout(priv, 100);
7217 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 7258 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7218 iwl3945_commit_rxon(priv); 7259 iwl3945_commit_rxon(priv);
7219 } 7260 }
7220 7261
7221 /* Per mac80211.h: This is only used in IBSS mode... */ 7262 /* Per mac80211.h: This is only used in IBSS mode... */
7222 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 7263 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
7223 7264
7224 IWL_DEBUG_MAC80211("leave - not in IBSS\n"); 7265 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
7225 mutex_unlock(&priv->mutex); 7266 mutex_unlock(&priv->mutex);
@@ -7248,7 +7289,7 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7248 return -EIO; 7289 return -EIO;
7249 } 7290 }
7250 7291
7251 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 7292 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
7252 IWL_DEBUG_MAC80211("leave - not IBSS\n"); 7293 IWL_DEBUG_MAC80211("leave - not IBSS\n");
7253 mutex_unlock(&priv->mutex); 7294 mutex_unlock(&priv->mutex);
7254 return -EIO; 7295 return -EIO;
@@ -7268,7 +7309,7 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7268 7309
7269 iwl3945_reset_qos(priv); 7310 iwl3945_reset_qos(priv);
7270 7311
7271 queue_work(priv->workqueue, &priv->post_associate.work); 7312 iwl3945_post_associate(priv);
7272 7313
7273 mutex_unlock(&priv->mutex); 7314 mutex_unlock(&priv->mutex);
7274 7315
@@ -7767,7 +7808,6 @@ static void iwl3945_setup_deferred_work(struct iwl3945_priv *priv)
7767 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill); 7808 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill);
7768 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 7809 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
7769 INIT_WORK(&priv->set_monitor, iwl3945_bg_set_monitor); 7810 INIT_WORK(&priv->set_monitor, iwl3945_bg_set_monitor);
7770 INIT_DELAYED_WORK(&priv->post_associate, iwl3945_bg_post_associate);
7771 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 7811 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
7772 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 7812 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
7773 INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check); 7813 INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check);
@@ -7785,7 +7825,6 @@ static void iwl3945_cancel_deferred_work(struct iwl3945_priv *priv)
7785 cancel_delayed_work_sync(&priv->init_alive_start); 7825 cancel_delayed_work_sync(&priv->init_alive_start);
7786 cancel_delayed_work(&priv->scan_check); 7826 cancel_delayed_work(&priv->scan_check);
7787 cancel_delayed_work(&priv->alive_start); 7827 cancel_delayed_work(&priv->alive_start);
7788 cancel_delayed_work(&priv->post_associate);
7789 cancel_work_sync(&priv->beacon_update); 7828 cancel_work_sync(&priv->beacon_update);
7790} 7829}
7791 7830
@@ -7830,6 +7869,7 @@ static struct ieee80211_ops iwl3945_hw_ops = {
7830 .conf_tx = iwl3945_mac_conf_tx, 7869 .conf_tx = iwl3945_mac_conf_tx,
7831 .get_tsf = iwl3945_mac_get_tsf, 7870 .get_tsf = iwl3945_mac_get_tsf,
7832 .reset_tsf = iwl3945_mac_reset_tsf, 7871 .reset_tsf = iwl3945_mac_reset_tsf,
7872 .bss_info_changed = iwl3945_bss_info_changed,
7833 .hw_scan = iwl3945_mac_hw_scan 7873 .hw_scan = iwl3945_mac_hw_scan
7834}; 7874};
7835 7875
@@ -7890,6 +7930,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7890 hw->flags = IEEE80211_HW_SIGNAL_DBM | 7930 hw->flags = IEEE80211_HW_SIGNAL_DBM |
7891 IEEE80211_HW_NOISE_DBM; 7931 IEEE80211_HW_NOISE_DBM;
7892 7932
7933 hw->wiphy->interface_modes =
7934 BIT(NL80211_IFTYPE_AP) |
7935 BIT(NL80211_IFTYPE_STATION) |
7936 BIT(NL80211_IFTYPE_ADHOC);
7937
7893 /* 4 EDCA QOS priorities */ 7938 /* 4 EDCA QOS priorities */
7894 hw->queues = 4; 7939 hw->queues = 4;
7895 7940
@@ -7951,7 +7996,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7951 IWL_DEBUG_INFO("Radio disabled.\n"); 7996 IWL_DEBUG_INFO("Radio disabled.\n");
7952 } 7997 }
7953 7998
7954 priv->iw_mode = IEEE80211_IF_TYPE_STA; 7999 priv->iw_mode = NL80211_IFTYPE_STATION;
7955 8000
7956 printk(KERN_INFO DRV_NAME 8001 printk(KERN_INFO DRV_NAME
7957 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name); 8002 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name);
@@ -8331,6 +8376,8 @@ static void __exit iwl3945_exit(void)
8331 iwl3945_rate_control_unregister(); 8376 iwl3945_rate_control_unregister();
8332} 8377}
8333 8378
8379MODULE_FIRMWARE("iwlwifi-3945" IWL3945_UCODE_API ".ucode");
8380
8334module_param_named(antenna, iwl3945_param_antenna, int, 0444); 8381module_param_named(antenna, iwl3945_param_antenna, int, 0444);
8335MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 8382MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
8336module_param_named(disable, iwl3945_param_disable, int, 0444); 8383module_param_named(disable, iwl3945_param_disable, int, 0444);
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index a267d6e65f03..92be60415d04 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -8,6 +8,7 @@
8#include "scan.h" 8#include "scan.h"
9#include "cmd.h" 9#include "cmd.h"
10 10
11static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp);
11 12
12static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) = 13static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) =
13 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 14 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -20,12 +21,88 @@ static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) =
20#define CAPINFO_MASK (~(0xda00)) 21#define CAPINFO_MASK (~(0xda00))
21 22
22 23
24/**
25 * @brief This function finds common rates between rates and card rates.
26 *
27 * It will fill common rates in rates as output if found.
28 *
29 * NOTE: Setting the MSB of the basic rates need to be taken
30 * care, either before or after calling this function
31 *
32 * @param priv A pointer to struct lbs_private structure
33 * @param rates the buffer which keeps input and output
34 * @param rates_size the size of rate1 buffer; new size of buffer on return
35 *
36 * @return 0 on success, or -1 on error
37 */
38static int get_common_rates(struct lbs_private *priv,
39 u8 *rates,
40 u16 *rates_size)
41{
42 u8 *card_rates = lbs_bg_rates;
43 size_t num_card_rates = sizeof(lbs_bg_rates);
44 int ret = 0, i, j;
45 u8 tmp[30];
46 size_t tmp_size = 0;
47
48 /* For each rate in card_rates that exists in rate1, copy to tmp */
49 for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
50 for (j = 0; rates[j] && (j < *rates_size); j++) {
51 if (rates[j] == card_rates[i])
52 tmp[tmp_size++] = card_rates[i];
53 }
54 }
55
56 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
57 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates);
58 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
59 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
60
61 if (!priv->enablehwauto) {
62 for (i = 0; i < tmp_size; i++) {
63 if (tmp[i] == priv->cur_rate)
64 goto done;
65 }
66 lbs_pr_alert("Previously set fixed data rate %#x isn't "
67 "compatible with the network.\n", priv->cur_rate);
68 ret = -1;
69 goto done;
70 }
71 ret = 0;
72
73done:
74 memset(rates, 0, *rates_size);
75 *rates_size = min_t(int, tmp_size, *rates_size);
76 memcpy(rates, tmp, *rates_size);
77 return ret;
78}
79
80
81/**
82 * @brief Sets the MSB on basic rates as the firmware requires
83 *
84 * Scan through an array and set the MSB for basic data rates.
85 *
86 * @param rates buffer of data rates
87 * @param len size of buffer
88 */
89static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
90{
91 int i;
92
93 for (i = 0; i < len; i++) {
94 if (rates[i] == 0x02 || rates[i] == 0x04 ||
95 rates[i] == 0x0b || rates[i] == 0x16)
96 rates[i] |= 0x80;
97 }
98}
99
23 100
24/** 101/**
25 * @brief Associate to a specific BSS discovered in a scan 102 * @brief Associate to a specific BSS discovered in a scan
26 * 103 *
27 * @param priv A pointer to struct lbs_private structure 104 * @param priv A pointer to struct lbs_private structure
28 * @param pbssdesc Pointer to the BSS descriptor to associate with. 105 * @param assoc_req The association request describing the BSS to associate with
29 * 106 *
30 * @return 0-success, otherwise fail 107 * @return 0-success, otherwise fail
31 */ 108 */
@@ -33,29 +110,29 @@ static int lbs_associate(struct lbs_private *priv,
33 struct assoc_request *assoc_req) 110 struct assoc_request *assoc_req)
34{ 111{
35 int ret; 112 int ret;
113 u8 preamble = RADIO_PREAMBLE_LONG;
36 114
37 lbs_deb_enter(LBS_DEB_ASSOC); 115 lbs_deb_enter(LBS_DEB_ASSOC);
38 116
39 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE, 117 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE,
40 0, CMD_OPTION_WAITFORRSP, 118 0, CMD_OPTION_WAITFORRSP,
41 0, assoc_req->bss.bssid); 119 0, assoc_req->bss.bssid);
42
43 if (ret) 120 if (ret)
44 goto done; 121 goto out;
45 122
46 /* set preamble to firmware */ 123 /* Use short preamble only when both the BSS and firmware support it */
47 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) && 124 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
48 (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) 125 (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
49 priv->preamble = CMD_TYPE_SHORT_PREAMBLE; 126 preamble = RADIO_PREAMBLE_SHORT;
50 else
51 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
52 127
53 lbs_set_radio_control(priv); 128 ret = lbs_set_radio(priv, preamble, 1);
129 if (ret)
130 goto out;
54 131
55 ret = lbs_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE, 132 ret = lbs_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE,
56 0, CMD_OPTION_WAITFORRSP, 0, assoc_req); 133 0, CMD_OPTION_WAITFORRSP, 0, assoc_req);
57 134
58done: 135out:
59 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 136 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
60 return ret; 137 return ret;
61} 138}
@@ -64,17 +141,22 @@ done:
64 * @brief Join an adhoc network found in a previous scan 141 * @brief Join an adhoc network found in a previous scan
65 * 142 *
66 * @param priv A pointer to struct lbs_private structure 143 * @param priv A pointer to struct lbs_private structure
67 * @param pbssdesc Pointer to a BSS descriptor found in a previous scan 144 * @param assoc_req The association request describing the BSS to join
68 * to attempt to join
69 * 145 *
70 * @return 0--success, -1--fail 146 * @return 0 on success, error on failure
71 */ 147 */
72static int lbs_join_adhoc_network(struct lbs_private *priv, 148static int lbs_adhoc_join(struct lbs_private *priv,
73 struct assoc_request *assoc_req) 149 struct assoc_request *assoc_req)
74{ 150{
151 struct cmd_ds_802_11_ad_hoc_join cmd;
75 struct bss_descriptor *bss = &assoc_req->bss; 152 struct bss_descriptor *bss = &assoc_req->bss;
153 u8 preamble = RADIO_PREAMBLE_LONG;
154 DECLARE_MAC_BUF(mac);
155 u16 ratesize = 0;
76 int ret = 0; 156 int ret = 0;
77 157
158 lbs_deb_enter(LBS_DEB_ASSOC);
159
78 lbs_deb_join("current SSID '%s', ssid length %u\n", 160 lbs_deb_join("current SSID '%s', ssid length %u\n",
79 escape_essid(priv->curbssparams.ssid, 161 escape_essid(priv->curbssparams.ssid,
80 priv->curbssparams.ssid_len), 162 priv->curbssparams.ssid_len),
@@ -106,29 +188,106 @@ static int lbs_join_adhoc_network(struct lbs_private *priv,
106 goto out; 188 goto out;
107 } 189 }
108 190
109 /* Use shortpreamble only when both creator and card supports 191 /* Use short preamble only when both the BSS and firmware support it */
110 short preamble */ 192 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
111 if (!(bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) || 193 (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
112 !(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
113 lbs_deb_join("AdhocJoin: Long preamble\n");
114 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
115 } else {
116 lbs_deb_join("AdhocJoin: Short preamble\n"); 194 lbs_deb_join("AdhocJoin: Short preamble\n");
117 priv->preamble = CMD_TYPE_SHORT_PREAMBLE; 195 preamble = RADIO_PREAMBLE_SHORT;
118 } 196 }
119 197
120 lbs_set_radio_control(priv); 198 ret = lbs_set_radio(priv, preamble, 1);
199 if (ret)
200 goto out;
121 201
122 lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel); 202 lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel);
123 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band); 203 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
124 204
125 priv->adhoccreate = 0; 205 priv->adhoccreate = 0;
206 priv->curbssparams.channel = bss->channel;
126 207
127 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_JOIN, 208 /* Build the join command */
128 0, CMD_OPTION_WAITFORRSP, 209 memset(&cmd, 0, sizeof(cmd));
129 OID_802_11_SSID, assoc_req); 210 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
211
212 cmd.bss.type = CMD_BSS_TYPE_IBSS;
213 cmd.bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
214
215 memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN);
216 memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len);
217
218 memcpy(&cmd.bss.phyparamset, &bss->phyparamset,
219 sizeof(union ieeetypes_phyparamset));
220
221 memcpy(&cmd.bss.ssparamset, &bss->ssparamset,
222 sizeof(union IEEEtypes_ssparamset));
223
224 cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
225 lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
226 bss->capability, CAPINFO_MASK);
227
228 /* information on BSSID descriptor passed to FW */
229 lbs_deb_join("ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n",
230 print_mac(mac, cmd.bss.bssid), cmd.bss.ssid);
231
232 /* Only v8 and below support setting these */
233 if (priv->fwrelease < 0x09000000) {
234 /* failtimeout */
235 cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
236 /* probedelay */
237 cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
238 }
239
240 /* Copy Data rates from the rates recorded in scan response */
241 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
242 ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES);
243 memcpy(cmd.bss.rates, bss->rates, ratesize);
244 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
245 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
246 ret = -1;
247 goto out;
248 }
249
250 /* Copy the ad-hoc creation rates into Current BSS state structure */
251 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
252 memcpy(&priv->curbssparams.rates, cmd.bss.rates, ratesize);
253
254 /* Set MSB on basic rates as the firmware requires, but _after_
255 * copying to current bss rates.
256 */
257 lbs_set_basic_rate_flags(cmd.bss.rates, ratesize);
258
259 cmd.bss.ssparamset.ibssparamset.atimwindow = cpu_to_le16(bss->atimwindow);
260
261 if (assoc_req->secinfo.wep_enabled) {
262 u16 tmp = le16_to_cpu(cmd.bss.capability);
263 tmp |= WLAN_CAPABILITY_PRIVACY;
264 cmd.bss.capability = cpu_to_le16(tmp);
265 }
266
267 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
268 __le32 local_ps_mode = cpu_to_le32(LBS802_11POWERMODECAM);
269
270 /* wake up first */
271 ret = lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE,
272 CMD_ACT_SET, 0, 0,
273 &local_ps_mode);
274 if (ret) {
275 ret = -1;
276 goto out;
277 }
278 }
279
280 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
281 ret = -1;
282 goto out;
283 }
284
285 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
286 if (ret == 0)
287 ret = lbs_adhoc_post(priv, (struct cmd_header *) &cmd);
130 288
131out: 289out:
290 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
132 return ret; 291 return ret;
133} 292}
134 293
@@ -136,39 +295,131 @@ out:
136 * @brief Start an Adhoc Network 295 * @brief Start an Adhoc Network
137 * 296 *
138 * @param priv A pointer to struct lbs_private structure 297 * @param priv A pointer to struct lbs_private structure
139 * @param adhocssid The ssid of the Adhoc Network 298 * @param assoc_req The association request describing the BSS to start
140 * @return 0--success, -1--fail 299 *
300 * @return 0 on success, error on failure
141 */ 301 */
142static int lbs_start_adhoc_network(struct lbs_private *priv, 302static int lbs_adhoc_start(struct lbs_private *priv,
143 struct assoc_request *assoc_req) 303 struct assoc_request *assoc_req)
144{ 304{
305 struct cmd_ds_802_11_ad_hoc_start cmd;
306 u8 preamble = RADIO_PREAMBLE_LONG;
307 size_t ratesize = 0;
308 u16 tmpcap = 0;
145 int ret = 0; 309 int ret = 0;
146 310
147 priv->adhoccreate = 1; 311 lbs_deb_enter(LBS_DEB_ASSOC);
148 312
149 if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) { 313 if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
150 lbs_deb_join("AdhocStart: Short preamble\n"); 314 lbs_deb_join("ADHOC_START: Will use short preamble\n");
151 priv->preamble = CMD_TYPE_SHORT_PREAMBLE; 315 preamble = RADIO_PREAMBLE_SHORT;
152 } else {
153 lbs_deb_join("AdhocStart: Long preamble\n");
154 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
155 } 316 }
156 317
157 lbs_set_radio_control(priv); 318 ret = lbs_set_radio(priv, preamble, 1);
319 if (ret)
320 goto out;
158 321
159 lbs_deb_join("AdhocStart: channel = %d\n", assoc_req->channel); 322 /* Build the start command */
160 lbs_deb_join("AdhocStart: band = %d\n", assoc_req->band); 323 memset(&cmd, 0, sizeof(cmd));
324 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
161 325
162 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_START, 326 memcpy(cmd.ssid, assoc_req->ssid, assoc_req->ssid_len);
163 0, CMD_OPTION_WAITFORRSP, 0, assoc_req); 327
328 lbs_deb_join("ADHOC_START: SSID '%s', ssid length %u\n",
329 escape_essid(assoc_req->ssid, assoc_req->ssid_len),
330 assoc_req->ssid_len);
331
332 cmd.bsstype = CMD_BSS_TYPE_IBSS;
333
334 if (priv->beacon_period == 0)
335 priv->beacon_period = MRVDRV_BEACON_INTERVAL;
336 cmd.beaconperiod = cpu_to_le16(priv->beacon_period);
337
338 WARN_ON(!assoc_req->channel);
339
340 /* set Physical parameter set */
341 cmd.phyparamset.dsparamset.elementid = MFIE_TYPE_DS_SET;
342 cmd.phyparamset.dsparamset.len = 1;
343 cmd.phyparamset.dsparamset.currentchan = assoc_req->channel;
344
345 /* set IBSS parameter set */
346 cmd.ssparamset.ibssparamset.elementid = MFIE_TYPE_IBSS_SET;
347 cmd.ssparamset.ibssparamset.len = 2;
348 cmd.ssparamset.ibssparamset.atimwindow = 0;
349
350 /* set capability info */
351 tmpcap = WLAN_CAPABILITY_IBSS;
352 if (assoc_req->secinfo.wep_enabled) {
353 lbs_deb_join("ADHOC_START: WEP enabled, setting privacy on\n");
354 tmpcap |= WLAN_CAPABILITY_PRIVACY;
355 } else
356 lbs_deb_join("ADHOC_START: WEP disabled, setting privacy off\n");
357
358 cmd.capability = cpu_to_le16(tmpcap);
359
360 /* Only v8 and below support setting probe delay */
361 if (priv->fwrelease < 0x09000000)
362 cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
363
364 ratesize = min(sizeof(cmd.rates), sizeof(lbs_bg_rates));
365 memcpy(cmd.rates, lbs_bg_rates, ratesize);
366
367 /* Copy the ad-hoc creating rates into Current BSS state structure */
368 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
369 memcpy(&priv->curbssparams.rates, &cmd.rates, ratesize);
164 370
371 /* Set MSB on basic rates as the firmware requires, but _after_
372 * copying to current bss rates.
373 */
374 lbs_set_basic_rate_flags(cmd.rates, ratesize);
375
376 lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
377 cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
378
379 if (lbs_create_dnld_countryinfo_11d(priv)) {
380 lbs_deb_join("ADHOC_START: dnld_countryinfo_11d failed\n");
381 ret = -1;
382 goto out;
383 }
384
385 lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
386 assoc_req->channel, assoc_req->band);
387
388 priv->adhoccreate = 1;
389 priv->mode = IW_MODE_ADHOC;
390
391 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
392 if (ret == 0)
393 ret = lbs_adhoc_post(priv, (struct cmd_header *) &cmd);
394
395out:
396 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
165 return ret; 397 return ret;
166} 398}
167 399
168int lbs_stop_adhoc_network(struct lbs_private *priv) 400/**
401 * @brief Stop and Ad-Hoc network and exit Ad-Hoc mode
402 *
403 * @param priv A pointer to struct lbs_private structure
404 * @return 0 on success, or an error
405 */
406int lbs_adhoc_stop(struct lbs_private *priv)
169{ 407{
170 return lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_STOP, 408 struct cmd_ds_802_11_ad_hoc_stop cmd;
171 0, CMD_OPTION_WAITFORRSP, 0, NULL); 409 int ret;
410
411 lbs_deb_enter(LBS_DEB_JOIN);
412
413 memset(&cmd, 0, sizeof (cmd));
414 cmd.hdr.size = cpu_to_le16 (sizeof (cmd));
415
416 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
417
418 /* Clean up everything even if there was an error */
419 lbs_mac_event_disconnected(priv);
420
421 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
422 return ret;
172} 423}
173 424
174static inline int match_bss_no_security(struct lbs_802_11_security *secinfo, 425static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
@@ -480,14 +731,14 @@ static int assoc_helper_essid(struct lbs_private *priv,
480 if (bss != NULL) { 731 if (bss != NULL) {
481 lbs_deb_assoc("SSID found, will join\n"); 732 lbs_deb_assoc("SSID found, will join\n");
482 memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor)); 733 memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
483 lbs_join_adhoc_network(priv, assoc_req); 734 lbs_adhoc_join(priv, assoc_req);
484 } else { 735 } else {
485 /* else send START command */ 736 /* else send START command */
486 lbs_deb_assoc("SSID not found, creating adhoc network\n"); 737 lbs_deb_assoc("SSID not found, creating adhoc network\n");
487 memcpy(&assoc_req->bss.ssid, &assoc_req->ssid, 738 memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
488 IW_ESSID_MAX_SIZE); 739 IW_ESSID_MAX_SIZE);
489 assoc_req->bss.ssid_len = assoc_req->ssid_len; 740 assoc_req->bss.ssid_len = assoc_req->ssid_len;
490 lbs_start_adhoc_network(priv, assoc_req); 741 lbs_adhoc_start(priv, assoc_req);
491 } 742 }
492 } 743 }
493 744
@@ -520,7 +771,7 @@ static int assoc_helper_bssid(struct lbs_private *priv,
520 ret = lbs_associate(priv, assoc_req); 771 ret = lbs_associate(priv, assoc_req);
521 lbs_deb_assoc("ASSOC: lbs_associate(bssid) returned %d\n", ret); 772 lbs_deb_assoc("ASSOC: lbs_associate(bssid) returned %d\n", ret);
522 } else if (assoc_req->mode == IW_MODE_ADHOC) { 773 } else if (assoc_req->mode == IW_MODE_ADHOC) {
523 lbs_join_adhoc_network(priv, assoc_req); 774 lbs_adhoc_join(priv, assoc_req);
524 } 775 }
525 776
526out: 777out:
@@ -572,11 +823,7 @@ static int assoc_helper_mode(struct lbs_private *priv,
572 } 823 }
573 824
574 priv->mode = assoc_req->mode; 825 priv->mode = assoc_req->mode;
575 ret = lbs_prepare_and_send_command(priv, 826 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, assoc_req->mode);
576 CMD_802_11_SNMP_MIB,
577 0, CMD_OPTION_WAITFORRSP,
578 OID_802_11_INFRASTRUCTURE_MODE,
579 /* Shoot me now */ (void *) (size_t) assoc_req->mode);
580 827
581done: 828done:
582 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 829 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1029,7 +1276,9 @@ void lbs_association_worker(struct work_struct *work)
1029 */ 1276 */
1030 if (priv->mode == IW_MODE_INFRA) { 1277 if (priv->mode == IW_MODE_INFRA) {
1031 if (should_deauth_infrastructure(priv, assoc_req)) { 1278 if (should_deauth_infrastructure(priv, assoc_req)) {
1032 ret = lbs_send_deauthentication(priv); 1279 ret = lbs_cmd_80211_deauthenticate(priv,
1280 priv->curbssparams.bssid,
1281 WLAN_REASON_DEAUTH_LEAVING);
1033 if (ret) { 1282 if (ret) {
1034 lbs_deb_assoc("Deauthentication due to new " 1283 lbs_deb_assoc("Deauthentication due to new "
1035 "configuration request failed: %d\n", 1284 "configuration request failed: %d\n",
@@ -1038,7 +1287,7 @@ void lbs_association_worker(struct work_struct *work)
1038 } 1287 }
1039 } else if (priv->mode == IW_MODE_ADHOC) { 1288 } else if (priv->mode == IW_MODE_ADHOC) {
1040 if (should_stop_adhoc(priv, assoc_req)) { 1289 if (should_stop_adhoc(priv, assoc_req)) {
1041 ret = lbs_stop_adhoc_network(priv); 1290 ret = lbs_adhoc_stop(priv);
1042 if (ret) { 1291 if (ret) {
1043 lbs_deb_assoc("Teardown of AdHoc network due to " 1292 lbs_deb_assoc("Teardown of AdHoc network due to "
1044 "new configuration request failed: %d\n", 1293 "new configuration request failed: %d\n",
@@ -1214,94 +1463,6 @@ struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
1214 1463
1215 1464
1216/** 1465/**
1217 * @brief This function finds common rates between rate1 and card rates.
1218 *
1219 * It will fill common rates in rate1 as output if found.
1220 *
1221 * NOTE: Setting the MSB of the basic rates need to be taken
1222 * care, either before or after calling this function
1223 *
1224 * @param priv A pointer to struct lbs_private structure
1225 * @param rate1 the buffer which keeps input and output
1226 * @param rate1_size the size of rate1 buffer; new size of buffer on return
1227 *
1228 * @return 0 or -1
1229 */
1230static int get_common_rates(struct lbs_private *priv,
1231 u8 *rates,
1232 u16 *rates_size)
1233{
1234 u8 *card_rates = lbs_bg_rates;
1235 size_t num_card_rates = sizeof(lbs_bg_rates);
1236 int ret = 0, i, j;
1237 u8 tmp[30];
1238 size_t tmp_size = 0;
1239
1240 /* For each rate in card_rates that exists in rate1, copy to tmp */
1241 for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
1242 for (j = 0; rates[j] && (j < *rates_size); j++) {
1243 if (rates[j] == card_rates[i])
1244 tmp[tmp_size++] = card_rates[i];
1245 }
1246 }
1247
1248 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
1249 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates);
1250 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
1251 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
1252
1253 if (!priv->enablehwauto) {
1254 for (i = 0; i < tmp_size; i++) {
1255 if (tmp[i] == priv->cur_rate)
1256 goto done;
1257 }
1258 lbs_pr_alert("Previously set fixed data rate %#x isn't "
1259 "compatible with the network.\n", priv->cur_rate);
1260 ret = -1;
1261 goto done;
1262 }
1263 ret = 0;
1264
1265done:
1266 memset(rates, 0, *rates_size);
1267 *rates_size = min_t(int, tmp_size, *rates_size);
1268 memcpy(rates, tmp, *rates_size);
1269 return ret;
1270}
1271
1272
1273/**
1274 * @brief Sets the MSB on basic rates as the firmware requires
1275 *
1276 * Scan through an array and set the MSB for basic data rates.
1277 *
1278 * @param rates buffer of data rates
1279 * @param len size of buffer
1280 */
1281static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
1282{
1283 int i;
1284
1285 for (i = 0; i < len; i++) {
1286 if (rates[i] == 0x02 || rates[i] == 0x04 ||
1287 rates[i] == 0x0b || rates[i] == 0x16)
1288 rates[i] |= 0x80;
1289 }
1290}
1291
1292/**
1293 * @brief Send Deauthentication Request
1294 *
1295 * @param priv A pointer to struct lbs_private structure
1296 * @return 0--success, -1--fail
1297 */
1298int lbs_send_deauthentication(struct lbs_private *priv)
1299{
1300 return lbs_prepare_and_send_command(priv, CMD_802_11_DEAUTHENTICATE,
1301 0, CMD_OPTION_WAITFORRSP, 0, NULL);
1302}
1303
1304/**
1305 * @brief This function prepares command of authenticate. 1466 * @brief This function prepares command of authenticate.
1306 * 1467 *
1307 * @param priv A pointer to struct lbs_private structure 1468 * @param priv A pointer to struct lbs_private structure
@@ -1353,26 +1514,37 @@ out:
1353 return ret; 1514 return ret;
1354} 1515}
1355 1516
1356int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, 1517/**
1357 struct cmd_ds_command *cmd) 1518 * @brief Deauthenticate from a specific BSS
1519 *
1520 * @param priv A pointer to struct lbs_private structure
1521 * @param bssid The specific BSS to deauthenticate from
1522 * @param reason The 802.11 sec. 7.3.1.7 Reason Code for deauthenticating
1523 *
1524 * @return 0 on success, error on failure
1525 */
1526int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, u8 bssid[ETH_ALEN],
1527 u16 reason)
1358{ 1528{
1359 struct cmd_ds_802_11_deauthenticate *dauth = &cmd->params.deauth; 1529 struct cmd_ds_802_11_deauthenticate cmd;
1530 int ret;
1360 1531
1361 lbs_deb_enter(LBS_DEB_JOIN); 1532 lbs_deb_enter(LBS_DEB_JOIN);
1362 1533
1363 cmd->command = cpu_to_le16(CMD_802_11_DEAUTHENTICATE); 1534 memset(&cmd, 0, sizeof(cmd));
1364 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_deauthenticate) + 1535 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1365 S_DS_GEN); 1536 memcpy(cmd.macaddr, &bssid[0], ETH_ALEN);
1537 cmd.reasoncode = cpu_to_le16(reason);
1366 1538
1367 /* set AP MAC address */ 1539 ret = lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd);
1368 memmove(dauth->macaddr, priv->curbssparams.bssid, ETH_ALEN);
1369 1540
1370 /* Reason code 3 = Station is leaving */ 1541 /* Clean up everything even if there was an error; can't assume that
1371#define REASON_CODE_STA_LEAVING 3 1542 * we're still authenticated to the AP after trying to deauth.
1372 dauth->reasoncode = cpu_to_le16(REASON_CODE_STA_LEAVING); 1543 */
1544 lbs_mac_event_disconnected(priv);
1373 1545
1374 lbs_deb_leave(LBS_DEB_JOIN); 1546 lbs_deb_leave(LBS_DEB_JOIN);
1375 return 0; 1547 return ret;
1376} 1548}
1377 1549
1378int lbs_cmd_80211_associate(struct lbs_private *priv, 1550int lbs_cmd_80211_associate(struct lbs_private *priv,
@@ -1489,231 +1661,6 @@ done:
1489 return ret; 1661 return ret;
1490} 1662}
1491 1663
1492int lbs_cmd_80211_ad_hoc_start(struct lbs_private *priv,
1493 struct cmd_ds_command *cmd, void *pdata_buf)
1494{
1495 struct cmd_ds_802_11_ad_hoc_start *adhs = &cmd->params.ads;
1496 int ret = 0;
1497 int cmdappendsize = 0;
1498 struct assoc_request *assoc_req = pdata_buf;
1499 u16 tmpcap = 0;
1500 size_t ratesize = 0;
1501
1502 lbs_deb_enter(LBS_DEB_JOIN);
1503
1504 if (!priv) {
1505 ret = -1;
1506 goto done;
1507 }
1508
1509 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_START);
1510
1511 /*
1512 * Fill in the parameters for 2 data structures:
1513 * 1. cmd_ds_802_11_ad_hoc_start command
1514 * 2. priv->scantable[i]
1515 *
1516 * Driver will fill up SSID, bsstype,IBSS param, Physical Param,
1517 * probe delay, and cap info.
1518 *
1519 * Firmware will fill up beacon period, DTIM, Basic rates
1520 * and operational rates.
1521 */
1522
1523 memset(adhs->ssid, 0, IW_ESSID_MAX_SIZE);
1524 memcpy(adhs->ssid, assoc_req->ssid, assoc_req->ssid_len);
1525
1526 lbs_deb_join("ADHOC_S_CMD: SSID '%s', ssid length %u\n",
1527 escape_essid(assoc_req->ssid, assoc_req->ssid_len),
1528 assoc_req->ssid_len);
1529
1530 /* set the BSS type */
1531 adhs->bsstype = CMD_BSS_TYPE_IBSS;
1532 priv->mode = IW_MODE_ADHOC;
1533 if (priv->beacon_period == 0)
1534 priv->beacon_period = MRVDRV_BEACON_INTERVAL;
1535 adhs->beaconperiod = cpu_to_le16(priv->beacon_period);
1536
1537 /* set Physical param set */
1538#define DS_PARA_IE_ID 3
1539#define DS_PARA_IE_LEN 1
1540
1541 adhs->phyparamset.dsparamset.elementid = DS_PARA_IE_ID;
1542 adhs->phyparamset.dsparamset.len = DS_PARA_IE_LEN;
1543
1544 WARN_ON(!assoc_req->channel);
1545
1546 lbs_deb_join("ADHOC_S_CMD: Creating ADHOC on channel %d\n",
1547 assoc_req->channel);
1548
1549 adhs->phyparamset.dsparamset.currentchan = assoc_req->channel;
1550
1551 /* set IBSS param set */
1552#define IBSS_PARA_IE_ID 6
1553#define IBSS_PARA_IE_LEN 2
1554
1555 adhs->ssparamset.ibssparamset.elementid = IBSS_PARA_IE_ID;
1556 adhs->ssparamset.ibssparamset.len = IBSS_PARA_IE_LEN;
1557 adhs->ssparamset.ibssparamset.atimwindow = 0;
1558
1559 /* set capability info */
1560 tmpcap = WLAN_CAPABILITY_IBSS;
1561 if (assoc_req->secinfo.wep_enabled) {
1562 lbs_deb_join("ADHOC_S_CMD: WEP enabled, "
1563 "setting privacy on\n");
1564 tmpcap |= WLAN_CAPABILITY_PRIVACY;
1565 } else {
1566 lbs_deb_join("ADHOC_S_CMD: WEP disabled, "
1567 "setting privacy off\n");
1568 }
1569 adhs->capability = cpu_to_le16(tmpcap);
1570
1571 /* probedelay */
1572 adhs->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
1573
1574 memset(adhs->rates, 0, sizeof(adhs->rates));
1575 ratesize = min(sizeof(adhs->rates), sizeof(lbs_bg_rates));
1576 memcpy(adhs->rates, lbs_bg_rates, ratesize);
1577
1578 /* Copy the ad-hoc creating rates into Current BSS state structure */
1579 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
1580 memcpy(&priv->curbssparams.rates, &adhs->rates, ratesize);
1581
1582 /* Set MSB on basic rates as the firmware requires, but _after_
1583 * copying to current bss rates.
1584 */
1585 lbs_set_basic_rate_flags(adhs->rates, ratesize);
1586
1587 lbs_deb_join("ADHOC_S_CMD: rates=%02x %02x %02x %02x \n",
1588 adhs->rates[0], adhs->rates[1], adhs->rates[2], adhs->rates[3]);
1589
1590 lbs_deb_join("ADHOC_S_CMD: AD HOC Start command is ready\n");
1591
1592 if (lbs_create_dnld_countryinfo_11d(priv)) {
1593 lbs_deb_join("ADHOC_S_CMD: dnld_countryinfo_11d failed\n");
1594 ret = -1;
1595 goto done;
1596 }
1597
1598 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ad_hoc_start) +
1599 S_DS_GEN + cmdappendsize);
1600
1601 ret = 0;
1602done:
1603 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1604 return ret;
1605}
1606
1607int lbs_cmd_80211_ad_hoc_stop(struct cmd_ds_command *cmd)
1608{
1609 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_STOP);
1610 cmd->size = cpu_to_le16(S_DS_GEN);
1611
1612 return 0;
1613}
1614
1615int lbs_cmd_80211_ad_hoc_join(struct lbs_private *priv,
1616 struct cmd_ds_command *cmd, void *pdata_buf)
1617{
1618 struct cmd_ds_802_11_ad_hoc_join *join_cmd = &cmd->params.adj;
1619 struct assoc_request *assoc_req = pdata_buf;
1620 struct bss_descriptor *bss = &assoc_req->bss;
1621 int cmdappendsize = 0;
1622 int ret = 0;
1623 u16 ratesize = 0;
1624 DECLARE_MAC_BUF(mac);
1625
1626 lbs_deb_enter(LBS_DEB_JOIN);
1627
1628 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_JOIN);
1629
1630 join_cmd->bss.type = CMD_BSS_TYPE_IBSS;
1631 join_cmd->bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
1632
1633 memcpy(&join_cmd->bss.bssid, &bss->bssid, ETH_ALEN);
1634 memcpy(&join_cmd->bss.ssid, &bss->ssid, bss->ssid_len);
1635
1636 memcpy(&join_cmd->bss.phyparamset, &bss->phyparamset,
1637 sizeof(union ieeetypes_phyparamset));
1638
1639 memcpy(&join_cmd->bss.ssparamset, &bss->ssparamset,
1640 sizeof(union IEEEtypes_ssparamset));
1641
1642 join_cmd->bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
1643 lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
1644 bss->capability, CAPINFO_MASK);
1645
1646 /* information on BSSID descriptor passed to FW */
1647 lbs_deb_join(
1648 "ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n",
1649 print_mac(mac, join_cmd->bss.bssid),
1650 join_cmd->bss.ssid);
1651
1652 /* failtimeout */
1653 join_cmd->failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
1654
1655 /* probedelay */
1656 join_cmd->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
1657
1658 priv->curbssparams.channel = bss->channel;
1659
1660 /* Copy Data rates from the rates recorded in scan response */
1661 memset(join_cmd->bss.rates, 0, sizeof(join_cmd->bss.rates));
1662 ratesize = min_t(u16, sizeof(join_cmd->bss.rates), MAX_RATES);
1663 memcpy(join_cmd->bss.rates, bss->rates, ratesize);
1664 if (get_common_rates(priv, join_cmd->bss.rates, &ratesize)) {
1665 lbs_deb_join("ADHOC_J_CMD: get_common_rates returns error.\n");
1666 ret = -1;
1667 goto done;
1668 }
1669
1670 /* Copy the ad-hoc creating rates into Current BSS state structure */
1671 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
1672 memcpy(&priv->curbssparams.rates, join_cmd->bss.rates, ratesize);
1673
1674 /* Set MSB on basic rates as the firmware requires, but _after_
1675 * copying to current bss rates.
1676 */
1677 lbs_set_basic_rate_flags(join_cmd->bss.rates, ratesize);
1678
1679 join_cmd->bss.ssparamset.ibssparamset.atimwindow =
1680 cpu_to_le16(bss->atimwindow);
1681
1682 if (assoc_req->secinfo.wep_enabled) {
1683 u16 tmp = le16_to_cpu(join_cmd->bss.capability);
1684 tmp |= WLAN_CAPABILITY_PRIVACY;
1685 join_cmd->bss.capability = cpu_to_le16(tmp);
1686 }
1687
1688 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
1689 /* wake up first */
1690 __le32 Localpsmode;
1691
1692 Localpsmode = cpu_to_le32(LBS802_11POWERMODECAM);
1693 ret = lbs_prepare_and_send_command(priv,
1694 CMD_802_11_PS_MODE,
1695 CMD_ACT_SET,
1696 0, 0, &Localpsmode);
1697
1698 if (ret) {
1699 ret = -1;
1700 goto done;
1701 }
1702 }
1703
1704 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
1705 ret = -1;
1706 goto done;
1707 }
1708
1709 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ad_hoc_join) +
1710 S_DS_GEN + cmdappendsize);
1711
1712done:
1713 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1714 return ret;
1715}
1716
1717int lbs_ret_80211_associate(struct lbs_private *priv, 1664int lbs_ret_80211_associate(struct lbs_private *priv,
1718 struct cmd_ds_command *resp) 1665 struct cmd_ds_command *resp)
1719{ 1666{
@@ -1815,34 +1762,19 @@ done:
1815 return ret; 1762 return ret;
1816} 1763}
1817 1764
1818int lbs_ret_80211_disassociate(struct lbs_private *priv) 1765static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp)
1819{
1820 lbs_deb_enter(LBS_DEB_JOIN);
1821
1822 lbs_mac_event_disconnected(priv);
1823
1824 lbs_deb_leave(LBS_DEB_JOIN);
1825 return 0;
1826}
1827
1828int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
1829 struct cmd_ds_command *resp)
1830{ 1766{
1831 int ret = 0; 1767 int ret = 0;
1832 u16 command = le16_to_cpu(resp->command); 1768 u16 command = le16_to_cpu(resp->command);
1833 u16 result = le16_to_cpu(resp->result); 1769 u16 result = le16_to_cpu(resp->result);
1834 struct cmd_ds_802_11_ad_hoc_result *padhocresult; 1770 struct cmd_ds_802_11_ad_hoc_result *adhoc_resp;
1835 union iwreq_data wrqu; 1771 union iwreq_data wrqu;
1836 struct bss_descriptor *bss; 1772 struct bss_descriptor *bss;
1837 DECLARE_MAC_BUF(mac); 1773 DECLARE_MAC_BUF(mac);
1838 1774
1839 lbs_deb_enter(LBS_DEB_JOIN); 1775 lbs_deb_enter(LBS_DEB_JOIN);
1840 1776
1841 padhocresult = &resp->params.result; 1777 adhoc_resp = (struct cmd_ds_802_11_ad_hoc_result *) resp;
1842
1843 lbs_deb_join("ADHOC_RESP: size = %d\n", le16_to_cpu(resp->size));
1844 lbs_deb_join("ADHOC_RESP: command = %x\n", command);
1845 lbs_deb_join("ADHOC_RESP: result = %x\n", result);
1846 1778
1847 if (!priv->in_progress_assoc_req) { 1779 if (!priv->in_progress_assoc_req) {
1848 lbs_deb_join("ADHOC_RESP: no in-progress association " 1780 lbs_deb_join("ADHOC_RESP: no in-progress association "
@@ -1856,26 +1788,19 @@ int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
1856 * Join result code 0 --> SUCCESS 1788 * Join result code 0 --> SUCCESS
1857 */ 1789 */
1858 if (result) { 1790 if (result) {
1859 lbs_deb_join("ADHOC_RESP: failed\n"); 1791 lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result);
1860 if (priv->connect_status == LBS_CONNECTED) 1792 if (priv->connect_status == LBS_CONNECTED)
1861 lbs_mac_event_disconnected(priv); 1793 lbs_mac_event_disconnected(priv);
1862 ret = -1; 1794 ret = -1;
1863 goto done; 1795 goto done;
1864 } 1796 }
1865 1797
1866 /*
1867 * Now the join cmd should be successful
1868 * If BSSID has changed use SSID to compare instead of BSSID
1869 */
1870 lbs_deb_join("ADHOC_RESP: associated to '%s'\n",
1871 escape_essid(bss->ssid, bss->ssid_len));
1872
1873 /* Send a Media Connected event, according to the Spec */ 1798 /* Send a Media Connected event, according to the Spec */
1874 priv->connect_status = LBS_CONNECTED; 1799 priv->connect_status = LBS_CONNECTED;
1875 1800
1876 if (command == CMD_RET(CMD_802_11_AD_HOC_START)) { 1801 if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
1877 /* Update the created network descriptor with the new BSSID */ 1802 /* Update the created network descriptor with the new BSSID */
1878 memcpy(bss->bssid, padhocresult->bssid, ETH_ALEN); 1803 memcpy(bss->bssid, adhoc_resp->bssid, ETH_ALEN);
1879 } 1804 }
1880 1805
1881 /* Set the BSSID from the joined/started descriptor */ 1806 /* Set the BSSID from the joined/started descriptor */
@@ -1894,22 +1819,13 @@ int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
1894 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1819 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1895 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 1820 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
1896 1821
1897 lbs_deb_join("ADHOC_RESP: - Joined/Started Ad Hoc\n"); 1822 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %s, channel %d\n",
1898 lbs_deb_join("ADHOC_RESP: channel = %d\n", priv->curbssparams.channel); 1823 escape_essid(bss->ssid, bss->ssid_len),
1899 lbs_deb_join("ADHOC_RESP: BSSID = %s\n", 1824 print_mac(mac, priv->curbssparams.bssid),
1900 print_mac(mac, padhocresult->bssid)); 1825 priv->curbssparams.channel);
1901 1826
1902done: 1827done:
1903 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); 1828 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1904 return ret; 1829 return ret;
1905} 1830}
1906 1831
1907int lbs_ret_80211_ad_hoc_stop(struct lbs_private *priv)
1908{
1909 lbs_deb_enter(LBS_DEB_JOIN);
1910
1911 lbs_mac_event_disconnected(priv);
1912
1913 lbs_deb_leave(LBS_DEB_JOIN);
1914 return 0;
1915}
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index c516fbe518fd..8b7336dd02a3 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -12,28 +12,18 @@ struct cmd_ds_command;
12int lbs_cmd_80211_authenticate(struct lbs_private *priv, 12int lbs_cmd_80211_authenticate(struct lbs_private *priv,
13 struct cmd_ds_command *cmd, 13 struct cmd_ds_command *cmd,
14 void *pdata_buf); 14 void *pdata_buf);
15int lbs_cmd_80211_ad_hoc_join(struct lbs_private *priv, 15
16 struct cmd_ds_command *cmd, 16int lbs_adhoc_stop(struct lbs_private *priv);
17 void *pdata_buf); 17
18int lbs_cmd_80211_ad_hoc_stop(struct cmd_ds_command *cmd);
19int lbs_cmd_80211_ad_hoc_start(struct lbs_private *priv,
20 struct cmd_ds_command *cmd,
21 void *pdata_buf);
22int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, 18int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
23 struct cmd_ds_command *cmd); 19 u8 bssid[ETH_ALEN], u16 reason);
24int lbs_cmd_80211_associate(struct lbs_private *priv, 20int lbs_cmd_80211_associate(struct lbs_private *priv,
25 struct cmd_ds_command *cmd, 21 struct cmd_ds_command *cmd,
26 void *pdata_buf); 22 void *pdata_buf);
27 23
28int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv, 24int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
29 struct cmd_ds_command *resp); 25 struct cmd_ds_command *resp);
30int lbs_ret_80211_ad_hoc_stop(struct lbs_private *priv);
31int lbs_ret_80211_disassociate(struct lbs_private *priv);
32int lbs_ret_80211_associate(struct lbs_private *priv, 26int lbs_ret_80211_associate(struct lbs_private *priv,
33 struct cmd_ds_command *resp); 27 struct cmd_ds_command *resp);
34 28
35int lbs_stop_adhoc_network(struct lbs_private *priv);
36
37int lbs_send_deauthentication(struct lbs_private *priv);
38
39#endif /* _LBS_ASSOC_H */ 29#endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 75427e61898d..aee19fa844e4 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -480,181 +480,166 @@ int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
480 return ret; 480 return ret;
481} 481}
482 482
483static int lbs_cmd_802_11_reset(struct cmd_ds_command *cmd, int cmd_action) 483/**
484{ 484 * @brief Set an SNMP MIB value
485 struct cmd_ds_802_11_reset *reset = &cmd->params.reset; 485 *
486 486 * @param priv A pointer to struct lbs_private structure
487 lbs_deb_enter(LBS_DEB_CMD); 487 * @param oid The OID to set in the firmware
488 488 * @param val Value to set the OID to
489 cmd->command = cpu_to_le16(CMD_802_11_RESET); 489 *
490 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset) + S_DS_GEN); 490 * @return 0 on success, error on failure
491 reset->action = cpu_to_le16(cmd_action); 491 */
492 492int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val)
493 lbs_deb_leave(LBS_DEB_CMD);
494 return 0;
495}
496
497static int lbs_cmd_802_11_snmp_mib(struct lbs_private *priv,
498 struct cmd_ds_command *cmd,
499 int cmd_action,
500 int cmd_oid, void *pdata_buf)
501{ 493{
502 struct cmd_ds_802_11_snmp_mib *pSNMPMIB = &cmd->params.smib; 494 struct cmd_ds_802_11_snmp_mib cmd;
503 u8 ucTemp; 495 int ret;
504 496
505 lbs_deb_enter(LBS_DEB_CMD); 497 lbs_deb_enter(LBS_DEB_CMD);
506 498
507 lbs_deb_cmd("SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid); 499 memset(&cmd, 0, sizeof (cmd));
508 500 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
509 cmd->command = cpu_to_le16(CMD_802_11_SNMP_MIB); 501 cmd.action = cpu_to_le16(CMD_ACT_SET);
510 cmd->size = cpu_to_le16(sizeof(*pSNMPMIB) + S_DS_GEN); 502 cmd.oid = cpu_to_le16((u16) oid);
511
512 switch (cmd_oid) {
513 case OID_802_11_INFRASTRUCTURE_MODE:
514 {
515 u8 mode = (u8) (size_t) pdata_buf;
516 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
517 pSNMPMIB->oid = cpu_to_le16((u16) DESIRED_BSSTYPE_I);
518 pSNMPMIB->bufsize = cpu_to_le16(sizeof(u8));
519 if (mode == IW_MODE_ADHOC) {
520 ucTemp = SNMP_MIB_VALUE_ADHOC;
521 } else {
522 /* Infra and Auto modes */
523 ucTemp = SNMP_MIB_VALUE_INFRA;
524 }
525
526 memmove(pSNMPMIB->value, &ucTemp, sizeof(u8));
527 503
504 switch (oid) {
505 case SNMP_MIB_OID_BSS_TYPE:
506 cmd.bufsize = cpu_to_le16(sizeof(u8));
507 cmd.value[0] = (val == IW_MODE_ADHOC) ? 2 : 1;
528 break; 508 break;
509 case SNMP_MIB_OID_11D_ENABLE:
510 case SNMP_MIB_OID_FRAG_THRESHOLD:
511 case SNMP_MIB_OID_RTS_THRESHOLD:
512 case SNMP_MIB_OID_SHORT_RETRY_LIMIT:
513 case SNMP_MIB_OID_LONG_RETRY_LIMIT:
514 cmd.bufsize = cpu_to_le16(sizeof(u16));
515 *((__le16 *)(&cmd.value)) = cpu_to_le16(val);
516 break;
517 default:
518 lbs_deb_cmd("SNMP_CMD: (set) unhandled OID 0x%x\n", oid);
519 ret = -EINVAL;
520 goto out;
529 } 521 }
530 522
531 case OID_802_11D_ENABLE: 523 lbs_deb_cmd("SNMP_CMD: (set) oid 0x%x, oid size 0x%x, value 0x%x\n",
532 { 524 le16_to_cpu(cmd.oid), le16_to_cpu(cmd.bufsize), val);
533 u32 ulTemp;
534
535 pSNMPMIB->oid = cpu_to_le16((u16) DOT11D_I);
536
537 if (cmd_action == CMD_ACT_SET) {
538 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
539 pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
540 ulTemp = *(u32 *)pdata_buf;
541 *((__le16 *)(pSNMPMIB->value)) =
542 cpu_to_le16((u16) ulTemp);
543 }
544 break;
545 }
546
547 case OID_802_11_FRAGMENTATION_THRESHOLD:
548 {
549 u32 ulTemp;
550
551 pSNMPMIB->oid = cpu_to_le16((u16) FRAGTHRESH_I);
552
553 if (cmd_action == CMD_ACT_GET) {
554 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET);
555 } else if (cmd_action == CMD_ACT_SET) {
556 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
557 pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
558 ulTemp = *((u32 *) pdata_buf);
559 *((__le16 *)(pSNMPMIB->value)) =
560 cpu_to_le16((u16) ulTemp);
561 525
562 } 526 ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd);
563 527
564 break; 528out:
565 } 529 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
530 return ret;
531}
566 532
567 case OID_802_11_RTS_THRESHOLD: 533/**
568 { 534 * @brief Get an SNMP MIB value
535 *
536 * @param priv A pointer to struct lbs_private structure
537 * @param oid The OID to retrieve from the firmware
538 * @param out_val Location for the returned value
539 *
540 * @return 0 on success, error on failure
541 */
542int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val)
543{
544 struct cmd_ds_802_11_snmp_mib cmd;
545 int ret;
569 546
570 u32 ulTemp; 547 lbs_deb_enter(LBS_DEB_CMD);
571 pSNMPMIB->oid = cpu_to_le16(RTSTHRESH_I);
572 548
573 if (cmd_action == CMD_ACT_GET) { 549 memset(&cmd, 0, sizeof (cmd));
574 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET); 550 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
575 } else if (cmd_action == CMD_ACT_SET) { 551 cmd.action = cpu_to_le16(CMD_ACT_GET);
576 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET); 552 cmd.oid = cpu_to_le16(oid);
577 pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
578 ulTemp = *((u32 *)pdata_buf);
579 *(__le16 *)(pSNMPMIB->value) =
580 cpu_to_le16((u16) ulTemp);
581 553
582 } 554 ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd);
583 break; 555 if (ret)
584 } 556 goto out;
585 case OID_802_11_TX_RETRYCOUNT:
586 pSNMPMIB->oid = cpu_to_le16((u16) SHORT_RETRYLIM_I);
587
588 if (cmd_action == CMD_ACT_GET) {
589 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET);
590 } else if (cmd_action == CMD_ACT_SET) {
591 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
592 pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
593 *((__le16 *)(pSNMPMIB->value)) =
594 cpu_to_le16((u16) priv->txretrycount);
595 }
596 557
558 switch (le16_to_cpu(cmd.bufsize)) {
559 case sizeof(u8):
560 if (oid == SNMP_MIB_OID_BSS_TYPE) {
561 if (cmd.value[0] == 2)
562 *out_val = IW_MODE_ADHOC;
563 else
564 *out_val = IW_MODE_INFRA;
565 } else
566 *out_val = cmd.value[0];
567 break;
568 case sizeof(u16):
569 *out_val = le16_to_cpu(*((__le16 *)(&cmd.value)));
597 break; 570 break;
598 default: 571 default:
572 lbs_deb_cmd("SNMP_CMD: (get) unhandled OID 0x%x size %d\n",
573 oid, le16_to_cpu(cmd.bufsize));
599 break; 574 break;
600 } 575 }
601 576
602 lbs_deb_cmd( 577out:
603 "SNMP_CMD: command=0x%x, size=0x%x, seqnum=0x%x, result=0x%x\n", 578 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
604 le16_to_cpu(cmd->command), le16_to_cpu(cmd->size), 579 return ret;
605 le16_to_cpu(cmd->seqnum), le16_to_cpu(cmd->result));
606
607 lbs_deb_cmd(
608 "SNMP_CMD: action 0x%x, oid 0x%x, oidsize 0x%x, value 0x%x\n",
609 le16_to_cpu(pSNMPMIB->querytype), le16_to_cpu(pSNMPMIB->oid),
610 le16_to_cpu(pSNMPMIB->bufsize),
611 le16_to_cpu(*(__le16 *) pSNMPMIB->value));
612
613 lbs_deb_leave(LBS_DEB_CMD);
614 return 0;
615} 580}
616 581
617static int lbs_cmd_802_11_rf_tx_power(struct cmd_ds_command *cmd, 582/**
618 u16 cmd_action, void *pdata_buf) 583 * @brief Get the min, max, and current TX power
584 *
585 * @param priv A pointer to struct lbs_private structure
586 * @param curlevel Current power level in dBm
587 * @param minlevel Minimum supported power level in dBm (optional)
588 * @param maxlevel Maximum supported power level in dBm (optional)
589 *
590 * @return 0 on success, error on failure
591 */
592int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
593 s16 *maxlevel)
619{ 594{
620 595 struct cmd_ds_802_11_rf_tx_power cmd;
621 struct cmd_ds_802_11_rf_tx_power *prtp = &cmd->params.txp; 596 int ret;
622 597
623 lbs_deb_enter(LBS_DEB_CMD); 598 lbs_deb_enter(LBS_DEB_CMD);
624 599
625 cmd->size = 600 memset(&cmd, 0, sizeof(cmd));
626 cpu_to_le16((sizeof(struct cmd_ds_802_11_rf_tx_power)) + S_DS_GEN); 601 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
627 cmd->command = cpu_to_le16(CMD_802_11_RF_TX_POWER); 602 cmd.action = cpu_to_le16(CMD_ACT_GET);
628 prtp->action = cpu_to_le16(cmd_action); 603
604 ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd);
605 if (ret == 0) {
606 *curlevel = le16_to_cpu(cmd.curlevel);
607 if (minlevel)
608 *minlevel = le16_to_cpu(cmd.minlevel);
609 if (maxlevel)
610 *maxlevel = le16_to_cpu(cmd.maxlevel);
611 }
629 612
630 lbs_deb_cmd("RF_TX_POWER_CMD: size:%d cmd:0x%x Act:%d\n", 613 lbs_deb_leave(LBS_DEB_CMD);
631 le16_to_cpu(cmd->size), le16_to_cpu(cmd->command), 614 return ret;
632 le16_to_cpu(prtp->action)); 615}
633 616
634 switch (cmd_action) { 617/**
635 case CMD_ACT_TX_POWER_OPT_GET: 618 * @brief Set the TX power
636 prtp->action = cpu_to_le16(CMD_ACT_GET); 619 *
637 prtp->currentlevel = 0; 620 * @param priv A pointer to struct lbs_private structure
638 break; 621 * @param dbm The desired power level in dBm
622 *
623 * @return 0 on success, error on failure
624 */
625int lbs_set_tx_power(struct lbs_private *priv, s16 dbm)
626{
627 struct cmd_ds_802_11_rf_tx_power cmd;
628 int ret;
639 629
640 case CMD_ACT_TX_POWER_OPT_SET_HIGH: 630 lbs_deb_enter(LBS_DEB_CMD);
641 prtp->action = cpu_to_le16(CMD_ACT_SET);
642 prtp->currentlevel = cpu_to_le16(CMD_ACT_TX_POWER_INDEX_HIGH);
643 break;
644 631
645 case CMD_ACT_TX_POWER_OPT_SET_MID: 632 memset(&cmd, 0, sizeof(cmd));
646 prtp->action = cpu_to_le16(CMD_ACT_SET); 633 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
647 prtp->currentlevel = cpu_to_le16(CMD_ACT_TX_POWER_INDEX_MID); 634 cmd.action = cpu_to_le16(CMD_ACT_SET);
648 break; 635 cmd.curlevel = cpu_to_le16(dbm);
649 636
650 case CMD_ACT_TX_POWER_OPT_SET_LOW: 637 lbs_deb_cmd("SET_RF_TX_POWER: %d dBm\n", dbm);
651 prtp->action = cpu_to_le16(CMD_ACT_SET); 638
652 prtp->currentlevel = cpu_to_le16(*((u16 *) pdata_buf)); 639 ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd);
653 break;
654 }
655 640
656 lbs_deb_leave(LBS_DEB_CMD); 641 lbs_deb_leave(LBS_DEB_CMD);
657 return 0; 642 return ret;
658} 643}
659 644
660static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd, 645static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
@@ -1033,9 +1018,9 @@ int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
1033 return ret; 1018 return ret;
1034} 1019}
1035 1020
1036int lbs_mesh_config_send(struct lbs_private *priv, 1021static int __lbs_mesh_config_send(struct lbs_private *priv,
1037 struct cmd_ds_mesh_config *cmd, 1022 struct cmd_ds_mesh_config *cmd,
1038 uint16_t action, uint16_t type) 1023 uint16_t action, uint16_t type)
1039{ 1024{
1040 int ret; 1025 int ret;
1041 1026
@@ -1054,6 +1039,19 @@ int lbs_mesh_config_send(struct lbs_private *priv,
1054 return ret; 1039 return ret;
1055} 1040}
1056 1041
1042int lbs_mesh_config_send(struct lbs_private *priv,
1043 struct cmd_ds_mesh_config *cmd,
1044 uint16_t action, uint16_t type)
1045{
1046 int ret;
1047
1048 if (!(priv->fwcapinfo & FW_CAPINFO_PERSISTENT_CONFIG))
1049 return -EOPNOTSUPP;
1050
1051 ret = __lbs_mesh_config_send(priv, cmd, action, type);
1052 return ret;
1053}
1054
1057/* This function is the CMD_MESH_CONFIG legacy function. It only handles the 1055/* This function is the CMD_MESH_CONFIG legacy function. It only handles the
1058 * START and STOP actions. The extended actions supported by CMD_MESH_CONFIG 1056 * START and STOP actions. The extended actions supported by CMD_MESH_CONFIG
1059 * are all handled by preparing a struct cmd_ds_mesh_config and passing it to 1057 * are all handled by preparing a struct cmd_ds_mesh_config and passing it to
@@ -1095,7 +1093,7 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1095 action, priv->mesh_tlv, chan, 1093 action, priv->mesh_tlv, chan,
1096 escape_essid(priv->mesh_ssid, priv->mesh_ssid_len)); 1094 escape_essid(priv->mesh_ssid, priv->mesh_ssid_len));
1097 1095
1098 return lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv); 1096 return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
1099} 1097}
1100 1098
1101static int lbs_cmd_bcn_ctrl(struct lbs_private * priv, 1099static int lbs_cmd_bcn_ctrl(struct lbs_private * priv,
@@ -1256,41 +1254,47 @@ void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
1256 priv->cur_cmd = NULL; 1254 priv->cur_cmd = NULL;
1257} 1255}
1258 1256
1259int lbs_set_radio_control(struct lbs_private *priv) 1257int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
1260{ 1258{
1261 int ret = 0;
1262 struct cmd_ds_802_11_radio_control cmd; 1259 struct cmd_ds_802_11_radio_control cmd;
1260 int ret = -EINVAL;
1263 1261
1264 lbs_deb_enter(LBS_DEB_CMD); 1262 lbs_deb_enter(LBS_DEB_CMD);
1265 1263
1266 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 1264 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1267 cmd.action = cpu_to_le16(CMD_ACT_SET); 1265 cmd.action = cpu_to_le16(CMD_ACT_SET);
1268 1266
1269 switch (priv->preamble) { 1267 /* Only v8 and below support setting the preamble */
1270 case CMD_TYPE_SHORT_PREAMBLE: 1268 if (priv->fwrelease < 0x09000000) {
1271 cmd.control = cpu_to_le16(SET_SHORT_PREAMBLE); 1269 switch (preamble) {
1272 break; 1270 case RADIO_PREAMBLE_SHORT:
1273 1271 if (!(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
1274 case CMD_TYPE_LONG_PREAMBLE: 1272 goto out;
1275 cmd.control = cpu_to_le16(SET_LONG_PREAMBLE); 1273 /* Fall through */
1276 break; 1274 case RADIO_PREAMBLE_AUTO:
1275 case RADIO_PREAMBLE_LONG:
1276 cmd.control = cpu_to_le16(preamble);
1277 break;
1278 default:
1279 goto out;
1280 }
1281 }
1277 1282
1278 case CMD_TYPE_AUTO_PREAMBLE: 1283 if (radio_on)
1279 default: 1284 cmd.control |= cpu_to_le16(0x1);
1280 cmd.control = cpu_to_le16(SET_AUTO_PREAMBLE); 1285 else {
1281 break; 1286 cmd.control &= cpu_to_le16(~0x1);
1287 priv->txpower_cur = 0;
1282 } 1288 }
1283 1289
1284 if (priv->radioon) 1290 lbs_deb_cmd("RADIO_CONTROL: radio %s, preamble %d\n",
1285 cmd.control |= cpu_to_le16(TURN_ON_RF); 1291 radio_on ? "ON" : "OFF", preamble);
1286 else
1287 cmd.control &= cpu_to_le16(~TURN_ON_RF);
1288 1292
1289 lbs_deb_cmd("RADIO_SET: radio %d, preamble %d\n", priv->radioon, 1293 priv->radio_on = radio_on;
1290 priv->preamble);
1291 1294
1292 ret = lbs_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd); 1295 ret = lbs_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
1293 1296
1297out:
1294 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 1298 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
1295 return ret; 1299 return ret;
1296} 1300}
@@ -1380,55 +1384,25 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1380 ret = lbs_cmd_80211_associate(priv, cmdptr, pdata_buf); 1384 ret = lbs_cmd_80211_associate(priv, cmdptr, pdata_buf);
1381 break; 1385 break;
1382 1386
1383 case CMD_802_11_DEAUTHENTICATE:
1384 ret = lbs_cmd_80211_deauthenticate(priv, cmdptr);
1385 break;
1386
1387 case CMD_802_11_AD_HOC_START:
1388 ret = lbs_cmd_80211_ad_hoc_start(priv, cmdptr, pdata_buf);
1389 break;
1390
1391 case CMD_802_11_RESET:
1392 ret = lbs_cmd_802_11_reset(cmdptr, cmd_action);
1393 break;
1394
1395 case CMD_802_11_AUTHENTICATE: 1387 case CMD_802_11_AUTHENTICATE:
1396 ret = lbs_cmd_80211_authenticate(priv, cmdptr, pdata_buf); 1388 ret = lbs_cmd_80211_authenticate(priv, cmdptr, pdata_buf);
1397 break; 1389 break;
1398 1390
1399 case CMD_802_11_SNMP_MIB:
1400 ret = lbs_cmd_802_11_snmp_mib(priv, cmdptr,
1401 cmd_action, cmd_oid, pdata_buf);
1402 break;
1403
1404 case CMD_MAC_REG_ACCESS: 1391 case CMD_MAC_REG_ACCESS:
1405 case CMD_BBP_REG_ACCESS: 1392 case CMD_BBP_REG_ACCESS:
1406 case CMD_RF_REG_ACCESS: 1393 case CMD_RF_REG_ACCESS:
1407 ret = lbs_cmd_reg_access(cmdptr, cmd_action, pdata_buf); 1394 ret = lbs_cmd_reg_access(cmdptr, cmd_action, pdata_buf);
1408 break; 1395 break;
1409 1396
1410 case CMD_802_11_RF_TX_POWER:
1411 ret = lbs_cmd_802_11_rf_tx_power(cmdptr,
1412 cmd_action, pdata_buf);
1413 break;
1414
1415 case CMD_802_11_MONITOR_MODE: 1397 case CMD_802_11_MONITOR_MODE:
1416 ret = lbs_cmd_802_11_monitor_mode(cmdptr, 1398 ret = lbs_cmd_802_11_monitor_mode(cmdptr,
1417 cmd_action, pdata_buf); 1399 cmd_action, pdata_buf);
1418 break; 1400 break;
1419 1401
1420 case CMD_802_11_AD_HOC_JOIN:
1421 ret = lbs_cmd_80211_ad_hoc_join(priv, cmdptr, pdata_buf);
1422 break;
1423
1424 case CMD_802_11_RSSI: 1402 case CMD_802_11_RSSI:
1425 ret = lbs_cmd_802_11_rssi(priv, cmdptr); 1403 ret = lbs_cmd_802_11_rssi(priv, cmdptr);
1426 break; 1404 break;
1427 1405
1428 case CMD_802_11_AD_HOC_STOP:
1429 ret = lbs_cmd_80211_ad_hoc_stop(cmdptr);
1430 break;
1431
1432 case CMD_802_11_SET_AFC: 1406 case CMD_802_11_SET_AFC:
1433 case CMD_802_11_GET_AFC: 1407 case CMD_802_11_GET_AFC:
1434 1408
@@ -1953,6 +1927,70 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv)
1953} 1927}
1954 1928
1955 1929
1930/**
1931 * @brief Configures the transmission power control functionality.
1932 *
1933 * @param priv A pointer to struct lbs_private structure
1934 * @param enable Transmission power control enable
1935 * @param p0 Power level when link quality is good (dBm).
1936 * @param p1 Power level when link quality is fair (dBm).
1937 * @param p2 Power level when link quality is poor (dBm).
1938 * @param usesnr Use Signal to Noise Ratio in TPC
1939 *
1940 * @return 0 on success
1941 */
1942int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
1943 int8_t p2, int usesnr)
1944{
1945 struct cmd_ds_802_11_tpc_cfg cmd;
1946 int ret;
1947
1948 memset(&cmd, 0, sizeof(cmd));
1949 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1950 cmd.action = cpu_to_le16(CMD_ACT_SET);
1951 cmd.enable = !!enable;
1952 cmd.usesnr = !!enable;
1953 cmd.P0 = p0;
1954 cmd.P1 = p1;
1955 cmd.P2 = p2;
1956
1957 ret = lbs_cmd_with_response(priv, CMD_802_11_TPC_CFG, &cmd);
1958
1959 return ret;
1960}
1961
1962/**
1963 * @brief Configures the power adaptation settings.
1964 *
1965 * @param priv A pointer to struct lbs_private structure
1966 * @param enable Power adaptation enable
1967 * @param p0 Power level for 1, 2, 5.5 and 11 Mbps (dBm).
1968 * @param p1 Power level for 6, 9, 12, 18, 22, 24 and 36 Mbps (dBm).
1969 * @param p2 Power level for 48 and 54 Mbps (dBm).
1970 *
1971 * @return 0 on Success
1972 */
1973
1974int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
1975 int8_t p1, int8_t p2)
1976{
1977 struct cmd_ds_802_11_pa_cfg cmd;
1978 int ret;
1979
1980 memset(&cmd, 0, sizeof(cmd));
1981 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1982 cmd.action = cpu_to_le16(CMD_ACT_SET);
1983 cmd.enable = !!enable;
1984 cmd.P0 = p0;
1985 cmd.P1 = p1;
1986 cmd.P2 = p2;
1987
1988 ret = lbs_cmd_with_response(priv, CMD_802_11_PA_CFG , &cmd);
1989
1990 return ret;
1991}
1992
1993
1956static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, 1994static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
1957 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size, 1995 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
1958 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 1996 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index a53b51f8bdb4..d002160f597d 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -26,6 +26,12 @@ int __lbs_cmd(struct lbs_private *priv, uint16_t command,
26 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 26 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
27 unsigned long callback_arg); 27 unsigned long callback_arg);
28 28
29int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
30 int8_t p1, int8_t p2);
31
32int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
33 int8_t p2, int usesnr);
34
29int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra, 35int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
30 struct cmd_header *resp); 36 struct cmd_header *resp);
31 37
@@ -61,4 +67,14 @@ int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
61int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action, 67int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
62 struct assoc_request *assoc); 68 struct assoc_request *assoc);
63 69
70int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
71 s16 *maxlevel);
72int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
73
74int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
75
76int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
77
78int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
79
64#endif /* _LBS_CMD_H */ 80#endif /* _LBS_CMD_H */
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 24de3c3cf877..bcf2a9756fb6 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -146,63 +146,6 @@ static int lbs_ret_reg_access(struct lbs_private *priv,
146 return ret; 146 return ret;
147} 147}
148 148
149static int lbs_ret_802_11_snmp_mib(struct lbs_private *priv,
150 struct cmd_ds_command *resp)
151{
152 struct cmd_ds_802_11_snmp_mib *smib = &resp->params.smib;
153 u16 oid = le16_to_cpu(smib->oid);
154 u16 querytype = le16_to_cpu(smib->querytype);
155
156 lbs_deb_enter(LBS_DEB_CMD);
157
158 lbs_deb_cmd("SNMP_RESP: oid 0x%x, querytype 0x%x\n", oid,
159 querytype);
160 lbs_deb_cmd("SNMP_RESP: Buf size %d\n", le16_to_cpu(smib->bufsize));
161
162 if (querytype == CMD_ACT_GET) {
163 switch (oid) {
164 case FRAGTHRESH_I:
165 priv->fragthsd =
166 le16_to_cpu(*((__le16 *)(smib->value)));
167 lbs_deb_cmd("SNMP_RESP: frag threshold %u\n",
168 priv->fragthsd);
169 break;
170 case RTSTHRESH_I:
171 priv->rtsthsd =
172 le16_to_cpu(*((__le16 *)(smib->value)));
173 lbs_deb_cmd("SNMP_RESP: rts threshold %u\n",
174 priv->rtsthsd);
175 break;
176 case SHORT_RETRYLIM_I:
177 priv->txretrycount =
178 le16_to_cpu(*((__le16 *)(smib->value)));
179 lbs_deb_cmd("SNMP_RESP: tx retry count %u\n",
180 priv->rtsthsd);
181 break;
182 default:
183 break;
184 }
185 }
186
187 lbs_deb_enter(LBS_DEB_CMD);
188 return 0;
189}
190
191static int lbs_ret_802_11_rf_tx_power(struct lbs_private *priv,
192 struct cmd_ds_command *resp)
193{
194 struct cmd_ds_802_11_rf_tx_power *rtp = &resp->params.txp;
195
196 lbs_deb_enter(LBS_DEB_CMD);
197
198 priv->txpowerlevel = le16_to_cpu(rtp->currentlevel);
199
200 lbs_deb_cmd("TX power currently %d\n", priv->txpowerlevel);
201
202 lbs_deb_leave(LBS_DEB_CMD);
203 return 0;
204}
205
206static int lbs_ret_802_11_rssi(struct lbs_private *priv, 149static int lbs_ret_802_11_rssi(struct lbs_private *priv,
207 struct cmd_ds_command *resp) 150 struct cmd_ds_command *resp)
208{ 151{
@@ -273,24 +216,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
273 ret = lbs_ret_80211_associate(priv, resp); 216 ret = lbs_ret_80211_associate(priv, resp);
274 break; 217 break;
275 218
276 case CMD_RET(CMD_802_11_DISASSOCIATE):
277 case CMD_RET(CMD_802_11_DEAUTHENTICATE):
278 ret = lbs_ret_80211_disassociate(priv);
279 break;
280
281 case CMD_RET(CMD_802_11_AD_HOC_START):
282 case CMD_RET(CMD_802_11_AD_HOC_JOIN):
283 ret = lbs_ret_80211_ad_hoc_start(priv, resp);
284 break;
285
286 case CMD_RET(CMD_802_11_SNMP_MIB):
287 ret = lbs_ret_802_11_snmp_mib(priv, resp);
288 break;
289
290 case CMD_RET(CMD_802_11_RF_TX_POWER):
291 ret = lbs_ret_802_11_rf_tx_power(priv, resp);
292 break;
293
294 case CMD_RET(CMD_802_11_SET_AFC): 219 case CMD_RET(CMD_802_11_SET_AFC):
295 case CMD_RET(CMD_802_11_GET_AFC): 220 case CMD_RET(CMD_802_11_GET_AFC):
296 spin_lock_irqsave(&priv->driver_lock, flags); 221 spin_lock_irqsave(&priv->driver_lock, flags);
@@ -300,7 +225,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
300 225
301 break; 226 break;
302 227
303 case CMD_RET(CMD_802_11_RESET):
304 case CMD_RET(CMD_802_11_AUTHENTICATE): 228 case CMD_RET(CMD_802_11_AUTHENTICATE):
305 case CMD_RET(CMD_802_11_BEACON_STOP): 229 case CMD_RET(CMD_802_11_BEACON_STOP):
306 break; 230 break;
@@ -309,10 +233,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
309 ret = lbs_ret_802_11_rssi(priv, resp); 233 ret = lbs_ret_802_11_rssi(priv, resp);
310 break; 234 break;
311 235
312 case CMD_RET(CMD_802_11_AD_HOC_STOP):
313 ret = lbs_ret_80211_ad_hoc_stop(priv);
314 break;
315
316 case CMD_RET(CMD_802_11D_DOMAIN_INFO): 236 case CMD_RET(CMD_802_11D_DOMAIN_INFO):
317 ret = lbs_ret_802_11d_domain_info(resp); 237 ret = lbs_ret_802_11d_domain_info(resp);
318 break; 238 break;
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index a8ac974dacac..1a8888cceadc 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -34,7 +34,6 @@ int lbs_process_event(struct lbs_private *priv, u32 event);
34void lbs_queue_event(struct lbs_private *priv, u32 event); 34void lbs_queue_event(struct lbs_private *priv, u32 event);
35void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx); 35void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
36 36
37int lbs_set_radio_control(struct lbs_private *priv);
38u32 lbs_fw_index_to_data_rate(u8 index); 37u32 lbs_fw_index_to_data_rate(u8 index);
39u8 lbs_data_rate_to_fw_index(u32 rate); 38u8 lbs_data_rate_to_fw_index(u32 rate);
40 39
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 12e687550bce..58d11a35e61b 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -189,6 +189,15 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
189#define MRVDRV_CMD_UPLD_RDY 0x0008 189#define MRVDRV_CMD_UPLD_RDY 0x0008
190#define MRVDRV_CARDEVENT 0x0010 190#define MRVDRV_CARDEVENT 0x0010
191 191
192
193/* Automatic TX control default levels */
194#define POW_ADAPT_DEFAULT_P0 13
195#define POW_ADAPT_DEFAULT_P1 15
196#define POW_ADAPT_DEFAULT_P2 18
197#define TPC_DEFAULT_P0 5
198#define TPC_DEFAULT_P1 10
199#define TPC_DEFAULT_P2 13
200
192/** TxPD status */ 201/** TxPD status */
193 202
194/* Station firmware use TxPD status field to report final Tx transmit 203/* Station firmware use TxPD status field to report final Tx transmit
@@ -243,6 +252,9 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
243 252
244#define CMD_F_HOSTCMD (1 << 0) 253#define CMD_F_HOSTCMD (1 << 0)
245#define FW_CAPINFO_WPA (1 << 0) 254#define FW_CAPINFO_WPA (1 << 0)
255#define FW_CAPINFO_FIRMWARE_UPGRADE (1 << 13)
256#define FW_CAPINFO_BOOT2_UPGRADE (1<<14)
257#define FW_CAPINFO_PERSISTENT_CONFIG (1<<15)
246 258
247#define KEY_LEN_WPA_AES 16 259#define KEY_LEN_WPA_AES 16
248#define KEY_LEN_WPA_TKIP 32 260#define KEY_LEN_WPA_TKIP 32
@@ -316,7 +328,8 @@ enum PS_STATE {
316enum DNLD_STATE { 328enum DNLD_STATE {
317 DNLD_RES_RECEIVED, 329 DNLD_RES_RECEIVED,
318 DNLD_DATA_SENT, 330 DNLD_DATA_SENT,
319 DNLD_CMD_SENT 331 DNLD_CMD_SENT,
332 DNLD_BOOTCMD_SENT,
320}; 333};
321 334
322/** LBS_MEDIA_STATE */ 335/** LBS_MEDIA_STATE */
@@ -339,27 +352,6 @@ enum mv_ms_type {
339 MVMS_EVENT 352 MVMS_EVENT
340}; 353};
341 354
342/** SNMP_MIB_INDEX_e */
343enum SNMP_MIB_INDEX_e {
344 DESIRED_BSSTYPE_I = 0,
345 OP_RATESET_I,
346 BCNPERIOD_I,
347 DTIMPERIOD_I,
348 ASSOCRSP_TIMEOUT_I,
349 RTSTHRESH_I,
350 SHORT_RETRYLIM_I,
351 LONG_RETRYLIM_I,
352 FRAGTHRESH_I,
353 DOT11D_I,
354 DOT11H_I,
355 MANUFID_I,
356 PRODID_I,
357 MANUF_OUI_I,
358 MANUF_NAME_I,
359 MANUF_PRODNAME_I,
360 MANUF_PRODVER_I,
361};
362
363/** KEY_TYPE_ID */ 355/** KEY_TYPE_ID */
364enum KEY_TYPE_ID { 356enum KEY_TYPE_ID {
365 KEY_TYPE_ID_WEP = 0, 357 KEY_TYPE_ID_WEP = 0,
@@ -374,12 +366,6 @@ enum KEY_INFO_WPA {
374 KEY_INFO_WPA_ENABLED = 0x04 366 KEY_INFO_WPA_ENABLED = 0x04
375}; 367};
376 368
377/** SNMP_MIB_VALUE_e */
378enum SNMP_MIB_VALUE_e {
379 SNMP_MIB_VALUE_INFRA = 1,
380 SNMP_MIB_VALUE_ADHOC
381};
382
383/* Default values for fwt commands. */ 369/* Default values for fwt commands. */
384#define FWT_DEFAULT_METRIC 0 370#define FWT_DEFAULT_METRIC 0
385#define FWT_DEFAULT_DIR 1 371#define FWT_DEFAULT_DIR 1
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f5bb40c54d85..acb889e25900 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -240,9 +240,6 @@ struct lbs_private {
240 uint16_t enablehwauto; 240 uint16_t enablehwauto;
241 uint16_t ratebitmap; 241 uint16_t ratebitmap;
242 242
243 u32 fragthsd;
244 u32 rtsthsd;
245
246 u8 txretrycount; 243 u8 txretrycount;
247 244
248 /** Tx-related variables (for single packet tx) */ 245 /** Tx-related variables (for single packet tx) */
@@ -253,7 +250,9 @@ struct lbs_private {
253 u32 connect_status; 250 u32 connect_status;
254 u32 mesh_connect_status; 251 u32 mesh_connect_status;
255 u16 regioncode; 252 u16 regioncode;
256 u16 txpowerlevel; 253 s16 txpower_cur;
254 s16 txpower_min;
255 s16 txpower_max;
257 256
258 /** POWER MANAGEMENT AND PnP SUPPORT */ 257 /** POWER MANAGEMENT AND PnP SUPPORT */
259 u8 surpriseremoved; 258 u8 surpriseremoved;
@@ -291,8 +290,7 @@ struct lbs_private {
291 u16 nextSNRNF; 290 u16 nextSNRNF;
292 u16 numSNRNF; 291 u16 numSNRNF;
293 292
294 u8 radioon; 293 u8 radio_on;
295 u32 preamble;
296 294
297 /** data rate stuff */ 295 /** data rate stuff */
298 u8 cur_rate; 296 u8 cur_rate;
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index c92e41b4faf4..5004d7679c02 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -9,17 +9,6 @@
9#define DEFAULT_AD_HOC_CHANNEL 6 9#define DEFAULT_AD_HOC_CHANNEL 6
10#define DEFAULT_AD_HOC_CHANNEL_A 36 10#define DEFAULT_AD_HOC_CHANNEL_A 36
11 11
12/** IEEE 802.11 oids */
13#define OID_802_11_SSID 0x00008002
14#define OID_802_11_INFRASTRUCTURE_MODE 0x00008008
15#define OID_802_11_FRAGMENTATION_THRESHOLD 0x00008009
16#define OID_802_11_RTS_THRESHOLD 0x0000800A
17#define OID_802_11_TX_ANTENNA_SELECTED 0x0000800D
18#define OID_802_11_SUPPORTED_RATES 0x0000800E
19#define OID_802_11_STATISTICS 0x00008012
20#define OID_802_11_TX_RETRYCOUNT 0x0000801D
21#define OID_802_11D_ENABLE 0x00008020
22
23#define CMD_OPTION_WAITFORRSP 0x0002 12#define CMD_OPTION_WAITFORRSP 0x0002
24 13
25/** Host command IDs */ 14/** Host command IDs */
@@ -61,7 +50,6 @@
61#define CMD_RF_REG_MAP 0x0023 50#define CMD_RF_REG_MAP 0x0023
62#define CMD_802_11_DEAUTHENTICATE 0x0024 51#define CMD_802_11_DEAUTHENTICATE 0x0024
63#define CMD_802_11_REASSOCIATE 0x0025 52#define CMD_802_11_REASSOCIATE 0x0025
64#define CMD_802_11_DISASSOCIATE 0x0026
65#define CMD_MAC_CONTROL 0x0028 53#define CMD_MAC_CONTROL 0x0028
66#define CMD_802_11_AD_HOC_START 0x002b 54#define CMD_802_11_AD_HOC_START 0x002b
67#define CMD_802_11_AD_HOC_JOIN 0x002c 55#define CMD_802_11_AD_HOC_JOIN 0x002c
@@ -84,6 +72,7 @@
84#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067 72#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067
85#define CMD_802_11_SLEEP_PERIOD 0x0068 73#define CMD_802_11_SLEEP_PERIOD 0x0068
86#define CMD_802_11_TPC_CFG 0x0072 74#define CMD_802_11_TPC_CFG 0x0072
75#define CMD_802_11_PA_CFG 0x0073
87#define CMD_802_11_FW_WAKE_METHOD 0x0074 76#define CMD_802_11_FW_WAKE_METHOD 0x0074
88#define CMD_802_11_SUBSCRIBE_EVENT 0x0075 77#define CMD_802_11_SUBSCRIBE_EVENT 0x0075
89#define CMD_802_11_RATE_ADAPT_RATESET 0x0076 78#define CMD_802_11_RATE_ADAPT_RATESET 0x0076
@@ -153,11 +142,6 @@
153#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 142#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100
154#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400 143#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400
155 144
156/* Define action or option for CMD_802_11_RADIO_CONTROL */
157#define CMD_TYPE_AUTO_PREAMBLE 0x0001
158#define CMD_TYPE_SHORT_PREAMBLE 0x0002
159#define CMD_TYPE_LONG_PREAMBLE 0x0003
160
161/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */ 145/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */
162#define CMD_SUBSCRIBE_RSSI_LOW 0x0001 146#define CMD_SUBSCRIBE_RSSI_LOW 0x0001
163#define CMD_SUBSCRIBE_SNR_LOW 0x0002 147#define CMD_SUBSCRIBE_SNR_LOW 0x0002
@@ -166,28 +150,14 @@
166#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010 150#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010
167#define CMD_SUBSCRIBE_SNR_HIGH 0x0020 151#define CMD_SUBSCRIBE_SNR_HIGH 0x0020
168 152
169#define TURN_ON_RF 0x01 153#define RADIO_PREAMBLE_LONG 0x00
170#define RADIO_ON 0x01 154#define RADIO_PREAMBLE_SHORT 0x02
171#define RADIO_OFF 0x00 155#define RADIO_PREAMBLE_AUTO 0x04
172
173#define SET_AUTO_PREAMBLE 0x05
174#define SET_SHORT_PREAMBLE 0x03
175#define SET_LONG_PREAMBLE 0x01
176 156
177/* Define action or option for CMD_802_11_RF_CHANNEL */ 157/* Define action or option for CMD_802_11_RF_CHANNEL */
178#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00 158#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00
179#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01 159#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01
180 160
181/* Define action or option for CMD_802_11_RF_TX_POWER */
182#define CMD_ACT_TX_POWER_OPT_GET 0x0000
183#define CMD_ACT_TX_POWER_OPT_SET_HIGH 0x8007
184#define CMD_ACT_TX_POWER_OPT_SET_MID 0x8004
185#define CMD_ACT_TX_POWER_OPT_SET_LOW 0x8000
186
187#define CMD_ACT_TX_POWER_INDEX_HIGH 0x0007
188#define CMD_ACT_TX_POWER_INDEX_MID 0x0004
189#define CMD_ACT_TX_POWER_INDEX_LOW 0x0000
190
191/* Define action or option for CMD_802_11_DATA_RATE */ 161/* Define action or option for CMD_802_11_DATA_RATE */
192#define CMD_ACT_SET_TX_AUTO 0x0000 162#define CMD_ACT_SET_TX_AUTO 0x0000
193#define CMD_ACT_SET_TX_FIX_RATE 0x0001 163#define CMD_ACT_SET_TX_FIX_RATE 0x0001
@@ -210,6 +180,19 @@
210#define CMD_WAKE_METHOD_COMMAND_INT 0x0001 180#define CMD_WAKE_METHOD_COMMAND_INT 0x0001
211#define CMD_WAKE_METHOD_GPIO 0x0002 181#define CMD_WAKE_METHOD_GPIO 0x0002
212 182
183/* Object IDs for CMD_802_11_SNMP_MIB */
184#define SNMP_MIB_OID_BSS_TYPE 0x0000
185#define SNMP_MIB_OID_OP_RATE_SET 0x0001
186#define SNMP_MIB_OID_BEACON_PERIOD 0x0002 /* Reserved on v9+ */
187#define SNMP_MIB_OID_DTIM_PERIOD 0x0003 /* Reserved on v9+ */
188#define SNMP_MIB_OID_ASSOC_TIMEOUT 0x0004 /* Reserved on v9+ */
189#define SNMP_MIB_OID_RTS_THRESHOLD 0x0005
190#define SNMP_MIB_OID_SHORT_RETRY_LIMIT 0x0006
191#define SNMP_MIB_OID_LONG_RETRY_LIMIT 0x0007
192#define SNMP_MIB_OID_FRAG_THRESHOLD 0x0008
193#define SNMP_MIB_OID_11D_ENABLE 0x0009
194#define SNMP_MIB_OID_11H_ENABLE 0x000A
195
213/* Define action or option for CMD_BT_ACCESS */ 196/* Define action or option for CMD_BT_ACCESS */
214enum cmd_bt_access_opts { 197enum cmd_bt_access_opts {
215 /* The bt commands start at 5 instead of 1 because the old dft commands 198 /* The bt commands start at 5 instead of 1 because the old dft commands
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index 913b480211a9..d9f9a12a739e 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -151,10 +151,6 @@ struct cmd_ds_get_hw_spec {
151 __le32 fwcapinfo; 151 __le32 fwcapinfo;
152} __attribute__ ((packed)); 152} __attribute__ ((packed));
153 153
154struct cmd_ds_802_11_reset {
155 __le16 action;
156};
157
158struct cmd_ds_802_11_subscribe_event { 154struct cmd_ds_802_11_subscribe_event {
159 struct cmd_header hdr; 155 struct cmd_header hdr;
160 156
@@ -232,7 +228,9 @@ struct cmd_ds_802_11_authenticate {
232}; 228};
233 229
234struct cmd_ds_802_11_deauthenticate { 230struct cmd_ds_802_11_deauthenticate {
235 u8 macaddr[6]; 231 struct cmd_header hdr;
232
233 u8 macaddr[ETH_ALEN];
236 __le16 reasoncode; 234 __le16 reasoncode;
237}; 235};
238 236
@@ -251,20 +249,10 @@ struct cmd_ds_802_11_associate {
251#endif 249#endif
252} __attribute__ ((packed)); 250} __attribute__ ((packed));
253 251
254struct cmd_ds_802_11_disassociate {
255 u8 destmacaddr[6];
256 __le16 reasoncode;
257};
258
259struct cmd_ds_802_11_associate_rsp { 252struct cmd_ds_802_11_associate_rsp {
260 struct ieeetypes_assocrsp assocRsp; 253 struct ieeetypes_assocrsp assocRsp;
261}; 254};
262 255
263struct cmd_ds_802_11_ad_hoc_result {
264 u8 pad[3];
265 u8 bssid[ETH_ALEN];
266};
267
268struct cmd_ds_802_11_set_wep { 256struct cmd_ds_802_11_set_wep {
269 struct cmd_header hdr; 257 struct cmd_header hdr;
270 258
@@ -309,7 +297,9 @@ struct cmd_ds_802_11_get_stat {
309}; 297};
310 298
311struct cmd_ds_802_11_snmp_mib { 299struct cmd_ds_802_11_snmp_mib {
312 __le16 querytype; 300 struct cmd_header hdr;
301
302 __le16 action;
313 __le16 oid; 303 __le16 oid;
314 __le16 bufsize; 304 __le16 bufsize;
315 u8 value[128]; 305 u8 value[128];
@@ -435,8 +425,12 @@ struct cmd_ds_802_11_mac_address {
435}; 425};
436 426
437struct cmd_ds_802_11_rf_tx_power { 427struct cmd_ds_802_11_rf_tx_power {
428 struct cmd_header hdr;
429
438 __le16 action; 430 __le16 action;
439 __le16 currentlevel; 431 __le16 curlevel;
432 s8 maxlevel;
433 s8 minlevel;
440}; 434};
441 435
442struct cmd_ds_802_11_rf_antenna { 436struct cmd_ds_802_11_rf_antenna {
@@ -507,10 +501,12 @@ struct cmd_ds_802_11_rate_adapt_rateset {
507}; 501};
508 502
509struct cmd_ds_802_11_ad_hoc_start { 503struct cmd_ds_802_11_ad_hoc_start {
504 struct cmd_header hdr;
505
510 u8 ssid[IW_ESSID_MAX_SIZE]; 506 u8 ssid[IW_ESSID_MAX_SIZE];
511 u8 bsstype; 507 u8 bsstype;
512 __le16 beaconperiod; 508 __le16 beaconperiod;
513 u8 dtimperiod; 509 u8 dtimperiod; /* Reserved on v9 and later */
514 union IEEEtypes_ssparamset ssparamset; 510 union IEEEtypes_ssparamset ssparamset;
515 union ieeetypes_phyparamset phyparamset; 511 union ieeetypes_phyparamset phyparamset;
516 __le16 probedelay; 512 __le16 probedelay;
@@ -519,9 +515,16 @@ struct cmd_ds_802_11_ad_hoc_start {
519 u8 tlv_memory_size_pad[100]; 515 u8 tlv_memory_size_pad[100];
520} __attribute__ ((packed)); 516} __attribute__ ((packed));
521 517
518struct cmd_ds_802_11_ad_hoc_result {
519 struct cmd_header hdr;
520
521 u8 pad[3];
522 u8 bssid[ETH_ALEN];
523};
524
522struct adhoc_bssdesc { 525struct adhoc_bssdesc {
523 u8 bssid[6]; 526 u8 bssid[ETH_ALEN];
524 u8 ssid[32]; 527 u8 ssid[IW_ESSID_MAX_SIZE];
525 u8 type; 528 u8 type;
526 __le16 beaconperiod; 529 __le16 beaconperiod;
527 u8 dtimperiod; 530 u8 dtimperiod;
@@ -539,10 +542,15 @@ struct adhoc_bssdesc {
539} __attribute__ ((packed)); 542} __attribute__ ((packed));
540 543
541struct cmd_ds_802_11_ad_hoc_join { 544struct cmd_ds_802_11_ad_hoc_join {
545 struct cmd_header hdr;
546
542 struct adhoc_bssdesc bss; 547 struct adhoc_bssdesc bss;
543 __le16 failtimeout; 548 __le16 failtimeout; /* Reserved on v9 and later */
544 __le16 probedelay; 549 __le16 probedelay; /* Reserved on v9 and later */
550} __attribute__ ((packed));
545 551
552struct cmd_ds_802_11_ad_hoc_stop {
553 struct cmd_header hdr;
546} __attribute__ ((packed)); 554} __attribute__ ((packed));
547 555
548struct cmd_ds_802_11_enable_rsn { 556struct cmd_ds_802_11_enable_rsn {
@@ -597,14 +605,28 @@ struct cmd_ds_802_11_eeprom_access {
597} __attribute__ ((packed)); 605} __attribute__ ((packed));
598 606
599struct cmd_ds_802_11_tpc_cfg { 607struct cmd_ds_802_11_tpc_cfg {
608 struct cmd_header hdr;
609
600 __le16 action; 610 __le16 action;
601 u8 enable; 611 uint8_t enable;
602 s8 P0; 612 int8_t P0;
603 s8 P1; 613 int8_t P1;
604 s8 P2; 614 int8_t P2;
605 u8 usesnr; 615 uint8_t usesnr;
606} __attribute__ ((packed)); 616} __attribute__ ((packed));
607 617
618
619struct cmd_ds_802_11_pa_cfg {
620 struct cmd_header hdr;
621
622 __le16 action;
623 uint8_t enable;
624 int8_t P0;
625 int8_t P1;
626 int8_t P2;
627} __attribute__ ((packed));
628
629
608struct cmd_ds_802_11_led_ctrl { 630struct cmd_ds_802_11_led_ctrl {
609 __le16 action; 631 __le16 action;
610 __le16 numled; 632 __le16 numled;
@@ -693,21 +715,13 @@ struct cmd_ds_command {
693 union { 715 union {
694 struct cmd_ds_802_11_ps_mode psmode; 716 struct cmd_ds_802_11_ps_mode psmode;
695 struct cmd_ds_802_11_associate associate; 717 struct cmd_ds_802_11_associate associate;
696 struct cmd_ds_802_11_deauthenticate deauth;
697 struct cmd_ds_802_11_ad_hoc_start ads;
698 struct cmd_ds_802_11_reset reset;
699 struct cmd_ds_802_11_ad_hoc_result result;
700 struct cmd_ds_802_11_authenticate auth; 718 struct cmd_ds_802_11_authenticate auth;
701 struct cmd_ds_802_11_get_stat gstat; 719 struct cmd_ds_802_11_get_stat gstat;
702 struct cmd_ds_802_3_get_stat gstat_8023; 720 struct cmd_ds_802_3_get_stat gstat_8023;
703 struct cmd_ds_802_11_snmp_mib smib;
704 struct cmd_ds_802_11_rf_tx_power txp;
705 struct cmd_ds_802_11_rf_antenna rant; 721 struct cmd_ds_802_11_rf_antenna rant;
706 struct cmd_ds_802_11_monitor_mode monitor; 722 struct cmd_ds_802_11_monitor_mode monitor;
707 struct cmd_ds_802_11_ad_hoc_join adj;
708 struct cmd_ds_802_11_rssi rssi; 723 struct cmd_ds_802_11_rssi rssi;
709 struct cmd_ds_802_11_rssi_rsp rssirsp; 724 struct cmd_ds_802_11_rssi_rsp rssirsp;
710 struct cmd_ds_802_11_disassociate dassociate;
711 struct cmd_ds_mac_reg_access macreg; 725 struct cmd_ds_mac_reg_access macreg;
712 struct cmd_ds_bbp_reg_access bbpreg; 726 struct cmd_ds_bbp_reg_access bbpreg;
713 struct cmd_ds_rf_reg_access rfreg; 727 struct cmd_ds_rf_reg_access rfreg;
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 04d7a251e3f0..e3505c110af6 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -595,7 +595,7 @@ static int if_cs_prog_helper(struct if_cs_card *card)
595 if (ret < 0) { 595 if (ret < 0) {
596 lbs_pr_err("can't download helper at 0x%x, ret %d\n", 596 lbs_pr_err("can't download helper at 0x%x, ret %d\n",
597 sent, ret); 597 sent, ret);
598 goto done; 598 goto err_release;
599 } 599 }
600 600
601 if (count == 0) 601 if (count == 0)
@@ -604,9 +604,8 @@ static int if_cs_prog_helper(struct if_cs_card *card)
604 sent += count; 604 sent += count;
605 } 605 }
606 606
607err_release:
607 release_firmware(fw); 608 release_firmware(fw);
608 ret = 0;
609
610done: 609done:
611 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 610 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
612 return ret; 611 return ret;
@@ -676,14 +675,8 @@ static int if_cs_prog_real(struct if_cs_card *card)
676 } 675 }
677 676
678 ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a); 677 ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a);
679 if (ret < 0) { 678 if (ret < 0)
680 lbs_pr_err("firmware download failed\n"); 679 lbs_pr_err("firmware download failed\n");
681 goto err_release;
682 }
683
684 ret = 0;
685 goto done;
686
687 680
688err_release: 681err_release:
689 release_firmware(fw); 682 release_firmware(fw);
@@ -720,7 +713,7 @@ static int if_cs_host_to_card(struct lbs_private *priv,
720 ret = if_cs_send_cmd(priv, buf, nb); 713 ret = if_cs_send_cmd(priv, buf, nb);
721 break; 714 break;
722 default: 715 default:
723 lbs_pr_err("%s: unsupported type %d\n", __FUNCTION__, type); 716 lbs_pr_err("%s: unsupported type %d\n", __func__, type);
724 } 717 }
725 718
726 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 719 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 632c291404ab..cafbccb74143 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -39,7 +39,10 @@ MODULE_DEVICE_TABLE(usb, if_usb_table);
39 39
40static void if_usb_receive(struct urb *urb); 40static void if_usb_receive(struct urb *urb);
41static void if_usb_receive_fwload(struct urb *urb); 41static void if_usb_receive_fwload(struct urb *urb);
42static int if_usb_prog_firmware(struct if_usb_card *cardp); 42static int __if_usb_prog_firmware(struct if_usb_card *cardp,
43 const char *fwname, int cmd);
44static int if_usb_prog_firmware(struct if_usb_card *cardp,
45 const char *fwname, int cmd);
43static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type, 46static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type,
44 uint8_t *payload, uint16_t nb); 47 uint8_t *payload, uint16_t nb);
45static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, 48static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
@@ -48,6 +51,62 @@ static void if_usb_free(struct if_usb_card *cardp);
48static int if_usb_submit_rx_urb(struct if_usb_card *cardp); 51static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
49static int if_usb_reset_device(struct if_usb_card *cardp); 52static int if_usb_reset_device(struct if_usb_card *cardp);
50 53
54/* sysfs hooks */
55
56/**
57 * Set function to write firmware to device's persistent memory
58 */
59static ssize_t if_usb_firmware_set(struct device *dev,
60 struct device_attribute *attr, const char *buf, size_t count)
61{
62 struct lbs_private *priv = to_net_dev(dev)->priv;
63 struct if_usb_card *cardp = priv->card;
64 char fwname[FIRMWARE_NAME_MAX];
65 int ret;
66
67 sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
68 ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_FW);
69 if (ret == 0)
70 return count;
71
72 return ret;
73}
74
75/**
76 * lbs_flash_fw attribute to be exported per ethX interface through sysfs
77 * (/sys/class/net/ethX/lbs_flash_fw). Use this like so to write firmware to
78 * the device's persistent memory:
79 * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_fw
80 */
81static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set);
82
83/**
84 * Set function to write firmware to device's persistent memory
85 */
86static ssize_t if_usb_boot2_set(struct device *dev,
87 struct device_attribute *attr, const char *buf, size_t count)
88{
89 struct lbs_private *priv = to_net_dev(dev)->priv;
90 struct if_usb_card *cardp = priv->card;
91 char fwname[FIRMWARE_NAME_MAX];
92 int ret;
93
94 sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
95 ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_BOOT2);
96 if (ret == 0)
97 return count;
98
99 return ret;
100}
101
102/**
103 * lbs_flash_boot2 attribute to be exported per ethX interface through sysfs
104 * (/sys/class/net/ethX/lbs_flash_boot2). Use this like so to write firmware
105 * to the device's persistent memory:
106 * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_boot2
107 */
108static DEVICE_ATTR(lbs_flash_boot2, 0200, NULL, if_usb_boot2_set);
109
51/** 110/**
52 * @brief call back function to handle the status of the URB 111 * @brief call back function to handle the status of the URB
53 * @param urb pointer to urb structure 112 * @param urb pointer to urb structure
@@ -66,10 +125,10 @@ static void if_usb_write_bulk_callback(struct urb *urb)
66 lbs_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n", 125 lbs_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n",
67 urb->actual_length); 126 urb->actual_length);
68 127
69 /* Used for both firmware TX and regular TX. priv isn't 128 /* Boot commands such as UPDATE_FW and UPDATE_BOOT2 are not
70 * valid at firmware load time. 129 * passed up to the lbs level.
71 */ 130 */
72 if (priv) 131 if (priv && priv->dnld_sent != DNLD_BOOTCMD_SENT)
73 lbs_host_to_card_done(priv); 132 lbs_host_to_card_done(priv);
74 } else { 133 } else {
75 /* print the failure status number for debug */ 134 /* print the failure status number for debug */
@@ -231,7 +290,7 @@ static int if_usb_probe(struct usb_interface *intf,
231 } 290 }
232 291
233 /* Upload firmware */ 292 /* Upload firmware */
234 if (if_usb_prog_firmware(cardp)) { 293 if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) {
235 lbs_deb_usbd(&udev->dev, "FW upload failed\n"); 294 lbs_deb_usbd(&udev->dev, "FW upload failed\n");
236 goto err_prog_firmware; 295 goto err_prog_firmware;
237 } 296 }
@@ -260,6 +319,12 @@ static int if_usb_probe(struct usb_interface *intf,
260 usb_get_dev(udev); 319 usb_get_dev(udev);
261 usb_set_intfdata(intf, cardp); 320 usb_set_intfdata(intf, cardp);
262 321
322 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_fw))
323 lbs_pr_err("cannot register lbs_flash_fw attribute\n");
324
325 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2))
326 lbs_pr_err("cannot register lbs_flash_boot2 attribute\n");
327
263 return 0; 328 return 0;
264 329
265err_start_card: 330err_start_card:
@@ -285,6 +350,9 @@ static void if_usb_disconnect(struct usb_interface *intf)
285 350
286 lbs_deb_enter(LBS_DEB_MAIN); 351 lbs_deb_enter(LBS_DEB_MAIN);
287 352
353 device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2);
354 device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_fw);
355
288 cardp->surprise_removed = 1; 356 cardp->surprise_removed = 1;
289 357
290 if (priv) { 358 if (priv) {
@@ -371,11 +439,10 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
371 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); 439 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
372 440
373 cmd->command = cpu_to_le16(CMD_802_11_RESET); 441 cmd->command = cpu_to_le16(CMD_802_11_RESET);
374 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset) + S_DS_GEN); 442 cmd->size = cpu_to_le16(sizeof(struct cmd_header));
375 cmd->result = cpu_to_le16(0); 443 cmd->result = cpu_to_le16(0);
376 cmd->seqnum = cpu_to_le16(0x5a5a); 444 cmd->seqnum = cpu_to_le16(0x5a5a);
377 cmd->params.reset.action = cpu_to_le16(CMD_ACT_HALT); 445 usb_tx_block(cardp, cardp->ep_out_buf, 4 + sizeof(struct cmd_header));
378 usb_tx_block(cardp, cardp->ep_out_buf, 4 + S_DS_GEN + sizeof(struct cmd_ds_802_11_reset));
379 446
380 msleep(100); 447 msleep(100);
381 ret = usb_reset_device(cardp->udev); 448 ret = usb_reset_device(cardp->udev);
@@ -510,7 +577,7 @@ static void if_usb_receive_fwload(struct urb *urb)
510 if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) { 577 if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) {
511 kfree_skb(skb); 578 kfree_skb(skb);
512 if_usb_submit_rx_urb_fwload(cardp); 579 if_usb_submit_rx_urb_fwload(cardp);
513 cardp->bootcmdresp = 1; 580 cardp->bootcmdresp = BOOT_CMD_RESP_OK;
514 lbs_deb_usbd(&cardp->udev->dev, 581 lbs_deb_usbd(&cardp->udev->dev,
515 "Received valid boot command response\n"); 582 "Received valid boot command response\n");
516 return; 583 return;
@@ -526,7 +593,9 @@ static void if_usb_receive_fwload(struct urb *urb)
526 lbs_pr_info("boot cmd response wrong magic number (0x%x)\n", 593 lbs_pr_info("boot cmd response wrong magic number (0x%x)\n",
527 le32_to_cpu(bootcmdresp.magic)); 594 le32_to_cpu(bootcmdresp.magic));
528 } 595 }
529 } else if (bootcmdresp.cmd != BOOT_CMD_FW_BY_USB) { 596 } else if ((bootcmdresp.cmd != BOOT_CMD_FW_BY_USB) &&
597 (bootcmdresp.cmd != BOOT_CMD_UPDATE_FW) &&
598 (bootcmdresp.cmd != BOOT_CMD_UPDATE_BOOT2)) {
530 lbs_pr_info("boot cmd response cmd_tag error (%d)\n", 599 lbs_pr_info("boot cmd response cmd_tag error (%d)\n",
531 bootcmdresp.cmd); 600 bootcmdresp.cmd);
532 } else if (bootcmdresp.result != BOOT_CMD_RESP_OK) { 601 } else if (bootcmdresp.result != BOOT_CMD_RESP_OK) {
@@ -564,8 +633,8 @@ static void if_usb_receive_fwload(struct urb *urb)
564 633
565 kfree_skb(skb); 634 kfree_skb(skb);
566 635
567 /* reschedule timer for 200ms hence */ 636 /* Give device 5s to either write firmware to its RAM or eeprom */
568 mod_timer(&cardp->fw_timeout, jiffies + (HZ/5)); 637 mod_timer(&cardp->fw_timeout, jiffies + (HZ*5));
569 638
570 if (cardp->fwfinalblk) { 639 if (cardp->fwfinalblk) {
571 cardp->fwdnldover = 1; 640 cardp->fwdnldover = 1;
@@ -809,7 +878,54 @@ static int check_fwfile_format(const uint8_t *data, uint32_t totlen)
809} 878}
810 879
811 880
812static int if_usb_prog_firmware(struct if_usb_card *cardp) 881/**
882* @brief This function programs the firmware subject to cmd
883*
884* @param cardp the if_usb_card descriptor
885* fwname firmware or boot2 image file name
886* cmd either BOOT_CMD_FW_BY_USB, BOOT_CMD_UPDATE_FW,
887* or BOOT_CMD_UPDATE_BOOT2.
888* @return 0 or error code
889*/
890static int if_usb_prog_firmware(struct if_usb_card *cardp,
891 const char *fwname, int cmd)
892{
893 struct lbs_private *priv = cardp->priv;
894 unsigned long flags, caps;
895 int ret;
896
897 caps = priv->fwcapinfo;
898 if (((cmd == BOOT_CMD_UPDATE_FW) && !(caps & FW_CAPINFO_FIRMWARE_UPGRADE)) ||
899 ((cmd == BOOT_CMD_UPDATE_BOOT2) && !(caps & FW_CAPINFO_BOOT2_UPGRADE)))
900 return -EOPNOTSUPP;
901
902 /* Ensure main thread is idle. */
903 spin_lock_irqsave(&priv->driver_lock, flags);
904 while (priv->cur_cmd != NULL || priv->dnld_sent != DNLD_RES_RECEIVED) {
905 spin_unlock_irqrestore(&priv->driver_lock, flags);
906 if (wait_event_interruptible(priv->waitq,
907 (priv->cur_cmd == NULL &&
908 priv->dnld_sent == DNLD_RES_RECEIVED))) {
909 return -ERESTARTSYS;
910 }
911 spin_lock_irqsave(&priv->driver_lock, flags);
912 }
913 priv->dnld_sent = DNLD_BOOTCMD_SENT;
914 spin_unlock_irqrestore(&priv->driver_lock, flags);
915
916 ret = __if_usb_prog_firmware(cardp, fwname, cmd);
917
918 spin_lock_irqsave(&priv->driver_lock, flags);
919 priv->dnld_sent = DNLD_RES_RECEIVED;
920 spin_unlock_irqrestore(&priv->driver_lock, flags);
921
922 wake_up_interruptible(&priv->waitq);
923
924 return ret;
925}
926
927static int __if_usb_prog_firmware(struct if_usb_card *cardp,
928 const char *fwname, int cmd)
813{ 929{
814 int i = 0; 930 int i = 0;
815 static int reset_count = 10; 931 static int reset_count = 10;
@@ -817,20 +933,32 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
817 933
818 lbs_deb_enter(LBS_DEB_USB); 934 lbs_deb_enter(LBS_DEB_USB);
819 935
820 if ((ret = request_firmware(&cardp->fw, lbs_fw_name, 936 ret = request_firmware(&cardp->fw, fwname, &cardp->udev->dev);
821 &cardp->udev->dev)) < 0) { 937 if (ret < 0) {
822 lbs_pr_err("request_firmware() failed with %#x\n", ret); 938 lbs_pr_err("request_firmware() failed with %#x\n", ret);
823 lbs_pr_err("firmware %s not found\n", lbs_fw_name); 939 lbs_pr_err("firmware %s not found\n", fwname);
824 goto done; 940 goto done;
825 } 941 }
826 942
827 if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) 943 if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
944 ret = -EINVAL;
828 goto release_fw; 945 goto release_fw;
946 }
947
948 /* Cancel any pending usb business */
949 usb_kill_urb(cardp->rx_urb);
950 usb_kill_urb(cardp->tx_urb);
951
952 cardp->fwlastblksent = 0;
953 cardp->fwdnldover = 0;
954 cardp->totalbytes = 0;
955 cardp->fwfinalblk = 0;
956 cardp->bootcmdresp = 0;
829 957
830restart: 958restart:
831 if (if_usb_submit_rx_urb_fwload(cardp) < 0) { 959 if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
832 lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n"); 960 lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
833 ret = -1; 961 ret = -EIO;
834 goto release_fw; 962 goto release_fw;
835 } 963 }
836 964
@@ -838,8 +966,7 @@ restart:
838 do { 966 do {
839 int j = 0; 967 int j = 0;
840 i++; 968 i++;
841 /* Issue Boot command = 1, Boot from Download-FW */ 969 if_usb_issue_boot_command(cardp, cmd);
842 if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB);
843 /* wait for command response */ 970 /* wait for command response */
844 do { 971 do {
845 j++; 972 j++;
@@ -847,12 +974,21 @@ restart:
847 } while (cardp->bootcmdresp == 0 && j < 10); 974 } while (cardp->bootcmdresp == 0 && j < 10);
848 } while (cardp->bootcmdresp == 0 && i < 5); 975 } while (cardp->bootcmdresp == 0 && i < 5);
849 976
850 if (cardp->bootcmdresp <= 0) { 977 if (cardp->bootcmdresp == BOOT_CMD_RESP_NOT_SUPPORTED) {
978 /* Return to normal operation */
979 ret = -EOPNOTSUPP;
980 usb_kill_urb(cardp->rx_urb);
981 usb_kill_urb(cardp->tx_urb);
982 if (if_usb_submit_rx_urb(cardp) < 0)
983 ret = -EIO;
984 goto release_fw;
985 } else if (cardp->bootcmdresp <= 0) {
851 if (--reset_count >= 0) { 986 if (--reset_count >= 0) {
852 if_usb_reset_device(cardp); 987 if_usb_reset_device(cardp);
853 goto restart; 988 goto restart;
854 } 989 }
855 return -1; 990 ret = -EIO;
991 goto release_fw;
856 } 992 }
857 993
858 i = 0; 994 i = 0;
@@ -882,7 +1018,7 @@ restart:
882 } 1018 }
883 1019
884 lbs_pr_info("FW download failure, time = %d ms\n", i * 100); 1020 lbs_pr_info("FW download failure, time = %d ms\n", i * 100);
885 ret = -1; 1021 ret = -EIO;
886 goto release_fw; 1022 goto release_fw;
887 } 1023 }
888 1024
diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/libertas/if_usb.h
index 5771a83a43f0..5ba0aee0eb2f 100644
--- a/drivers/net/wireless/libertas/if_usb.h
+++ b/drivers/net/wireless/libertas/if_usb.h
@@ -30,6 +30,7 @@ struct bootcmd
30 30
31#define BOOT_CMD_RESP_OK 0x0001 31#define BOOT_CMD_RESP_OK 0x0001
32#define BOOT_CMD_RESP_FAIL 0x0000 32#define BOOT_CMD_RESP_FAIL 0x0000
33#define BOOT_CMD_RESP_NOT_SUPPORTED 0x0002
33 34
34struct bootcmdresp 35struct bootcmdresp
35{ 36{
@@ -50,6 +51,10 @@ struct if_usb_card {
50 uint8_t ep_in; 51 uint8_t ep_in;
51 uint8_t ep_out; 52 uint8_t ep_out;
52 53
54 /* bootcmdresp == 0 means command is pending
55 * bootcmdresp < 0 means error
56 * bootcmdresp > 0 is a BOOT_CMD_RESP_* from firmware
57 */
53 int8_t bootcmdresp; 58 int8_t bootcmdresp;
54 59
55 int ep_in_size; 60 int ep_in_size;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index bd32ac0b4e07..73dc8c72402a 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -291,9 +291,11 @@ static ssize_t lbs_rtap_set(struct device *dev,
291 if (priv->infra_open || priv->mesh_open) 291 if (priv->infra_open || priv->mesh_open)
292 return -EBUSY; 292 return -EBUSY;
293 if (priv->mode == IW_MODE_INFRA) 293 if (priv->mode == IW_MODE_INFRA)
294 lbs_send_deauthentication(priv); 294 lbs_cmd_80211_deauthenticate(priv,
295 priv->curbssparams.bssid,
296 WLAN_REASON_DEAUTH_LEAVING);
295 else if (priv->mode == IW_MODE_ADHOC) 297 else if (priv->mode == IW_MODE_ADHOC)
296 lbs_stop_adhoc_network(priv); 298 lbs_adhoc_stop(priv);
297 lbs_add_rtap(priv); 299 lbs_add_rtap(priv);
298 } 300 }
299 priv->monitormode = monitor_mode; 301 priv->monitormode = monitor_mode;
@@ -956,17 +958,24 @@ EXPORT_SYMBOL_GPL(lbs_resume);
956static int lbs_setup_firmware(struct lbs_private *priv) 958static int lbs_setup_firmware(struct lbs_private *priv)
957{ 959{
958 int ret = -1; 960 int ret = -1;
961 s16 curlevel = 0, minlevel = 0, maxlevel = 0;
959 962
960 lbs_deb_enter(LBS_DEB_FW); 963 lbs_deb_enter(LBS_DEB_FW);
961 964
962 /* 965 /* Read MAC address from firmware */
963 * Read MAC address from HW
964 */
965 memset(priv->current_addr, 0xff, ETH_ALEN); 966 memset(priv->current_addr, 0xff, ETH_ALEN);
966 ret = lbs_update_hw_spec(priv); 967 ret = lbs_update_hw_spec(priv);
967 if (ret) 968 if (ret)
968 goto done; 969 goto done;
969 970
971 /* Read power levels if available */
972 ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
973 if (ret == 0) {
974 priv->txpower_cur = curlevel;
975 priv->txpower_min = minlevel;
976 priv->txpower_max = maxlevel;
977 }
978
970 lbs_set_mac_control(priv); 979 lbs_set_mac_control(priv);
971done: 980done:
972 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 981 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
@@ -1042,7 +1051,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
1042 priv->mode = IW_MODE_INFRA; 1051 priv->mode = IW_MODE_INFRA;
1043 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; 1052 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL;
1044 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 1053 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
1045 priv->radioon = RADIO_ON; 1054 priv->radio_on = 1;
1046 priv->enablehwauto = 1; 1055 priv->enablehwauto = 1;
1047 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE; 1056 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
1048 priv->psmode = LBS802_11POWERMODECAM; 1057 priv->psmode = LBS802_11POWERMODECAM;
@@ -1196,7 +1205,13 @@ void lbs_remove_card(struct lbs_private *priv)
1196 cancel_delayed_work_sync(&priv->scan_work); 1205 cancel_delayed_work_sync(&priv->scan_work);
1197 cancel_delayed_work_sync(&priv->assoc_work); 1206 cancel_delayed_work_sync(&priv->assoc_work);
1198 cancel_work_sync(&priv->mcast_work); 1207 cancel_work_sync(&priv->mcast_work);
1208
1209 /* worker thread destruction blocks on the in-flight command which
1210 * should have been cleared already in lbs_stop_card().
1211 */
1212 lbs_deb_main("destroying worker thread\n");
1199 destroy_workqueue(priv->work_thread); 1213 destroy_workqueue(priv->work_thread);
1214 lbs_deb_main("done destroying worker thread\n");
1200 1215
1201 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { 1216 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
1202 priv->psmode = LBS802_11POWERMODECAM; 1217 priv->psmode = LBS802_11POWERMODECAM;
@@ -1314,14 +1329,26 @@ void lbs_stop_card(struct lbs_private *priv)
1314 device_remove_file(&dev->dev, &dev_attr_lbs_rtap); 1329 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1315 } 1330 }
1316 1331
1317 /* Flush pending command nodes */ 1332 /* Delete the timeout of the currently processing command */
1318 del_timer_sync(&priv->command_timer); 1333 del_timer_sync(&priv->command_timer);
1334
1335 /* Flush pending command nodes */
1319 spin_lock_irqsave(&priv->driver_lock, flags); 1336 spin_lock_irqsave(&priv->driver_lock, flags);
1337 lbs_deb_main("clearing pending commands\n");
1320 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) { 1338 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
1321 cmdnode->result = -ENOENT; 1339 cmdnode->result = -ENOENT;
1322 cmdnode->cmdwaitqwoken = 1; 1340 cmdnode->cmdwaitqwoken = 1;
1323 wake_up_interruptible(&cmdnode->cmdwait_q); 1341 wake_up_interruptible(&cmdnode->cmdwait_q);
1324 } 1342 }
1343
1344 /* Flush the command the card is currently processing */
1345 if (priv->cur_cmd) {
1346 lbs_deb_main("clearing current command\n");
1347 priv->cur_cmd->result = -ENOENT;
1348 priv->cur_cmd->cmdwaitqwoken = 1;
1349 wake_up_interruptible(&priv->cur_cmd->cmdwait_q);
1350 }
1351 lbs_deb_main("done clearing commands\n");
1325 spin_unlock_irqrestore(&priv->driver_lock, flags); 1352 spin_unlock_irqrestore(&priv->driver_lock, flags);
1326 1353
1327 unregister_netdev(dev); 1354 unregister_netdev(dev);
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 4b274562f965..8f66903641b9 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -944,6 +944,11 @@ int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
944 944
945 lbs_deb_enter(LBS_DEB_WEXT); 945 lbs_deb_enter(LBS_DEB_WEXT);
946 946
947 if (!priv->radio_on) {
948 ret = -EINVAL;
949 goto out;
950 }
951
947 if (!netif_running(dev)) { 952 if (!netif_running(dev)) {
948 ret = -ENETDOWN; 953 ret = -ENETDOWN;
949 goto out; 954 goto out;
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 8b3ed77860b3..11297dcf9fc3 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -30,6 +30,14 @@ static inline void lbs_postpone_association_work(struct lbs_private *priv)
30 queue_delayed_work(priv->work_thread, &priv->assoc_work, HZ / 2); 30 queue_delayed_work(priv->work_thread, &priv->assoc_work, HZ / 2);
31} 31}
32 32
33static inline void lbs_do_association_work(struct lbs_private *priv)
34{
35 if (priv->surpriseremoved)
36 return;
37 cancel_delayed_work(&priv->assoc_work);
38 queue_delayed_work(priv->work_thread, &priv->assoc_work, 0);
39}
40
33static inline void lbs_cancel_association_work(struct lbs_private *priv) 41static inline void lbs_cancel_association_work(struct lbs_private *priv)
34{ 42{
35 cancel_delayed_work(&priv->assoc_work); 43 cancel_delayed_work(&priv->assoc_work);
@@ -120,34 +128,6 @@ static struct chan_freq_power *find_cfp_by_band_and_freq(
120 return cfp; 128 return cfp;
121} 129}
122 130
123
124/**
125 * @brief Set Radio On/OFF
126 *
127 * @param priv A pointer to struct lbs_private structure
128 * @option Radio Option
129 * @return 0 --success, otherwise fail
130 */
131static int lbs_radio_ioctl(struct lbs_private *priv, u8 option)
132{
133 int ret = 0;
134
135 lbs_deb_enter(LBS_DEB_WEXT);
136
137 if (priv->radioon != option) {
138 lbs_deb_wext("switching radio %s\n", option ? "on" : "off");
139 priv->radioon = option;
140
141 ret = lbs_prepare_and_send_command(priv,
142 CMD_802_11_RADIO_CONTROL,
143 CMD_ACT_SET,
144 CMD_OPTION_WAITFORRSP, 0, NULL);
145 }
146
147 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
148 return ret;
149}
150
151/** 131/**
152 * @brief Copy active data rates based on adapter mode and status 132 * @brief Copy active data rates based on adapter mode and status
153 * 133 *
@@ -294,21 +274,17 @@ static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
294{ 274{
295 int ret = 0; 275 int ret = 0;
296 struct lbs_private *priv = dev->priv; 276 struct lbs_private *priv = dev->priv;
297 u32 rthr = vwrq->value; 277 u32 val = vwrq->value;
298 278
299 lbs_deb_enter(LBS_DEB_WEXT); 279 lbs_deb_enter(LBS_DEB_WEXT);
300 280
301 if (vwrq->disabled) { 281 if (vwrq->disabled)
302 priv->rtsthsd = rthr = MRVDRV_RTS_MAX_VALUE; 282 val = MRVDRV_RTS_MAX_VALUE;
303 } else { 283
304 if (rthr < MRVDRV_RTS_MIN_VALUE || rthr > MRVDRV_RTS_MAX_VALUE) 284 if (val > MRVDRV_RTS_MAX_VALUE) /* min rts value is 0 */
305 return -EINVAL; 285 return -EINVAL;
306 priv->rtsthsd = rthr;
307 }
308 286
309 ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, 287 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, (u16) val);
310 CMD_ACT_SET, CMD_OPTION_WAITFORRSP,
311 OID_802_11_RTS_THRESHOLD, &rthr);
312 288
313 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 289 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
314 return ret; 290 return ret;
@@ -317,21 +293,18 @@ static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
317static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info, 293static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info,
318 struct iw_param *vwrq, char *extra) 294 struct iw_param *vwrq, char *extra)
319{ 295{
320 int ret = 0;
321 struct lbs_private *priv = dev->priv; 296 struct lbs_private *priv = dev->priv;
297 int ret = 0;
298 u16 val = 0;
322 299
323 lbs_deb_enter(LBS_DEB_WEXT); 300 lbs_deb_enter(LBS_DEB_WEXT);
324 301
325 priv->rtsthsd = 0; 302 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, &val);
326 ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB,
327 CMD_ACT_GET, CMD_OPTION_WAITFORRSP,
328 OID_802_11_RTS_THRESHOLD, NULL);
329 if (ret) 303 if (ret)
330 goto out; 304 goto out;
331 305
332 vwrq->value = priv->rtsthsd; 306 vwrq->value = val;
333 vwrq->disabled = ((vwrq->value < MRVDRV_RTS_MIN_VALUE) 307 vwrq->disabled = val > MRVDRV_RTS_MAX_VALUE; /* min rts value is 0 */
334 || (vwrq->value > MRVDRV_RTS_MAX_VALUE));
335 vwrq->fixed = 1; 308 vwrq->fixed = 1;
336 309
337out: 310out:
@@ -342,24 +315,19 @@ out:
342static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info, 315static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
343 struct iw_param *vwrq, char *extra) 316 struct iw_param *vwrq, char *extra)
344{ 317{
345 int ret = 0;
346 u32 fthr = vwrq->value;
347 struct lbs_private *priv = dev->priv; 318 struct lbs_private *priv = dev->priv;
319 int ret = 0;
320 u32 val = vwrq->value;
348 321
349 lbs_deb_enter(LBS_DEB_WEXT); 322 lbs_deb_enter(LBS_DEB_WEXT);
350 323
351 if (vwrq->disabled) { 324 if (vwrq->disabled)
352 priv->fragthsd = fthr = MRVDRV_FRAG_MAX_VALUE; 325 val = MRVDRV_FRAG_MAX_VALUE;
353 } else {
354 if (fthr < MRVDRV_FRAG_MIN_VALUE
355 || fthr > MRVDRV_FRAG_MAX_VALUE)
356 return -EINVAL;
357 priv->fragthsd = fthr;
358 }
359 326
360 ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, 327 if (val < MRVDRV_FRAG_MIN_VALUE || val > MRVDRV_FRAG_MAX_VALUE)
361 CMD_ACT_SET, CMD_OPTION_WAITFORRSP, 328 return -EINVAL;
362 OID_802_11_FRAGMENTATION_THRESHOLD, &fthr); 329
330 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, (u16) val);
363 331
364 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 332 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
365 return ret; 333 return ret;
@@ -368,22 +336,19 @@ static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
368static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info, 336static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info,
369 struct iw_param *vwrq, char *extra) 337 struct iw_param *vwrq, char *extra)
370{ 338{
371 int ret = 0;
372 struct lbs_private *priv = dev->priv; 339 struct lbs_private *priv = dev->priv;
340 int ret = 0;
341 u16 val = 0;
373 342
374 lbs_deb_enter(LBS_DEB_WEXT); 343 lbs_deb_enter(LBS_DEB_WEXT);
375 344
376 priv->fragthsd = 0; 345 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, &val);
377 ret = lbs_prepare_and_send_command(priv,
378 CMD_802_11_SNMP_MIB,
379 CMD_ACT_GET, CMD_OPTION_WAITFORRSP,
380 OID_802_11_FRAGMENTATION_THRESHOLD, NULL);
381 if (ret) 346 if (ret)
382 goto out; 347 goto out;
383 348
384 vwrq->value = priv->fragthsd; 349 vwrq->value = val;
385 vwrq->disabled = ((vwrq->value < MRVDRV_FRAG_MIN_VALUE) 350 vwrq->disabled = ((val < MRVDRV_FRAG_MIN_VALUE)
386 || (vwrq->value > MRVDRV_FRAG_MAX_VALUE)); 351 || (val > MRVDRV_FRAG_MAX_VALUE));
387 vwrq->fixed = 1; 352 vwrq->fixed = 1;
388 353
389out: 354out:
@@ -410,7 +375,7 @@ static int mesh_wlan_get_mode(struct net_device *dev,
410{ 375{
411 lbs_deb_enter(LBS_DEB_WEXT); 376 lbs_deb_enter(LBS_DEB_WEXT);
412 377
413 *uwrq = IW_MODE_REPEAT ; 378 *uwrq = IW_MODE_REPEAT;
414 379
415 lbs_deb_leave(LBS_DEB_WEXT); 380 lbs_deb_leave(LBS_DEB_WEXT);
416 return 0; 381 return 0;
@@ -420,28 +385,30 @@ static int lbs_get_txpow(struct net_device *dev,
420 struct iw_request_info *info, 385 struct iw_request_info *info,
421 struct iw_param *vwrq, char *extra) 386 struct iw_param *vwrq, char *extra)
422{ 387{
423 int ret = 0;
424 struct lbs_private *priv = dev->priv; 388 struct lbs_private *priv = dev->priv;
389 s16 curlevel = 0;
390 int ret = 0;
425 391
426 lbs_deb_enter(LBS_DEB_WEXT); 392 lbs_deb_enter(LBS_DEB_WEXT);
427 393
428 ret = lbs_prepare_and_send_command(priv, 394 if (!priv->radio_on) {
429 CMD_802_11_RF_TX_POWER, 395 lbs_deb_wext("tx power off\n");
430 CMD_ACT_TX_POWER_OPT_GET, 396 vwrq->value = 0;
431 CMD_OPTION_WAITFORRSP, 0, NULL); 397 vwrq->disabled = 1;
398 goto out;
399 }
432 400
401 ret = lbs_get_tx_power(priv, &curlevel, NULL, NULL);
433 if (ret) 402 if (ret)
434 goto out; 403 goto out;
435 404
436 lbs_deb_wext("tx power level %d dbm\n", priv->txpowerlevel); 405 lbs_deb_wext("tx power level %d dbm\n", curlevel);
437 vwrq->value = priv->txpowerlevel; 406 priv->txpower_cur = curlevel;
407
408 vwrq->value = curlevel;
438 vwrq->fixed = 1; 409 vwrq->fixed = 1;
439 if (priv->radioon) { 410 vwrq->disabled = 0;
440 vwrq->disabled = 0; 411 vwrq->flags = IW_TXPOW_DBM;
441 vwrq->flags = IW_TXPOW_DBM;
442 } else {
443 vwrq->disabled = 1;
444 }
445 412
446out: 413out:
447 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 414 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
@@ -451,31 +418,44 @@ out:
451static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info, 418static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info,
452 struct iw_param *vwrq, char *extra) 419 struct iw_param *vwrq, char *extra)
453{ 420{
454 int ret = 0;
455 struct lbs_private *priv = dev->priv; 421 struct lbs_private *priv = dev->priv;
422 int ret = 0;
423 u16 slimit = 0, llimit = 0;
456 424
457 lbs_deb_enter(LBS_DEB_WEXT); 425 lbs_deb_enter(LBS_DEB_WEXT);
458 426
459 if (vwrq->flags == IW_RETRY_LIMIT) { 427 if ((vwrq->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
460 /* The MAC has a 4-bit Total_Tx_Count register 428 return -EOPNOTSUPP;
461 Total_Tx_Count = 1 + Tx_Retry_Count */ 429
430 /* The MAC has a 4-bit Total_Tx_Count register
431 Total_Tx_Count = 1 + Tx_Retry_Count */
462#define TX_RETRY_MIN 0 432#define TX_RETRY_MIN 0
463#define TX_RETRY_MAX 14 433#define TX_RETRY_MAX 14
464 if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX) 434 if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX)
465 return -EINVAL; 435 return -EINVAL;
466 436
467 /* Adding 1 to convert retry count to try count */ 437 /* Add 1 to convert retry count to try count */
468 priv->txretrycount = vwrq->value + 1; 438 if (vwrq->flags & IW_RETRY_SHORT)
439 slimit = (u16) (vwrq->value + 1);
440 else if (vwrq->flags & IW_RETRY_LONG)
441 llimit = (u16) (vwrq->value + 1);
442 else
443 slimit = llimit = (u16) (vwrq->value + 1); /* set both */
469 444
470 ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, 445 if (llimit) {
471 CMD_ACT_SET, 446 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT,
472 CMD_OPTION_WAITFORRSP, 447 llimit);
473 OID_802_11_TX_RETRYCOUNT, NULL); 448 if (ret)
449 goto out;
450 }
474 451
452 if (slimit) {
453 /* txretrycount follows the short retry limit */
454 priv->txretrycount = slimit;
455 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT,
456 slimit);
475 if (ret) 457 if (ret)
476 goto out; 458 goto out;
477 } else {
478 return -EOPNOTSUPP;
479 } 459 }
480 460
481out: 461out:
@@ -488,22 +468,30 @@ static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info,
488{ 468{
489 struct lbs_private *priv = dev->priv; 469 struct lbs_private *priv = dev->priv;
490 int ret = 0; 470 int ret = 0;
471 u16 val = 0;
491 472
492 lbs_deb_enter(LBS_DEB_WEXT); 473 lbs_deb_enter(LBS_DEB_WEXT);
493 474
494 priv->txretrycount = 0;
495 ret = lbs_prepare_and_send_command(priv,
496 CMD_802_11_SNMP_MIB,
497 CMD_ACT_GET, CMD_OPTION_WAITFORRSP,
498 OID_802_11_TX_RETRYCOUNT, NULL);
499 if (ret)
500 goto out;
501
502 vwrq->disabled = 0; 475 vwrq->disabled = 0;
503 if (!vwrq->flags) { 476
504 vwrq->flags = IW_RETRY_LIMIT; 477 if (vwrq->flags & IW_RETRY_LONG) {
478 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT, &val);
479 if (ret)
480 goto out;
481
505 /* Subtract 1 to convert try count to retry count */ 482 /* Subtract 1 to convert try count to retry count */
506 vwrq->value = priv->txretrycount - 1; 483 vwrq->value = val - 1;
484 vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
485 } else {
486 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT, &val);
487 if (ret)
488 goto out;
489
490 /* txretry count follows the short retry limit */
491 priv->txretrycount = val;
492 /* Subtract 1 to convert try count to retry count */
493 vwrq->value = val - 1;
494 vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
507 } 495 }
508 496
509out: 497out:
@@ -693,22 +681,12 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
693 681
694 range->sensitivity = 0; 682 range->sensitivity = 0;
695 683
696 /* 684 /* Setup the supported power level ranges */
697 * Setup the supported power level ranges
698 */
699 memset(range->txpower, 0, sizeof(range->txpower)); 685 memset(range->txpower, 0, sizeof(range->txpower));
700 range->txpower[0] = 5; 686 range->txpower_capa = IW_TXPOW_DBM | IW_TXPOW_RANGE;
701 range->txpower[1] = 7; 687 range->txpower[0] = priv->txpower_min;
702 range->txpower[2] = 9; 688 range->txpower[1] = priv->txpower_max;
703 range->txpower[3] = 11; 689 range->num_txpower = 2;
704 range->txpower[4] = 13;
705 range->txpower[5] = 15;
706 range->txpower[6] = 17;
707 range->txpower[7] = 19;
708
709 range->num_txpower = 8;
710 range->txpower_capa = IW_TXPOW_DBM;
711 range->txpower_capa |= IW_TXPOW_RANGE;
712 690
713 range->event_capa[0] = (IW_EVENT_CAPA_K_0 | 691 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
714 IW_EVENT_CAPA_MASK(SIOCGIWAP) | 692 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
@@ -998,9 +976,11 @@ static int lbs_mesh_set_freq(struct net_device *dev,
998 if (fwrq->m != priv->curbssparams.channel) { 976 if (fwrq->m != priv->curbssparams.channel) {
999 lbs_deb_wext("mesh channel change forces eth disconnect\n"); 977 lbs_deb_wext("mesh channel change forces eth disconnect\n");
1000 if (priv->mode == IW_MODE_INFRA) 978 if (priv->mode == IW_MODE_INFRA)
1001 lbs_send_deauthentication(priv); 979 lbs_cmd_80211_deauthenticate(priv,
980 priv->curbssparams.bssid,
981 WLAN_REASON_DEAUTH_LEAVING);
1002 else if (priv->mode == IW_MODE_ADHOC) 982 else if (priv->mode == IW_MODE_ADHOC)
1003 lbs_stop_adhoc_network(priv); 983 lbs_adhoc_stop(priv);
1004 } 984 }
1005 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m); 985 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m);
1006 lbs_update_channel(priv); 986 lbs_update_channel(priv);
@@ -1612,12 +1592,14 @@ static int lbs_set_encodeext(struct net_device *dev,
1612 set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); 1592 set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
1613 } 1593 }
1614 1594
1615 disable_wep (assoc_req); 1595 /* Only disable wep if necessary: can't waste time here. */
1596 if (priv->mac_control & CMD_ACT_MAC_WEP_ENABLE)
1597 disable_wep(assoc_req);
1616 } 1598 }
1617 1599
1618out: 1600out:
1619 if (ret == 0) { 1601 if (ret == 0) { /* key installation is time critical: postpone not! */
1620 lbs_postpone_association_work(priv); 1602 lbs_do_association_work(priv);
1621 } else { 1603 } else {
1622 lbs_cancel_association_work(priv); 1604 lbs_cancel_association_work(priv);
1623 } 1605 }
@@ -1844,39 +1826,77 @@ static int lbs_set_txpow(struct net_device *dev, struct iw_request_info *info,
1844{ 1826{
1845 int ret = 0; 1827 int ret = 0;
1846 struct lbs_private *priv = dev->priv; 1828 struct lbs_private *priv = dev->priv;
1847 1829 s16 dbm = (s16) vwrq->value;
1848 u16 dbm;
1849 1830
1850 lbs_deb_enter(LBS_DEB_WEXT); 1831 lbs_deb_enter(LBS_DEB_WEXT);
1851 1832
1852 if (vwrq->disabled) { 1833 if (vwrq->disabled) {
1853 lbs_radio_ioctl(priv, RADIO_OFF); 1834 lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 0);
1854 return 0; 1835 goto out;
1855 } 1836 }
1856 1837
1857 priv->preamble = CMD_TYPE_AUTO_PREAMBLE; 1838 if (vwrq->fixed == 0) {
1858 1839 /* User requests automatic tx power control, however there are
1859 lbs_radio_ioctl(priv, RADIO_ON); 1840 * many auto tx settings. For now use firmware defaults until
1841 * we come up with a good way to expose these to the user. */
1842 if (priv->fwrelease < 0x09000000) {
1843 ret = lbs_set_power_adapt_cfg(priv, 1,
1844 POW_ADAPT_DEFAULT_P0,
1845 POW_ADAPT_DEFAULT_P1,
1846 POW_ADAPT_DEFAULT_P2);
1847 if (ret)
1848 goto out;
1849 }
1850 ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
1851 TPC_DEFAULT_P2, 1);
1852 if (ret)
1853 goto out;
1854 dbm = priv->txpower_max;
1855 } else {
1856 /* Userspace check in iwrange if it should use dBm or mW,
1857 * therefore this should never happen... Jean II */
1858 if ((vwrq->flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) {
1859 ret = -EOPNOTSUPP;
1860 goto out;
1861 }
1860 1862
1861 /* Userspace check in iwrange if it should use dBm or mW, 1863 /* Validate requested power level against firmware allowed
1862 * therefore this should never happen... Jean II */ 1864 * levels */
1863 if ((vwrq->flags & IW_TXPOW_TYPE) == IW_TXPOW_MWATT) { 1865 if (priv->txpower_min && (dbm < priv->txpower_min)) {
1864 return -EOPNOTSUPP; 1866 ret = -EINVAL;
1865 } else 1867 goto out;
1866 dbm = (u16) vwrq->value; 1868 }
1867 1869
1868 /* auto tx power control */ 1870 if (priv->txpower_max && (dbm > priv->txpower_max)) {
1871 ret = -EINVAL;
1872 goto out;
1873 }
1874 if (priv->fwrelease < 0x09000000) {
1875 ret = lbs_set_power_adapt_cfg(priv, 0,
1876 POW_ADAPT_DEFAULT_P0,
1877 POW_ADAPT_DEFAULT_P1,
1878 POW_ADAPT_DEFAULT_P2);
1879 if (ret)
1880 goto out;
1881 }
1882 ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
1883 TPC_DEFAULT_P2, 1);
1884 if (ret)
1885 goto out;
1886 }
1869 1887
1870 if (vwrq->fixed == 0) 1888 /* If the radio was off, turn it on */
1871 dbm = 0xffff; 1889 if (!priv->radio_on) {
1890 ret = lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 1);
1891 if (ret)
1892 goto out;
1893 }
1872 1894
1873 lbs_deb_wext("txpower set %d dbm\n", dbm); 1895 lbs_deb_wext("txpower set %d dBm\n", dbm);
1874 1896
1875 ret = lbs_prepare_and_send_command(priv, 1897 ret = lbs_set_tx_power(priv, dbm);
1876 CMD_802_11_RF_TX_POWER,
1877 CMD_ACT_TX_POWER_OPT_SET_LOW,
1878 CMD_OPTION_WAITFORRSP, 0, (void *)&dbm);
1879 1898
1899out:
1880 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 1900 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1881 return ret; 1901 return ret;
1882} 1902}
@@ -1928,6 +1948,11 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
1928 1948
1929 lbs_deb_enter(LBS_DEB_WEXT); 1949 lbs_deb_enter(LBS_DEB_WEXT);
1930 1950
1951 if (!priv->radio_on) {
1952 ret = -EINVAL;
1953 goto out;
1954 }
1955
1931 /* Check the size of the string */ 1956 /* Check the size of the string */
1932 if (in_ssid_len > IW_ESSID_MAX_SIZE) { 1957 if (in_ssid_len > IW_ESSID_MAX_SIZE) {
1933 ret = -E2BIG; 1958 ret = -E2BIG;
@@ -2005,6 +2030,11 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2005 2030
2006 lbs_deb_enter(LBS_DEB_WEXT); 2031 lbs_deb_enter(LBS_DEB_WEXT);
2007 2032
2033 if (!priv->radio_on) {
2034 ret = -EINVAL;
2035 goto out;
2036 }
2037
2008 /* Check the size of the string */ 2038 /* Check the size of the string */
2009 if (dwrq->length > IW_ESSID_MAX_SIZE) { 2039 if (dwrq->length > IW_ESSID_MAX_SIZE) {
2010 ret = -E2BIG; 2040 ret = -E2BIG;
@@ -2046,6 +2076,9 @@ static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info,
2046 2076
2047 lbs_deb_enter(LBS_DEB_WEXT); 2077 lbs_deb_enter(LBS_DEB_WEXT);
2048 2078
2079 if (!priv->radio_on)
2080 return -EINVAL;
2081
2049 if (awrq->sa_family != ARPHRD_ETHER) 2082 if (awrq->sa_family != ARPHRD_ETHER)
2050 return -EINVAL; 2083 return -EINVAL;
2051 2084
diff --git a/drivers/net/wireless/libertas_tf/Makefile b/drivers/net/wireless/libertas_tf/Makefile
new file mode 100644
index 000000000000..ff5544d6ac9d
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/Makefile
@@ -0,0 +1,6 @@
1libertas_tf-objs := main.o cmd.o
2
3libertas_tf_usb-objs += if_usb.o
4
5obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf.o
6obj-$(CONFIG_LIBERTAS_THINFIRM_USB) += libertas_tf_usb.o
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
new file mode 100644
index 000000000000..fdbcf8ba3e8a
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -0,0 +1,669 @@
1/*
2 * Copyright (C) 2008, cozybit Inc.
3 * Copyright (C) 2003-2006, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 */
10#include "libertas_tf.h"
11
12static const struct channel_range channel_ranges[] = {
13 { LBTF_REGDOMAIN_US, 1, 12 },
14 { LBTF_REGDOMAIN_CA, 1, 12 },
15 { LBTF_REGDOMAIN_EU, 1, 14 },
16 { LBTF_REGDOMAIN_JP, 1, 14 },
17 { LBTF_REGDOMAIN_SP, 1, 14 },
18 { LBTF_REGDOMAIN_FR, 1, 14 },
19};
20
21static u16 lbtf_region_code_to_index[MRVDRV_MAX_REGION_CODE] =
22{
23 LBTF_REGDOMAIN_US, LBTF_REGDOMAIN_CA, LBTF_REGDOMAIN_EU,
24 LBTF_REGDOMAIN_SP, LBTF_REGDOMAIN_FR, LBTF_REGDOMAIN_JP,
25};
26
27static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv);
28
29
30/**
31 * lbtf_cmd_copyback - Simple callback that copies response back into command
32 *
33 * @priv A pointer to struct lbtf_private structure
34 * @extra A pointer to the original command structure for which
35 * 'resp' is a response
36 * @resp A pointer to the command response
37 *
38 * Returns: 0 on success, error on failure
39 */
40int lbtf_cmd_copyback(struct lbtf_private *priv, unsigned long extra,
41 struct cmd_header *resp)
42{
43 struct cmd_header *buf = (void *)extra;
44 uint16_t copy_len;
45
46 copy_len = min(le16_to_cpu(buf->size), le16_to_cpu(resp->size));
47 memcpy(buf, resp, copy_len);
48 return 0;
49}
50EXPORT_SYMBOL_GPL(lbtf_cmd_copyback);
51
52#define CHAN_TO_IDX(chan) ((chan) - 1)
53
54static void lbtf_geo_init(struct lbtf_private *priv)
55{
56 const struct channel_range *range = channel_ranges;
57 u8 ch;
58 int i;
59
60 for (i = 0; i < ARRAY_SIZE(channel_ranges); i++)
61 if (channel_ranges[i].regdomain == priv->regioncode) {
62 range = &channel_ranges[i];
63 break;
64 }
65
66 for (ch = priv->range.start; ch < priv->range.end; ch++)
67 priv->channels[CHAN_TO_IDX(ch)].flags = 0;
68}
69
70/**
71 * lbtf_update_hw_spec: Updates the hardware details.
72 *
73 * @priv A pointer to struct lbtf_private structure
74 *
75 * Returns: 0 on success, error on failure
76 */
77int lbtf_update_hw_spec(struct lbtf_private *priv)
78{
79 struct cmd_ds_get_hw_spec cmd;
80 int ret = -1;
81 u32 i;
82 DECLARE_MAC_BUF(mac);
83
84 memset(&cmd, 0, sizeof(cmd));
85 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
86 memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN);
87 ret = lbtf_cmd_with_response(priv, CMD_GET_HW_SPEC, &cmd);
88 if (ret)
89 goto out;
90
91 priv->fwcapinfo = le32_to_cpu(cmd.fwcapinfo);
92
93 /* The firmware release is in an interesting format: the patch
94 * level is in the most significant nibble ... so fix that: */
95 priv->fwrelease = le32_to_cpu(cmd.fwrelease);
96 priv->fwrelease = (priv->fwrelease << 8) |
97 (priv->fwrelease >> 24 & 0xff);
98
99 printk(KERN_INFO "libertastf: %s, fw %u.%u.%up%u, cap 0x%08x\n",
100 print_mac(mac, cmd.permanentaddr),
101 priv->fwrelease >> 24 & 0xff,
102 priv->fwrelease >> 16 & 0xff,
103 priv->fwrelease >> 8 & 0xff,
104 priv->fwrelease & 0xff,
105 priv->fwcapinfo);
106
107 /* Clamp region code to 8-bit since FW spec indicates that it should
108 * only ever be 8-bit, even though the field size is 16-bit. Some
109 * firmware returns non-zero high 8 bits here.
110 */
111 priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF;
112
113 for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) {
114 /* use the region code to search for the index */
115 if (priv->regioncode == lbtf_region_code_to_index[i])
116 break;
117 }
118
119 /* if it's unidentified region code, use the default (USA) */
120 if (i >= MRVDRV_MAX_REGION_CODE)
121 priv->regioncode = 0x10;
122
123 if (priv->current_addr[0] == 0xff)
124 memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
125
126 SET_IEEE80211_PERM_ADDR(priv->hw, priv->current_addr);
127
128 lbtf_geo_init(priv);
129out:
130 return ret;
131}
132
133/**
134 * lbtf_set_channel: Set the radio channel
135 *
136 * @priv A pointer to struct lbtf_private structure
137 * @channel The desired channel, or 0 to clear a locked channel
138 *
139 * Returns: 0 on success, error on failure
140 */
141int lbtf_set_channel(struct lbtf_private *priv, u8 channel)
142{
143 struct cmd_ds_802_11_rf_channel cmd;
144
145 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
146 cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET);
147 cmd.channel = cpu_to_le16(channel);
148
149 return lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
150}
151
152int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
153{
154 struct cmd_ds_802_11_beacon_set cmd;
155 int size;
156
157 if (beacon->len > MRVL_MAX_BCN_SIZE)
158 return -1;
159 size = sizeof(cmd) - sizeof(cmd.beacon) + beacon->len;
160 cmd.hdr.size = cpu_to_le16(size);
161 cmd.len = cpu_to_le16(beacon->len);
162 memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len);
163
164 lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size);
165 return 0;
166}
167
168int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
169 int beacon_int) {
170 struct cmd_ds_802_11_beacon_control cmd;
171
172 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
173 cmd.action = cpu_to_le16(CMD_ACT_SET);
174 cmd.beacon_enable = cpu_to_le16(beacon_enable);
175 cmd.beacon_period = cpu_to_le16(beacon_int);
176
177 lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd));
178 return 0;
179}
180
181static void lbtf_queue_cmd(struct lbtf_private *priv,
182 struct cmd_ctrl_node *cmdnode)
183{
184 unsigned long flags;
185
186 if (!cmdnode)
187 return;
188
189 if (!cmdnode->cmdbuf->size)
190 return;
191
192 cmdnode->result = 0;
193 spin_lock_irqsave(&priv->driver_lock, flags);
194 list_add_tail(&cmdnode->list, &priv->cmdpendingq);
195 spin_unlock_irqrestore(&priv->driver_lock, flags);
196}
197
198static void lbtf_submit_command(struct lbtf_private *priv,
199 struct cmd_ctrl_node *cmdnode)
200{
201 unsigned long flags;
202 struct cmd_header *cmd;
203 uint16_t cmdsize;
204 uint16_t command;
205 int timeo = 5 * HZ;
206 int ret;
207
208 cmd = cmdnode->cmdbuf;
209
210 spin_lock_irqsave(&priv->driver_lock, flags);
211 priv->cur_cmd = cmdnode;
212 cmdsize = le16_to_cpu(cmd->size);
213 command = le16_to_cpu(cmd->command);
214 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
215 spin_unlock_irqrestore(&priv->driver_lock, flags);
216
217 if (ret)
218 /* Let the timer kick in and retry, and potentially reset
219 the whole thing if the condition persists */
220 timeo = HZ;
221
222 /* Setup the timer after transmit command */
223 mod_timer(&priv->command_timer, jiffies + timeo);
224}
225
226/**
227 * This function inserts command node to cmdfreeq
228 * after cleans it. Requires priv->driver_lock held.
229 */
230static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
231 struct cmd_ctrl_node *cmdnode)
232{
233 if (!cmdnode)
234 return;
235
236 cmdnode->callback = NULL;
237 cmdnode->callback_arg = 0;
238
239 memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE);
240
241 list_add_tail(&cmdnode->list, &priv->cmdfreeq);
242}
243
244static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
245 struct cmd_ctrl_node *ptempcmd)
246{
247 unsigned long flags;
248
249 spin_lock_irqsave(&priv->driver_lock, flags);
250 __lbtf_cleanup_and_insert_cmd(priv, ptempcmd);
251 spin_unlock_irqrestore(&priv->driver_lock, flags);
252}
253
/**
 * lbtf_complete_command - mark a command as finished and wake its waiter
 *
 * @priv:   pointer to &struct lbtf_private
 * @cmd:    the command node being completed
 * @result: result code stored for the waiter in __lbtf_cmd()
 *
 * Caller holds priv->driver_lock (the other lock-requiring helpers in this
 * file follow that convention — confirm at call sites).  Commands with a
 * callback are NOT recycled here; their callback path owns the node.
 */
void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd,
			   int result)
{
	cmd->result = result;
	cmd->cmdwaitqwoken = 1;
	wake_up_interruptible(&cmd->cmdwait_q);

	/* Only callback-less (synchronous) commands go straight back to
	   the free queue; see __lbtf_cmd() for the callback case. */
	if (!cmd->callback)
		__lbtf_cleanup_and_insert_cmd(priv, cmd);
	priv->cur_cmd = NULL;
}
265
266int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv)
267{
268 struct cmd_ds_mac_multicast_addr cmd;
269
270 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
271 cmd.action = cpu_to_le16(CMD_ACT_SET);
272
273 cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr);
274 memcpy(cmd.maclist, priv->multicastlist,
275 priv->nr_of_multicastmacaddr * ETH_ALEN);
276
277 lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd));
278 return 0;
279}
280
/**
 * lbtf_set_mode - asynchronously set the 802.11 operating mode
 *
 * @priv: pointer to &struct lbtf_private
 * @mode: one of &enum lbtf_mode
 *
 * hdr.command/seqnum/result are filled in later by __lbtf_cmd_async().
 */
void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode)
{
	struct cmd_ds_set_mode cmd;

	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.mode = cpu_to_le16(mode);
	lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd));
}
289
290void lbtf_set_bssid(struct lbtf_private *priv, bool activate, u8 *bssid)
291{
292 struct cmd_ds_set_bssid cmd;
293
294 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
295 cmd.activate = activate ? 1 : 0;
296 if (activate)
297 memcpy(cmd.bssid, bssid, ETH_ALEN);
298
299 lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd));
300}
301
302int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
303{
304 struct cmd_ds_802_11_mac_address cmd;
305
306 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
307 cmd.action = cpu_to_le16(CMD_ACT_SET);
308
309 memcpy(cmd.macadd, mac_addr, ETH_ALEN);
310
311 lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd));
312 return 0;
313}
314
/**
 * lbtf_set_radio_control - synchronously set preamble mode and RF on/off
 *
 * @priv: pointer to &struct lbtf_private; reads priv->preamble and
 *        priv->radioon
 *
 * Returns the result of lbtf_cmd_with_response() (0 on success).
 */
int lbtf_set_radio_control(struct lbtf_private *priv)
{
	int ret = 0;
	struct cmd_ds_802_11_radio_control cmd;

	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.action = cpu_to_le16(CMD_ACT_SET);

	/* cmd.control is always assigned here, so the |=/&= below never
	   operate on an uninitialized value. */
	switch (priv->preamble) {
	case CMD_TYPE_SHORT_PREAMBLE:
		cmd.control = cpu_to_le16(SET_SHORT_PREAMBLE);
		break;

	case CMD_TYPE_LONG_PREAMBLE:
		cmd.control = cpu_to_le16(SET_LONG_PREAMBLE);
		break;

	case CMD_TYPE_AUTO_PREAMBLE:
	default:
		cmd.control = cpu_to_le16(SET_AUTO_PREAMBLE);
		break;
	}

	/* Fold the RF on/off bit into the little-endian control word. */
	if (priv->radioon)
		cmd.control |= cpu_to_le16(TURN_ON_RF);
	else
		cmd.control &= cpu_to_le16(~TURN_ON_RF);

	ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
	return ret;
}
346
347void lbtf_set_mac_control(struct lbtf_private *priv)
348{
349 struct cmd_ds_mac_control cmd;
350 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
351 cmd.action = cpu_to_le16(priv->mac_control);
352 cmd.reserved = 0;
353
354 lbtf_cmd_async(priv, CMD_MAC_CONTROL,
355 &cmd.hdr, sizeof(cmd));
356}
357
358/**
359 * lbtf_allocate_cmd_buffer - Allocates cmd buffer, links it to free cmd queue
360 *
361 * @priv A pointer to struct lbtf_private structure
362 *
363 * Returns: 0 on success.
364 */
365int lbtf_allocate_cmd_buffer(struct lbtf_private *priv)
366{
367 u32 bufsize;
368 u32 i;
369 struct cmd_ctrl_node *cmdarray;
370
371 /* Allocate and initialize the command array */
372 bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS;
373 cmdarray = kzalloc(bufsize, GFP_KERNEL);
374 if (!cmdarray)
375 return -1;
376 priv->cmd_array = cmdarray;
377
378 /* Allocate and initialize each command buffer in the command array */
379 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
380 cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL);
381 if (!cmdarray[i].cmdbuf)
382 return -1;
383 }
384
385 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
386 init_waitqueue_head(&cmdarray[i].cmdwait_q);
387 lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]);
388 }
389 return 0;
390}
391
392/**
393 * lbtf_free_cmd_buffer - Frees the cmd buffer.
394 *
395 * @priv A pointer to struct lbtf_private structure
396 *
397 * Returns: 0
398 */
399int lbtf_free_cmd_buffer(struct lbtf_private *priv)
400{
401 struct cmd_ctrl_node *cmdarray;
402 unsigned int i;
403
404 /* need to check if cmd array is allocated or not */
405 if (priv->cmd_array == NULL)
406 return 0;
407
408 cmdarray = priv->cmd_array;
409
410 /* Release shared memory buffers */
411 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
412 kfree(cmdarray[i].cmdbuf);
413 cmdarray[i].cmdbuf = NULL;
414 }
415
416 /* Release cmd_ctrl_node */
417 kfree(priv->cmd_array);
418 priv->cmd_array = NULL;
419
420 return 0;
421}
422
423/**
424 * lbtf_get_cmd_ctrl_node - Gets free cmd node from free cmd queue.
425 *
426 * @priv A pointer to struct lbtf_private structure
427 *
428 * Returns: pointer to a struct cmd_ctrl_node or NULL if none available.
429 */
430static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
431{
432 struct cmd_ctrl_node *tempnode;
433 unsigned long flags;
434
435 if (!priv)
436 return NULL;
437
438 spin_lock_irqsave(&priv->driver_lock, flags);
439
440 if (!list_empty(&priv->cmdfreeq)) {
441 tempnode = list_first_entry(&priv->cmdfreeq,
442 struct cmd_ctrl_node, list);
443 list_del(&tempnode->list);
444 } else
445 tempnode = NULL;
446
447 spin_unlock_irqrestore(&priv->driver_lock, flags);
448
449 return tempnode;
450}
451
/**
 * lbtf_execute_next_command: execute next command in cmd pending queue.
 *
 * @priv A pointer to struct lbtf_private structure
 *
 * Pops the head of cmdpendingq and submits it, unless a command is
 * already in flight (priv->cur_cmd).  Returns: 0 on success, -1 if a
 * command is already outstanding.
 */
int lbtf_execute_next_command(struct lbtf_private *priv)
{
	struct cmd_ctrl_node *cmdnode = NULL;
	struct cmd_header *cmd;
	unsigned long flags;

	/* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the
	 * only caller to us is lbtf_thread() and we get even when a
	 * data packet is received */

	spin_lock_irqsave(&priv->driver_lock, flags);

	/* Only one command may be outstanding at a time. */
	if (priv->cur_cmd) {
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		return -1;
	}

	if (!list_empty(&priv->cmdpendingq)) {
		cmdnode = list_first_entry(&priv->cmdpendingq,
					   struct cmd_ctrl_node, list);
	}

	if (cmdnode) {
		cmd = cmdnode->cmdbuf;

		/* Drop the lock before submitting; lbtf_submit_command()
		   re-takes driver_lock itself. */
		list_del(&cmdnode->list);
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		lbtf_submit_command(priv, cmdnode);
	} else
		spin_unlock_irqrestore(&priv->driver_lock, flags);
	return 0;
}
491
/**
 * __lbtf_cmd_async - queue a command for asynchronous execution
 *
 * @priv:         pointer to &struct lbtf_private
 * @command:      command id to place in the header
 * @in_cmd:       caller-built command body (copied into the node's buffer)
 * @in_cmd_size:  size of @in_cmd in bytes
 * @callback:     optional completion callback (may be NULL)
 * @callback_arg: opaque argument passed to @callback
 *
 * Returns the queued node, or ERR_PTR(-ENOENT) if the device is gone,
 * or ERR_PTR(-ENOBUFS) if no free command node is available.
 */
static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
			uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
			int (*callback)(struct lbtf_private *, unsigned long,
					struct cmd_header *),
			unsigned long callback_arg)
{
	struct cmd_ctrl_node *cmdnode;

	if (priv->surpriseremoved)
		return ERR_PTR(-ENOENT);

	cmdnode = lbtf_get_cmd_ctrl_node(priv);
	if (cmdnode == NULL) {
		/* Wake up main thread to execute next command */
		queue_work(lbtf_wq, &priv->cmd_work);
		return ERR_PTR(-ENOBUFS);
	}

	cmdnode->callback = callback;
	cmdnode->callback_arg = callback_arg;

	/* Copy the incoming command to the buffer */
	memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size);

	/* Set sequence number, clean result, move to buffer */
	priv->seqnum++;
	cmdnode->cmdbuf->command = cpu_to_le16(command);
	cmdnode->cmdbuf->size    = cpu_to_le16(in_cmd_size);
	cmdnode->cmdbuf->seqnum  = cpu_to_le16(priv->seqnum);
	cmdnode->cmdbuf->result  = 0;
	cmdnode->cmdwaitqwoken = 0;
	/* Queue the node and kick the worker to submit it. */
	lbtf_queue_cmd(priv, cmdnode);
	queue_work(lbtf_wq, &priv->cmd_work);

	return cmdnode;
}
528
/*
 * Fire-and-forget variant of __lbtf_cmd_async(): no callback, and any
 * ERR_PTR result (device removed / no free nodes) is deliberately ignored.
 */
void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
	struct cmd_header *in_cmd, int in_cmd_size)
{
	__lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0);
}
534
535int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
536 struct cmd_header *in_cmd, int in_cmd_size,
537 int (*callback)(struct lbtf_private *,
538 unsigned long, struct cmd_header *),
539 unsigned long callback_arg)
540{
541 struct cmd_ctrl_node *cmdnode;
542 unsigned long flags;
543 int ret = 0;
544
545 cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size,
546 callback, callback_arg);
547 if (IS_ERR(cmdnode))
548 return PTR_ERR(cmdnode);
549
550 might_sleep();
551 ret = wait_event_interruptible(cmdnode->cmdwait_q,
552 cmdnode->cmdwaitqwoken);
553 if (ret) {
554 printk(KERN_DEBUG
555 "libertastf: command 0x%04x interrupted by signal",
556 command);
557 return ret;
558 }
559
560 spin_lock_irqsave(&priv->driver_lock, flags);
561 ret = cmdnode->result;
562 if (ret)
563 printk(KERN_DEBUG "libertastf: command 0x%04x failed: %d\n",
564 command, ret);
565
566 __lbtf_cleanup_and_insert_cmd(priv, cmdnode);
567 spin_unlock_irqrestore(&priv->driver_lock, flags);
568
569 return ret;
570}
571EXPORT_SYMBOL_GPL(__lbtf_cmd);
572
/*
 * Note that a command response has arrived and kick the worker, which
 * will call lbtf_process_rx_command().  Call holding driver_lock.
 */
void lbtf_cmd_response_rx(struct lbtf_private *priv)
{
	priv->cmd_response_rxed = 1;
	queue_work(lbtf_wq, &priv->cmd_work);
}
EXPORT_SYMBOL_GPL(lbtf_cmd_response_rx);
580
/**
 * lbtf_process_rx_command - validate and complete a firmware response
 *
 * @priv: pointer to &struct lbtf_private; reads priv->cmd_resp_buff
 *
 * Matches the response in cmd_resp_buff against priv->cur_cmd (sequence
 * number and command id), cancels the command timer, invokes the node's
 * callback (with driver_lock dropped) and completes the command.
 * Returns 0 on success, -1 if there is no current command or the
 * response is stale/failed/-EAGAIN.
 */
int lbtf_process_rx_command(struct lbtf_private *priv)
{
	uint16_t respcmd, curcmd;
	struct cmd_header *resp;
	int ret = 0;
	unsigned long flags;
	uint16_t result;

	mutex_lock(&priv->lock);
	spin_lock_irqsave(&priv->driver_lock, flags);

	/* Response with no command outstanding: nothing to match it to. */
	if (!priv->cur_cmd) {
		ret = -1;
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		goto done;
	}

	resp = (void *)priv->cmd_resp_buff;
	curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command);
	respcmd = le16_to_cpu(resp->command);
	result = le16_to_cpu(resp->result);

	if (net_ratelimit())
		printk(KERN_DEBUG "libertastf: cmd response 0x%04x, seq %d, size %d\n",
		       respcmd, le16_to_cpu(resp->seqnum),
		       le16_to_cpu(resp->size));

	/* Both sides are __le16, so compare without byte-swapping. */
	if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) {
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		ret = -1;
		goto done;
	}
	if (respcmd != CMD_RET(curcmd)) {
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		ret = -1;
		goto done;
	}

	if (resp->result == cpu_to_le16(0x0004)) {
		/* 0x0004 means -EAGAIN. Drop the response, let it time out
		   and be resubmitted */
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		ret = -1;
		goto done;
	}

	/* Now we got response from FW, cancel the command timer */
	del_timer(&priv->command_timer);
	priv->cmd_timed_out = 0;
	if (priv->nr_retries)
		priv->nr_retries = 0;

	/* If the command is not successful, cleanup and return failure */
	if ((result != 0 || !(respcmd & 0x8000))) {
		/*
		 * Handling errors here
		 */
		switch (respcmd) {
		case CMD_RET(CMD_GET_HW_SPEC):
		case CMD_RET(CMD_802_11_RESET):
			printk(KERN_DEBUG "libertastf: reset failed\n");
			break;

		}
		lbtf_complete_command(priv, priv->cur_cmd, result);
		spin_unlock_irqrestore(&priv->driver_lock, flags);

		ret = -1;
		goto done;
	}

	spin_unlock_irqrestore(&priv->driver_lock, flags);

	/* Callback runs unlocked; it may issue further commands. */
	if (priv->cur_cmd && priv->cur_cmd->callback) {
		ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg,
				resp);
	}
	spin_lock_irqsave(&priv->driver_lock, flags);

	/* Re-check cur_cmd: the callback path may have completed it. */
	if (priv->cur_cmd) {
		/* Clean up and Put current command back to cmdfreeq */
		lbtf_complete_command(priv, priv->cur_cmd, result);
	}
	spin_unlock_irqrestore(&priv->driver_lock, flags);

done:
	mutex_unlock(&priv->lock);
	return ret;
}
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
new file mode 100644
index 000000000000..1cc03a8dd67a
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -0,0 +1,766 @@
1/*
2 * Copyright (C) 2008, cozybit Inc.
3 * Copyright (C) 2003-2006, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 */
10#include <linux/delay.h>
11#include <linux/moduleparam.h>
12#include <linux/firmware.h>
13#include <linux/netdevice.h>
14#include <linux/usb.h>
15
16#define DRV_NAME "lbtf_usb"
17
18#include "libertas_tf.h"
19#include "if_usb.h"
20
21#define MESSAGE_HEADER_LEN 4
22
23static char *lbtf_fw_name = "lbtf_usb.bin";
24module_param_named(fw_name, lbtf_fw_name, charp, 0644);
25
26static struct usb_device_id if_usb_table[] = {
27 /* Enter the device signature inside */
28 { USB_DEVICE(0x1286, 0x2001) },
29 { USB_DEVICE(0x05a3, 0x8388) },
30 {} /* Terminating entry */
31};
32
33MODULE_DEVICE_TABLE(usb, if_usb_table);
34
35static void if_usb_receive(struct urb *urb);
36static void if_usb_receive_fwload(struct urb *urb);
37static int if_usb_prog_firmware(struct if_usb_card *cardp);
38static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
39 uint8_t *payload, uint16_t nb);
40static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
41 uint16_t nb, u8 data);
42static void if_usb_free(struct if_usb_card *cardp);
43static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
44static int if_usb_reset_device(struct if_usb_card *cardp);
45
/**
 * if_usb_write_bulk_callback - call back to handle URB status
 *
 * @param urb pointer to urb structure
 *
 * Completion handler for all outbound bulk URBs; only logs failures.
 */
static void if_usb_write_bulk_callback(struct urb *urb)
{
	if (urb->status != 0)
		printk(KERN_INFO "libertastf: URB in failure status: %d\n",
		       urb->status);
}
57
/**
 * if_usb_free - free tx/rx urb, skb and rx buffer
 *
 * @param cardp pointer if_usb_card
 *
 * Kills any in-flight URBs before freeing them, then releases the
 * outbound staging buffer.  Pointers are NULLed so a second call is safe.
 */
static void if_usb_free(struct if_usb_card *cardp)
{
	/* Unlink tx & rx urb */
	usb_kill_urb(cardp->tx_urb);
	usb_kill_urb(cardp->rx_urb);
	usb_kill_urb(cardp->cmd_urb);

	usb_free_urb(cardp->tx_urb);
	cardp->tx_urb = NULL;

	usb_free_urb(cardp->rx_urb);
	cardp->rx_urb = NULL;

	usb_free_urb(cardp->cmd_urb);
	cardp->cmd_urb = NULL;

	kfree(cardp->ep_out_buf);
	cardp->ep_out_buf = NULL;
}
82
/*
 * Post-download setup: resubmit the normal RX URB and tell the firmware
 * which boot2 version the device reported at probe time.
 */
static void if_usb_setup_firmware(struct lbtf_private *priv)
{
	struct if_usb_card *cardp = priv->card;
	struct cmd_ds_set_boot2_ver b2_cmd;

	if_usb_submit_rx_urb(cardp);
	b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd));
	b2_cmd.action = 0;
	b2_cmd.version = cardp->boot2_version;

	if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd))
		printk(KERN_INFO "libertastf: setting boot2 version failed\n");
}
96
/*
 * Firmware-download watchdog.  @priv is actually the if_usb_card pointer
 * stashed in the timer (see setup_timer() in if_usb_probe()).  If the
 * download hasn't finished, flag the device as gone; either way wake the
 * waiter in if_usb_prog_firmware().
 */
static void if_usb_fw_timeo(unsigned long priv)
{
	struct if_usb_card *cardp = (void *)priv;

	if (!cardp->fwdnldover)
		/* Download timed out */
		cardp->priv->surpriseremoved = 1;
	wake_up(&cardp->fw_wq);
}
106
/**
 * if_usb_probe - sets the configuration values
 *
 * @intf USB interface structure
 * @id pointer to usb_device_id
 *
 * Discovers the bulk-in/bulk-out endpoints, allocates the three URBs and
 * the outbound staging buffer, registers with the lbtf core and wires up
 * the hardware callbacks.
 *
 * Returns: 0 on success, -ENOMEM on any failure (all partial allocations
 * are released via if_usb_free()).
 */
static int if_usb_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	struct lbtf_private *priv;
	struct if_usb_card *cardp;
	int i;

	udev = interface_to_usbdev(intf);

	cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
	if (!cardp)
		goto error;

	setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
	init_waitqueue_head(&cardp->fw_wq);

	cardp->udev = udev;
	iface_desc = intf->cur_altsetting;

	/* Record the first bulk-in and bulk-out endpoints we find. */
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		if (usb_endpoint_is_bulk_in(endpoint)) {
			cardp->ep_in_size =
				le16_to_cpu(endpoint->wMaxPacketSize);
			cardp->ep_in = usb_endpoint_num(endpoint);
		} else if (usb_endpoint_is_bulk_out(endpoint)) {
			cardp->ep_out_size =
				le16_to_cpu(endpoint->wMaxPacketSize);
			cardp->ep_out = usb_endpoint_num(endpoint);
		}
	}
	if (!cardp->ep_out_size || !cardp->ep_in_size)
		/* Endpoints not found */
		goto dealloc;

	cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!cardp->rx_urb)
		goto dealloc;

	cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!cardp->tx_urb)
		goto dealloc;

	cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!cardp->cmd_urb)
		goto dealloc;

	cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE,
				    GFP_KERNEL);
	if (!cardp->ep_out_buf)
		goto dealloc;

	priv = lbtf_add_card(cardp, &udev->dev);
	if (!priv)
		goto dealloc;

	cardp->priv = priv;

	priv->hw_host_to_card = if_usb_host_to_card;
	priv->hw_prog_firmware = if_usb_prog_firmware;
	priv->hw_reset_device = if_usb_reset_device;
	/* bcdDevice is already little-endian; stored as-is in __le16. */
	cardp->boot2_version = udev->descriptor.bcdDevice;

	usb_get_dev(udev);
	usb_set_intfdata(intf, cardp);

	return 0;

dealloc:
	if_usb_free(cardp);
error:
	return -ENOMEM;
}
191
/**
 * if_usb_disconnect - free resource and cleanup
 *
 * @intf USB interface structure
 *
 * Resets the hardware, unregisters from the lbtf core, frees all URBs
 * and drops the device reference taken in if_usb_probe().
 */
static void if_usb_disconnect(struct usb_interface *intf)
{
	struct if_usb_card *cardp = usb_get_intfdata(intf);
	struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;

	if_usb_reset_device(cardp);

	if (priv)
		lbtf_remove_card(priv);

	/* Unlink and free urb */
	if_usb_free(cardp);

	usb_set_intfdata(intf, NULL);
	usb_put_dev(interface_to_usbdev(intf));
}
213
/**
 * if_usb_send_fw_pkt - send the next firmware block to the device
 *
 * @cardp pointer to struct if_usb_card
 *
 * Sends one &struct fwdata block taken at cardp->totalbytes from the
 * firmware image, with the sequence number inserted between header and
 * payload.  If the previous block failed its CRC check, backs up and
 * resends it.  Returns: 0
 */
static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
{
	struct fwdata *fwdata = cardp->ep_out_buf;
	u8 *firmware = (u8 *) cardp->fw->data;

	/* If we got a CRC failure on the last block, back
	   up and retry it */
	if (!cardp->CRC_OK) {
		cardp->totalbytes = cardp->fwlastblksent;
		cardp->fwseqnum--;
	}

	/* struct fwdata (which we sent to the card) has an
	   extra __le32 field in between the header and the data,
	   which is not in the struct fwheader in the actual
	   firmware binary. Insert the seqnum in the middle... */
	memcpy(&fwdata->hdr, &firmware[cardp->totalbytes],
	       sizeof(struct fwheader));

	cardp->fwlastblksent = cardp->totalbytes;
	cardp->totalbytes += sizeof(struct fwheader);

	memcpy(fwdata->data, &firmware[cardp->totalbytes],
	       le32_to_cpu(fwdata->hdr.datalength));

	fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum);
	cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength);

	usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) +
		     le32_to_cpu(fwdata->hdr.datalength), 0);

	if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK))
		/* Host has finished FW downloading;
		 * downloading the FW JUMP BLOCK
		 */
		cardp->fwfinalblk = 1;

	return 0;
}
260
/*
 * Ask the firmware to halt (CMD_802_11_RESET / CMD_ACT_HALT), then reset
 * the USB device itself.  The 100 ms sleeps bracket the USB reset to give
 * the firmware and the bus time to settle.  Returns usb_reset_device()'s
 * result.
 */
static int if_usb_reset_device(struct if_usb_card *cardp)
{
	/* Command body lives after the 4-byte CMD_TYPE_REQUEST marker. */
	struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4;
	int ret;

	*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);

	cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET);
	cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset));
	cmd->hdr.result = cpu_to_le16(0);
	cmd->hdr.seqnum = cpu_to_le16(0x5a5a);
	cmd->action = cpu_to_le16(CMD_ACT_HALT);
	usb_tx_block(cardp, cardp->ep_out_buf,
		     4 + sizeof(struct cmd_ds_802_11_reset), 0);

	msleep(100);
	ret = usb_reset_device(cardp->udev);
	msleep(100);

	return ret;
}
EXPORT_SYMBOL_GPL(if_usb_reset_device);
283
/**
 * usb_tx_block - transfer data to the device
 *
 * @cardp pointer to struct if_usb_card
 * @payload pointer to payload data
 * @nb data length
 * @data non-zero for data (uses tx_urb), zero for commands (uses cmd_urb)
 *
 * Returns: 0 on success, nonzero otherwise.
 */
static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
			uint16_t nb, u8 data)
{
	struct urb *urb;

	/* check if device is removed */
	if (cardp->priv->surpriseremoved)
		return -1;

	/* Data and commands travel on separate URBs so one in-flight
	   transfer of each kind can coexist. */
	if (data)
		urb = cardp->tx_urb;
	else
		urb = cardp->cmd_urb;

	usb_fill_bulk_urb(urb, cardp->udev,
			  usb_sndbulkpipe(cardp->udev,
					  cardp->ep_out),
			  payload, nb, if_usb_write_bulk_callback, cardp);

	urb->transfer_flags |= URB_ZERO_PACKET;

	if (usb_submit_urb(urb, GFP_ATOMIC))
		return -1;
	return 0;
}
319
320static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
321 void (*callbackfn)(struct urb *urb))
322{
323 struct sk_buff *skb;
324
325 skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
326 if (!skb)
327 return -1;
328
329 cardp->rx_skb = skb;
330
331 /* Fill the receive configuration URB and initialise the Rx call back */
332 usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
333 usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
334 (void *) (skb->tail),
335 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
336
337 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
338
339 if (usb_submit_urb(cardp->rx_urb, GFP_ATOMIC)) {
340 kfree_skb(skb);
341 cardp->rx_skb = NULL;
342 return -1;
343 } else
344 return 0;
345}
346
347static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp)
348{
349 return __if_usb_submit_rx_urb(cardp, &if_usb_receive_fwload);
350}
351
352static int if_usb_submit_rx_urb(struct if_usb_card *cardp)
353{
354 return __if_usb_submit_rx_urb(cardp, &if_usb_receive);
355}
356
/*
 * RX completion handler used only during firmware download.  Handles, in
 * order: the post-download "firmware ready" event, boot command responses
 * (with a quirk for old bcdDevice < 0x3106 units that send none), and the
 * per-block CRC sync headers that drive if_usb_send_fw_pkt().
 */
static void if_usb_receive_fwload(struct urb *urb)
{
	struct if_usb_card *cardp = urb->context;
	struct sk_buff *skb = cardp->rx_skb;
	struct fwsyncheader *syncfwheader;
	struct bootcmdresp bcmdresp;

	if (urb->status) {
		kfree_skb(skb);
		return;
	}

	/* Download already finished: we only care about the FW-ready event. */
	if (cardp->fwdnldover) {
		__le32 *tmp = (__le32 *)(skb->data);

		if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) &&
		    tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY))
			/* Firmware ready event received */
			wake_up(&cardp->fw_wq);
		else
			if_usb_submit_rx_urb_fwload(cardp);
		kfree_skb(skb);
		return;
	}
	/* Still waiting for the boot command response. */
	if (cardp->bootcmdresp <= 0) {
		memcpy(&bcmdresp, skb->data, sizeof(bcmdresp));

		/* Old units never answer the boot command; just proceed. */
		if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) {
			kfree_skb(skb);
			if_usb_submit_rx_urb_fwload(cardp);
			cardp->bootcmdresp = 1;
			/* Received valid boot command response */
			return;
		}
		if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) {
			/* A regular command magic here means no boot2
			   support: mark the boot command as rejected. */
			if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) ||
			    bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) ||
			    bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION))
				cardp->bootcmdresp = -1;
		} else if (bcmdresp.cmd == BOOT_CMD_FW_BY_USB &&
			   bcmdresp.result == BOOT_CMD_RESP_OK)
			cardp->bootcmdresp = 1;

		kfree_skb(skb);
		if_usb_submit_rx_urb_fwload(cardp);
		return;
	}

	syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
	if (!syncfwheader) {
		kfree_skb(skb);
		return;
	}

	memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));

	/* Zero cmd means the device accepted the last block's CRC. */
	if (!syncfwheader->cmd)
		cardp->CRC_OK = 1;
	else
		cardp->CRC_OK = 0;
	kfree_skb(skb);

	/* reschedule timer for 200ms hence */
	mod_timer(&cardp->fw_timeout, jiffies + (HZ/5));

	if (cardp->fwfinalblk) {
		cardp->fwdnldover = 1;
		goto exit;
	}

	if_usb_send_fw_pkt(cardp);

 exit:
	if_usb_submit_rx_urb_fwload(cardp);

	kfree(syncfwheader);

	return;
}
436
437#define MRVDRV_MIN_PKT_LEN 30
438
/*
 * Hand a received data frame up to the stack: sanity-check the length,
 * strip the 4-byte message header and pass the skb to lbtf_rx().
 * Undersized or oversized frames are dropped.  @cardp is unused here.
 */
static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
				       struct if_usb_card *cardp,
				       struct lbtf_private *priv)
{
	if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN
	    || recvlength < MRVDRV_MIN_PKT_LEN) {
		kfree_skb(skb);
		return;
	}

	skb_put(skb, recvlength);
	skb_pull(skb, MESSAGE_HEADER_LEN);
	lbtf_rx(priv, skb);
}
453
454static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
455 struct sk_buff *skb,
456 struct if_usb_card *cardp,
457 struct lbtf_private *priv)
458{
459 if (recvlength > LBS_CMD_BUFFER_SIZE) {
460 kfree_skb(skb);
461 return;
462 }
463
464 if (!in_interrupt())
465 BUG();
466
467 spin_lock(&priv->driver_lock);
468 memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
469 recvlength - MESSAGE_HEADER_LEN);
470 kfree_skb(skb);
471 lbtf_cmd_response_rx(priv);
472 spin_unlock(&priv->driver_lock);
473}
474
/**
 * if_usb_receive - read data received from the device.
 *
 * @urb pointer to struct urb
 *
 * Runtime RX completion handler: dispatches on the 4-byte type word at
 * the head of the buffer (data frame, command response, or event
 * indication) and resubmits the RX URB.
 */
static void if_usb_receive(struct urb *urb)
{
	struct if_usb_card *cardp = urb->context;
	struct sk_buff *skb = cardp->rx_skb;
	struct lbtf_private *priv = cardp->priv;
	int recvlength = urb->actual_length;
	uint8_t *recvbuff = NULL;
	uint32_t recvtype = 0;
	__le32 *pkt = (__le32 *) skb->data;

	if (recvlength) {
		if (urb->status) {
			kfree_skb(skb);
			goto setup_for_next;
		}

		recvbuff = skb->data;
		recvtype = le32_to_cpu(pkt[0]);
	} else if (urb->status) {
		/* Zero-length failed URB: drop without resubmitting. */
		kfree_skb(skb);
		return;
	}
	/* Note: recvlength == 0 with status 0 falls through with
	   recvtype == 0 and is dropped by the default case below. */

	switch (recvtype) {
	case CMD_TYPE_DATA:
		process_cmdtypedata(recvlength, skb, cardp, priv);
		break;

	case CMD_TYPE_REQUEST:
		process_cmdrequest(recvlength, recvbuff, skb, cardp, priv);
		break;

	case CMD_TYPE_INDICATION:
	{
		/* Event cause handling */
		u32 event_cause = le32_to_cpu(pkt[1]);

		/* Icky undocumented magic special case */
		if (event_cause & 0xffff0000) {
			u16 tmp;
			u8 retrycnt;
			u8 failure;

			/* High half encodes TX feedback: retry count in the
			   low byte, failure flag in the high byte. */
			tmp = event_cause >> 16;
			retrycnt = tmp & 0x00ff;
			failure = (tmp & 0xff00) >> 8;
			lbtf_send_tx_feedback(priv, retrycnt, failure);
		} else if (event_cause == LBTF_EVENT_BCN_SENT)
			lbtf_bcn_sent(priv);
		else
			printk(KERN_DEBUG
			       "Unsupported notification %d received\n",
			       event_cause);
		kfree_skb(skb);
		break;
	}
	default:
		printk(KERN_DEBUG "libertastf: unknown command type 0x%X\n",
		       recvtype);
		kfree_skb(skb);
		break;
	}

setup_for_next:
	if_usb_submit_rx_urb(cardp);
}
546
/**
 * if_usb_host_to_card - Download data to the device
 *
 * @priv pointer to struct lbtf_private structure
 * @type MVMS_CMD for commands, anything else is treated as data
 * @payload pointer to the data buffer
 * @nb number of bytes
 *
 * Prepends the 4-byte type marker, stages the payload in ep_out_buf and
 * transmits it.  Returns: 0 on success, nonzero otherwise
 */
static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
			       uint8_t *payload, uint16_t nb)
{
	struct if_usb_card *cardp = priv->card;
	u8 data = 0;

	if (type == MVMS_CMD) {
		*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
	} else {
		*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_DATA);
		data = 1;
	}

	memcpy((cardp->ep_out_buf + MESSAGE_HEADER_LEN), payload, nb);

	return usb_tx_block(cardp, cardp->ep_out_buf, nb + MESSAGE_HEADER_LEN,
			    data);
}
575
/**
 * if_usb_issue_boot_command - Issue boot command to Boot2.
 *
 * @cardp pointer to struct if_usb_card
 * @ivalue 1 boots from FW by USB-Download, 2 boots from FW in EEPROM.
 *
 * Fire-and-forget: the usb_tx_block() result is deliberately ignored;
 * the caller polls cardp->bootcmdresp instead.  Returns: 0
 */
static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue)
{
	struct bootcmd *bootcmd = cardp->ep_out_buf;

	/* Prepare command */
	bootcmd->magic = cpu_to_le32(BOOT_CMD_MAGIC_NUMBER);
	bootcmd->cmd = ivalue;
	memset(bootcmd->pad, 0, sizeof(bootcmd->pad));

	/* Issue command */
	usb_tx_block(cardp, cardp->ep_out_buf, sizeof(*bootcmd), 0);

	return 0;
}
597
598
599/**
600 * check_fwfile_format - Check the validity of Boot2/FW image.
601 *
602 * @data pointer to image
603 * @totlen image length
604 *
605 * Returns: 0 if the image is valid, nonzero otherwise.
606 */
607static int check_fwfile_format(const u8 *data, u32 totlen)
608{
609 u32 bincmd, exit;
610 u32 blksize, offset, len;
611 int ret;
612
613 ret = 1;
614 exit = len = 0;
615
616 do {
617 struct fwheader *fwh = (void *) data;
618
619 bincmd = le32_to_cpu(fwh->dnldcmd);
620 blksize = le32_to_cpu(fwh->datalength);
621 switch (bincmd) {
622 case FW_HAS_DATA_TO_RECV:
623 offset = sizeof(struct fwheader) + blksize;
624 data += offset;
625 len += offset;
626 if (len >= totlen)
627 exit = 1;
628 break;
629 case FW_HAS_LAST_BLOCK:
630 exit = 1;
631 ret = 0;
632 break;
633 default:
634 exit = 1;
635 break;
636 }
637 } while (!exit);
638
639 if (ret)
640 printk(KERN_INFO
641 "libertastf: firmware file format check failed\n");
642 return ret;
643}
644
645
646static int if_usb_prog_firmware(struct if_usb_card *cardp)
647{
648 int i = 0;
649 static int reset_count = 10;
650 int ret = 0;
651
652 ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
653 if (ret < 0) {
654 printk(KERN_INFO "libertastf: firmware %s not found\n",
655 lbtf_fw_name);
656 goto done;
657 }
658
659 if (check_fwfile_format(cardp->fw->data, cardp->fw->size))
660 goto release_fw;
661
662restart:
663 if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
664 ret = -1;
665 goto release_fw;
666 }
667
668 cardp->bootcmdresp = 0;
669 do {
670 int j = 0;
671 i++;
672 /* Issue Boot command = 1, Boot from Download-FW */
673 if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB);
674 /* wait for command response */
675 do {
676 j++;
677 msleep_interruptible(100);
678 } while (cardp->bootcmdresp == 0 && j < 10);
679 } while (cardp->bootcmdresp == 0 && i < 5);
680
681 if (cardp->bootcmdresp <= 0) {
682 if (--reset_count >= 0) {
683 if_usb_reset_device(cardp);
684 goto restart;
685 }
686 return -1;
687 }
688
689 i = 0;
690
691 cardp->totalbytes = 0;
692 cardp->fwlastblksent = 0;
693 cardp->CRC_OK = 1;
694 cardp->fwdnldover = 0;
695 cardp->fwseqnum = -1;
696 cardp->totalbytes = 0;
697 cardp->fwfinalblk = 0;
698
699 /* Send the first firmware packet... */
700 if_usb_send_fw_pkt(cardp);
701
702 /* ... and wait for the process to complete */
703 wait_event_interruptible(cardp->fw_wq, cardp->priv->surpriseremoved ||
704 cardp->fwdnldover);
705
706 del_timer_sync(&cardp->fw_timeout);
707 usb_kill_urb(cardp->rx_urb);
708
709 if (!cardp->fwdnldover) {
710 printk(KERN_INFO "libertastf: failed to load fw,"
711 " resetting device!\n");
712 if (--reset_count >= 0) {
713 if_usb_reset_device(cardp);
714 goto restart;
715 }
716
717 printk(KERN_INFO "libertastf: fw download failure\n");
718 ret = -1;
719 goto release_fw;
720 }
721
722 cardp->priv->fw_ready = 1;
723
724 release_fw:
725 release_firmware(cardp->fw);
726 cardp->fw = NULL;
727
728 if_usb_setup_firmware(cardp->priv);
729
730 done:
731 return ret;
732}
733EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
734
735
/* Power management is not implemented; NULL handlers are legal here. */
#define if_usb_suspend NULL
#define if_usb_resume NULL

/* USB driver glue: ties the device table to the probe/disconnect paths. */
static struct usb_driver if_usb_driver = {
	.name = DRV_NAME,
	.probe = if_usb_probe,
	.disconnect = if_usb_disconnect,
	.id_table = if_usb_table,
	.suspend = if_usb_suspend,
	.resume = if_usb_resume,
};
747
748static int __init if_usb_init_module(void)
749{
750 int ret = 0;
751
752 ret = usb_register(&if_usb_driver);
753 return ret;
754}
755
/* Module exit point: unregister the driver from the USB core. */
static void __exit if_usb_exit_module(void)
{
	usb_deregister(&if_usb_driver);
}
760
761module_init(if_usb_init_module);
762module_exit(if_usb_exit_module);
763
764MODULE_DESCRIPTION("8388 USB WLAN Thinfirm Driver");
765MODULE_AUTHOR("Cozybit Inc.");
766MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/libertas_tf/if_usb.h b/drivers/net/wireless/libertas_tf/if_usb.h
new file mode 100644
index 000000000000..6fa5b3f59efe
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/if_usb.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright (C) 2008, cozybit Inc.
3 * Copyright (C) 2003-2006, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 */
10#include <linux/wait.h>
11#include <linux/timer.h>
12
13struct lbtf_private;
14
15/**
16 * This file contains definition for USB interface.
17 */
18#define CMD_TYPE_REQUEST 0xF00DFACE
19#define CMD_TYPE_DATA 0xBEADC0DE
20#define CMD_TYPE_INDICATION 0xBEEFFACE
21
22#define BOOT_CMD_FW_BY_USB 0x01
23#define BOOT_CMD_FW_IN_EEPROM 0x02
24#define BOOT_CMD_UPDATE_BOOT2 0x03
25#define BOOT_CMD_UPDATE_FW 0x04
26#define BOOT_CMD_MAGIC_NUMBER 0x4C56524D /* LVRM */
27
/* Boot command sent to the boot2 loader over USB.
 * magic must be BOOT_CMD_MAGIC_NUMBER; cmd is one of the BOOT_CMD_*
 * codes above. Padded to a fixed 16-byte wire size. */
struct bootcmd {
	__le32 magic;		/* BOOT_CMD_MAGIC_NUMBER, little-endian */
	uint8_t cmd;		/* BOOT_CMD_* code */
	uint8_t pad[11];	/* pad packet to 16 bytes on the wire */
};
33
34#define BOOT_CMD_RESP_OK 0x0001
35#define BOOT_CMD_RESP_FAIL 0x0000
36
/* Boot command response from the boot2 loader.
 * cmd echoes the request; result is BOOT_CMD_RESP_OK/FAIL. */
struct bootcmdresp {
	__le32 magic;		/* BOOT_CMD_MAGIC_NUMBER, little-endian */
	uint8_t cmd;		/* echoed BOOT_CMD_* code */
	uint8_t result;		/* BOOT_CMD_RESP_OK or BOOT_CMD_RESP_FAIL */
	uint8_t pad[2];
};
43
44/** USB card description structure*/
/** USB card description structure */
struct if_usb_card {
	struct usb_device *udev;		/* underlying USB device */
	struct urb *rx_urb, *tx_urb, *cmd_urb;	/* in-flight transfer URBs */
	struct lbtf_private *priv;		/* back-pointer to driver state */

	struct sk_buff *rx_skb;			/* skb currently posted for RX */

	uint8_t ep_in;				/* bulk-in endpoint address */
	uint8_t ep_out;				/* bulk-out endpoint address */

	/* boot2 response state: negative until a valid response arrives */
	int8_t bootcmdresp;

	int ep_in_size;				/* bulk-in max packet size */

	void *ep_out_buf;			/* staging buffer for TX */
	int ep_out_size;

	/* Firmware download state (see if_usb_prog_firmware) */
	const struct firmware *fw;		/* firmware image being sent */
	struct timer_list fw_timeout;		/* per-block download timeout */
	wait_queue_head_t fw_wq;		/* woken when download finishes */
	uint32_t fwseqnum;			/* sequence number of last block */
	uint32_t totalbytes;			/* bytes of fw image consumed */
	uint32_t fwlastblksent;			/* offset of last block sent */
	uint8_t CRC_OK;				/* firmware acked last block CRC */
	uint8_t fwdnldover;			/* set when download completed */
	uint8_t fwfinalblk;			/* set after final block queued */

	__le16 boot2_version;			/* reported boot2 loader version */
};
74
75/** fwheader */
/** fwheader: per-block header inside the firmware image. */
struct fwheader {
	__le32 dnldcmd;		/* download command (FW_HAS_* flags) */
	__le32 baseaddr;	/* load address for this block */
	__le32 datalength;	/* payload length of this block */
	__le32 CRC;		/* checksum of the block */
};
82
83#define FW_MAX_DATA_BLK_SIZE 600
84/** FWData */
85struct fwdata {
86 struct fwheader hdr;
87 __le32 seqnum;
88 uint8_t data[0];
89};
90
91/** fwsyncheader */
/** fwsyncheader: device ack for a firmware block (cmd + echoed seqnum). */
struct fwsyncheader {
	__le32 cmd;
	__le32 seqnum;
};
96
97#define FW_HAS_DATA_TO_RECV 0x00000001
98#define FW_HAS_LAST_BLOCK 0x00000004
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
new file mode 100644
index 000000000000..8995cd7c29bf
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -0,0 +1,514 @@
1/*
2 * Copyright (C) 2008, cozybit Inc.
3 * Copyright (C) 2007, Red Hat, Inc.
4 * Copyright (C) 2003-2006, Marvell International Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11#include <linux/spinlock.h>
12#include <linux/device.h>
13#include <linux/kthread.h>
14#include <net/mac80211.h>
15
16#ifndef DRV_NAME
17#define DRV_NAME "libertas_tf"
18#endif
19
20#define MRVL_DEFAULT_RETRIES 9
21#define MRVL_PER_PACKET_RATE 0x10
22#define MRVL_MAX_BCN_SIZE 440
23#define CMD_OPTION_WAITFORRSP 0x0002
24
25/* Return command are almost always the same as the host command, but with
26 * bit 15 set high. There are a few exceptions, though...
27 */
28#define CMD_RET(cmd) (0x8000 | cmd)
29
30/* Command codes */
31#define CMD_GET_HW_SPEC 0x0003
32#define CMD_802_11_RESET 0x0005
33#define CMD_MAC_MULTICAST_ADR 0x0010
34#define CMD_802_11_RADIO_CONTROL 0x001c
35#define CMD_802_11_RF_CHANNEL 0x001d
36#define CMD_802_11_RF_TX_POWER 0x001e
37#define CMD_MAC_CONTROL 0x0028
38#define CMD_802_11_MAC_ADDRESS 0x004d
39#define CMD_SET_BOOT2_VER 0x00a5
40#define CMD_802_11_BEACON_CTRL 0x00b0
41#define CMD_802_11_BEACON_SET 0x00cb
42#define CMD_802_11_SET_MODE 0x00cc
43#define CMD_802_11_SET_BSSID 0x00cd
44
45#define CMD_ACT_GET 0x0000
46#define CMD_ACT_SET 0x0001
47
48/* Define action or option for CMD_802_11_RESET */
49#define CMD_ACT_HALT 0x0003
50
51/* Define action or option for CMD_MAC_CONTROL */
52#define CMD_ACT_MAC_RX_ON 0x0001
53#define CMD_ACT_MAC_TX_ON 0x0002
54#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020
55#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040
56#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080
57#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100
58
59/* Define action or option for CMD_802_11_RADIO_CONTROL */
60#define CMD_TYPE_AUTO_PREAMBLE 0x0001
61#define CMD_TYPE_SHORT_PREAMBLE 0x0002
62#define CMD_TYPE_LONG_PREAMBLE 0x0003
63
64#define TURN_ON_RF 0x01
65#define RADIO_ON 0x01
66#define RADIO_OFF 0x00
67
68#define SET_AUTO_PREAMBLE 0x05
69#define SET_SHORT_PREAMBLE 0x03
70#define SET_LONG_PREAMBLE 0x01
71
72/* Define action or option for CMD_802_11_RF_CHANNEL */
73#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00
74#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01
75
76/* Codes for CMD_802_11_SET_MODE */
/* Codes for CMD_802_11_SET_MODE: firmware operating mode.
 * Values are the wire encoding (passive = 0). */
enum lbtf_mode {
	LBTF_PASSIVE_MODE,	/* no TX, monitor-style */
	LBTF_STA_MODE,		/* infrastructure station */
	LBTF_AP_MODE,		/* access point / mesh */
};
82
83/** Card Event definition */
84#define MACREG_INT_CODE_FIRMWARE_READY 48
85/** Buffer Constants */
86
87/* The size of SQ memory PPA, DPA are 8 DWORDs, that keep the physical
88* addresses of TxPD buffers. Station has only 8 TxPD available, Whereas
89* driver has more local TxPDs. Each TxPD on the host memory is associated
90* with a Tx control node. The driver maintains 8 RxPD descriptors for
91* station firmware to store Rx packet information.
92*
93* Current version of MAC has a 32x6 multicast address buffer.
94*
95* 802.11b can have up to 14 channels, the driver keeps the
96* BSSID(MAC address) of each APs or Ad hoc stations it has sensed.
97*/
98
99#define MRVDRV_MAX_MULTICAST_LIST_SIZE 32
100#define LBS_NUM_CMD_BUFFERS 10
101#define LBS_CMD_BUFFER_SIZE (2 * 1024)
102#define MRVDRV_MAX_CHANNEL_SIZE 14
103#define MRVDRV_SNAP_HEADER_LEN 8
104
105#define LBS_UPLD_SIZE 2312
106#define DEV_NAME_LEN 32
107
108/** Misc constants */
109/* This section defines 802.11 specific contants */
110
111#define MRVDRV_MAX_REGION_CODE 6
112/**
113 * the table to keep region code
114 */
115#define LBTF_REGDOMAIN_US 0x10
116#define LBTF_REGDOMAIN_CA 0x20
117#define LBTF_REGDOMAIN_EU 0x30
118#define LBTF_REGDOMAIN_SP 0x31
119#define LBTF_REGDOMAIN_FR 0x32
120#define LBTF_REGDOMAIN_JP 0x40
121
122#define SBI_EVENT_CAUSE_SHIFT 3
123
124/** RxPD status */
125
126#define MRVDRV_RXPD_STATUS_OK 0x0001
127
128
129/* This is for firmware specific length */
130#define EXTRA_LEN 36
131
132#define MRVDRV_ETH_TX_PACKET_BUFFER_SIZE \
133 (ETH_FRAME_LEN + sizeof(struct txpd) + EXTRA_LEN)
134
135#define MRVDRV_ETH_RX_PACKET_BUFFER_SIZE \
136 (ETH_FRAME_LEN + sizeof(struct rxpd) \
137 + MRVDRV_SNAP_HEADER_LEN + EXTRA_LEN)
138
139#define CMD_F_HOSTCMD (1 << 0)
140#define FW_CAPINFO_WPA (1 << 0)
141
142#define RF_ANTENNA_1 0x1
143#define RF_ANTENNA_2 0x2
144#define RF_ANTENNA_AUTO 0xFFFF
145
146#define LBTF_EVENT_BCN_SENT 55
147
148/** Global Variable Declaration */
149/** mv_ms_type */
/** mv_ms_type: message type tag used on the host/card transport. */
enum mv_ms_type {
	MVMS_DAT = 0,		/* data frame */
	MVMS_CMD = 1,		/* command */
	MVMS_TXDONE = 2,	/* TX completion indication */
	MVMS_EVENT		/* asynchronous event */
};
156
157extern struct workqueue_struct *lbtf_wq;
158
159struct lbtf_private;
160
/* Generic register offset/value pair. */
struct lbtf_offset_value {
	u32 offset;
	u32 value;
};

/* Channel span permitted for one regulatory domain. */
struct channel_range {
	u8 regdomain;	/* LBTF_REGDOMAIN_* code */
	u8 start;	/* first allowed channel */
	u8 end; /* exclusive (channel must be less than end) */
};
171
172struct if_usb_card;
173
174/** Private structure for the MV device */
175struct lbtf_private {
176 void *card;
177 struct ieee80211_hw *hw;
178
179 /* Command response buffer */
180 u8 cmd_resp_buff[LBS_UPLD_SIZE];
181 /* Download sent:
182 bit0 1/0=data_sent/data_tx_done,
183 bit1 1/0=cmd_sent/cmd_tx_done,
184 all other bits reserved 0 */
185 struct ieee80211_vif *vif;
186
187 struct work_struct cmd_work;
188 struct work_struct tx_work;
189 /** Hardware access */
190 int (*hw_host_to_card) (struct lbtf_private *priv, u8 type, u8 *payload, u16 nb);
191 int (*hw_prog_firmware) (struct if_usb_card *cardp);
192 int (*hw_reset_device) (struct if_usb_card *cardp);
193
194
195 /** Wlan adapter data structure*/
196 /** STATUS variables */
197 u32 fwrelease;
198 u32 fwcapinfo;
199 /* protected with big lock */
200
201 struct mutex lock;
202
203 /** command-related variables */
204 u16 seqnum;
205 /* protected by big lock */
206
207 struct cmd_ctrl_node *cmd_array;
208 /** Current command */
209 struct cmd_ctrl_node *cur_cmd;
210 /** command Queues */
211 /** Free command buffers */
212 struct list_head cmdfreeq;
213 /** Pending command buffers */
214 struct list_head cmdpendingq;
215
216 /** spin locks */
217 spinlock_t driver_lock;
218
219 /** Timers */
220 struct timer_list command_timer;
221 int nr_retries;
222 int cmd_timed_out;
223
224 u8 cmd_response_rxed;
225
226 /** capability Info used in Association, start, join */
227 u16 capability;
228
229 /** MAC address information */
230 u8 current_addr[ETH_ALEN];
231 u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
232 u32 nr_of_multicastmacaddr;
233 int cur_freq;
234
235 struct sk_buff *skb_to_tx;
236 struct sk_buff *tx_skb;
237
238 /** NIC Operation characteristics */
239 u16 mac_control;
240 u16 regioncode;
241 struct channel_range range;
242
243 u8 radioon;
244 u32 preamble;
245
246 struct ieee80211_channel channels[14];
247 struct ieee80211_rate rates[12];
248 struct ieee80211_supported_band band;
249 struct lbtf_offset_value offsetvalue;
250
251 u8 fw_ready;
252 u8 surpriseremoved;
253 struct sk_buff_head bc_ps_buf;
254};
255
256/* 802.11-related definitions */
257
258/* TxPD descriptor */
259struct txpd {
260 /* Current Tx packet status */
261 __le32 tx_status;
262 /* Tx control */
263 __le32 tx_control;
264 __le32 tx_packet_location;
265 /* Tx packet length */
266 __le16 tx_packet_length;
267 /* First 2 byte of destination MAC address */
268 u8 tx_dest_addr_high[2];
269 /* Last 4 byte of destination MAC address */
270 u8 tx_dest_addr_low[4];
271 /* Pkt Priority */
272 u8 priority;
273 /* Pkt Trasnit Power control */
274 u8 powermgmt;
275 /* Time the packet has been queued in the driver (units = 2ms) */
276 u8 pktdelay_2ms;
277 /* reserved */
278 u8 reserved1;
279};
280
281/* RxPD Descriptor */
282struct rxpd {
283 /* Current Rx packet status */
284 __le16 status;
285
286 /* SNR */
287 u8 snr;
288
289 /* Tx control */
290 u8 rx_control;
291
292 /* Pkt length */
293 __le16 pkt_len;
294
295 /* Noise Floor */
296 u8 nf;
297
298 /* Rx Packet Rate */
299 u8 rx_rate;
300
301 /* Pkt addr */
302 __le32 pkt_ptr;
303
304 /* Next Rx RxPD addr */
305 __le32 next_rxpd_ptr;
306
307 /* Pkt Priority */
308 u8 priority;
309 u8 reserved[3];
310};
311
/* Common header for every host command and response.
 * A response echoes the command code with bit 15 set (see CMD_RET). */
struct cmd_header {
	__le16 command;		/* CMD_* code */
	__le16 size;		/* total length including this header */
	__le16 seqnum;		/* matches request to response */
	__le16 result;		/* 0 on success (response only) */
} __attribute__ ((packed));
318
/* One entry of the command queue (lives on cmdfreeq or cmdpendingq). */
struct cmd_ctrl_node {
	struct list_head list;
	int result;		/* completion status handed to the waiter */
	/* command response */
	int (*callback)(struct lbtf_private *,
			unsigned long, struct cmd_header *);
	unsigned long callback_arg;
	/* command data */
	struct cmd_header *cmdbuf;
	/* wait queue */
	u16 cmdwaitqwoken;	/* condition flag for cmdwait_q */
	wait_queue_head_t cmdwait_q;
};
332
333/*
334 * Define data structure for CMD_GET_HW_SPEC
335 * This structure defines the response for the GET_HW_SPEC command
336 */
struct cmd_ds_get_hw_spec {
	struct cmd_header hdr;

	/* HW Interface version number */
	__le16 hwifversion;
	/* HW version number */
	__le16 version;
	/* Max number of TxPD FW can handle */
	__le16 nr_txpd;
	/* Max no of Multicast address */
	__le16 nr_mcast_adr;
	/* MAC address */
	u8 permanentaddr[6];

	/* region Code */
	__le16 regioncode;

	/* Number of antenna used */
	__le16 nr_antenna;

	/* FW release number, example 0x01030304 = 2.3.4p1
	 * NOTE(review): example looks inconsistent with the encoding —
	 * confirm against firmware docs. */
	__le32 fwrelease;

	/* Base Address of TxPD queue */
	__le32 wcb_base;
	/* Read Pointer of RxPd queue */
	__le32 rxpd_rdptr;

	/* Write Pointer of RxPd queue */
	__le32 rxpd_wrptr;

	/*FW/HW capability */
	__le32 fwcapinfo;
} __attribute__ ((packed));
371
/* Payload for CMD_MAC_CONTROL: action carries the CMD_ACT_MAC_* bits. */
struct cmd_ds_mac_control {
	struct cmd_header hdr;
	__le16 action;
	u16 reserved;
};

/* Payload for CMD_802_11_MAC_ADDRESS (get/set per action). */
struct cmd_ds_802_11_mac_address {
	struct cmd_header hdr;

	__le16 action;		/* CMD_ACT_GET / CMD_ACT_SET */
	uint8_t macadd[ETH_ALEN];
};

/* Payload for CMD_MAC_MULTICAST_ADR: flat list of multicast MACs. */
struct cmd_ds_mac_multicast_addr {
	struct cmd_header hdr;

	__le16 action;
	__le16 nr_of_adrs;	/* number of valid entries in maclist */
	u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
};

/* Payload for CMD_802_11_SET_MODE: see enum lbtf_mode. */
struct cmd_ds_set_mode {
	struct cmd_header hdr;

	__le16 mode;
};

/* Payload for CMD_802_11_SET_BSSID. */
struct cmd_ds_set_bssid {
	struct cmd_header hdr;

	u8 bssid[6];
	u8 activate;		/* nonzero = associate with bssid */
};

/* Payload for CMD_802_11_RADIO_CONTROL (preamble / radio on-off). */
struct cmd_ds_802_11_radio_control {
	struct cmd_header hdr;

	__le16 action;
	__le16 control;		/* preamble bits | TURN_ON_RF */
};


/* Payload for CMD_802_11_RF_CHANNEL (get/set current channel). */
struct cmd_ds_802_11_rf_channel {
	struct cmd_header hdr;

	__le16 action;
	__le16 channel;
	__le16 rftype;      /* unused */
	__le16 reserved;    /* unused */
	u8 channellist[32]; /* unused */
};

/* Payload for CMD_SET_BOOT2_VER. */
struct cmd_ds_set_boot2_ver {
	struct cmd_header hdr;

	__le16 action;
	__le16 version;
};

/* Payload for CMD_802_11_RESET (action = CMD_ACT_HALT). */
struct cmd_ds_802_11_reset {
	struct cmd_header hdr;

	__le16 action;
};

/* Payload for CMD_802_11_BEACON_CTRL: enable/period of beaconing. */
struct cmd_ds_802_11_beacon_control {
	struct cmd_header hdr;

	__le16 action;
	__le16 beacon_enable;
	__le16 beacon_period;
};

/* Payload for CMD_802_11_BEACON_SET: raw beacon template. */
struct cmd_ds_802_11_beacon_set {
	struct cmd_header hdr;

	__le16 len;		/* valid bytes in beacon[] */
	u8 beacon[MRVL_MAX_BCN_SIZE];
};
451
452struct lbtf_private;
453struct cmd_ctrl_node;
454
455/** Function Prototype Declaration */
456void lbtf_set_mac_control(struct lbtf_private *priv);
457
458int lbtf_free_cmd_buffer(struct lbtf_private *priv);
459
460int lbtf_allocate_cmd_buffer(struct lbtf_private *priv);
461int lbtf_execute_next_command(struct lbtf_private *priv);
462int lbtf_set_radio_control(struct lbtf_private *priv);
463int lbtf_update_hw_spec(struct lbtf_private *priv);
464int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv);
465void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode);
466void lbtf_set_bssid(struct lbtf_private *priv, bool activate, u8 *bssid);
467int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr);
468
469int lbtf_set_channel(struct lbtf_private *priv, u8 channel);
470
471int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon);
472int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
473 int beacon_int);
474
475
476int lbtf_process_rx_command(struct lbtf_private *priv);
477void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd,
478 int result);
479void lbtf_cmd_response_rx(struct lbtf_private *priv);
480
481/* main.c */
482struct chan_freq_power *lbtf_get_region_cfp_table(u8 region,
483 int *cfp_no);
484struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev);
485int lbtf_remove_card(struct lbtf_private *priv);
486int lbtf_start_card(struct lbtf_private *priv);
487int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb);
488void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail);
489void lbtf_bcn_sent(struct lbtf_private *priv);
490
491/* support functions for cmd.c */
492/* lbtf_cmd() infers the size of the buffer to copy data back into, from
493 the size of the target of the pointer. Since the command to be sent
494 may often be smaller, that size is set in cmd->size by the caller.*/
/* Submit a command and wait; __sz preserves the caller-set size while
 * hdr.size is rewritten to the full structure size for the response
 * copy-back. Arguments are expanded more than once — pass only simple
 * lvalues for `cmd`. */
#define lbtf_cmd(priv, cmdnr, cmd, cb, cb_arg)	({		\
	uint16_t __sz = le16_to_cpu((cmd)->hdr.size);		\
	(cmd)->hdr.size = cpu_to_le16(sizeof(*(cmd)));		\
	__lbtf_cmd(priv, cmdnr, &(cmd)->hdr, __sz, cb, cb_arg);	\
})

/* Convenience wrapper: response is copied back over `cmd` itself. */
#define lbtf_cmd_with_response(priv, cmdnr, cmd)	\
	lbtf_cmd(priv, cmdnr, cmd, lbtf_cmd_copyback, (unsigned long) (cmd))
503
504void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
505 struct cmd_header *in_cmd, int in_cmd_size);
506
507int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
508 struct cmd_header *in_cmd, int in_cmd_size,
509 int (*callback)(struct lbtf_private *, unsigned long,
510 struct cmd_header *),
511 unsigned long callback_arg);
512
513int lbtf_cmd_copyback(struct lbtf_private *priv, unsigned long extra,
514 struct cmd_header *resp);
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
new file mode 100644
index 000000000000..feff945ad856
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -0,0 +1,662 @@
1/*
2 * Copyright (C) 2008, cozybit Inc.
3 * Copyright (C) 2003-2006, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 */
10#include "libertas_tf.h"
11#include "linux/etherdevice.h"
12
13#define DRIVER_RELEASE_VERSION "004.p0"
14/* thinfirm version: 5.132.X.pX */
15#define LBTF_FW_VER_MIN 0x05840300
16#define LBTF_FW_VER_MAX 0x0584ffff
17#define QOS_CONTROL_LEN 2
18
19static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION;
20struct workqueue_struct *lbtf_wq;
21
/* The 14 2.4 GHz channels; hw_value is the channel number the firmware
 * expects in CMD_802_11_RF_CHANNEL. Copied into priv->channels at
 * lbtf_add_card() time. */
static const struct ieee80211_channel lbtf_channels[] = {
	{ .center_freq = 2412, .hw_value = 1 },
	{ .center_freq = 2417, .hw_value = 2 },
	{ .center_freq = 2422, .hw_value = 3 },
	{ .center_freq = 2427, .hw_value = 4 },
	{ .center_freq = 2432, .hw_value = 5 },
	{ .center_freq = 2437, .hw_value = 6 },
	{ .center_freq = 2442, .hw_value = 7 },
	{ .center_freq = 2447, .hw_value = 8 },
	{ .center_freq = 2452, .hw_value = 9 },
	{ .center_freq = 2457, .hw_value = 10 },
	{ .center_freq = 2462, .hw_value = 11 },
	{ .center_freq = 2467, .hw_value = 12 },
	{ .center_freq = 2472, .hw_value = 13 },
	{ .center_freq = 2484, .hw_value = 14 },
};
38
39/* This table contains the hardware specific values for the modulation rates. */
40static const struct ieee80211_rate lbtf_rates[] = {
41 { .bitrate = 10,
42 .hw_value = 0, },
43 { .bitrate = 20,
44 .hw_value = 1,
45 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
46 { .bitrate = 55,
47 .hw_value = 2,
48 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
49 { .bitrate = 110,
50 .hw_value = 3,
51 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
52 { .bitrate = 60,
53 .hw_value = 5,
54 .flags = 0 },
55 { .bitrate = 90,
56 .hw_value = 6,
57 .flags = 0 },
58 { .bitrate = 120,
59 .hw_value = 7,
60 .flags = 0 },
61 { .bitrate = 180,
62 .hw_value = 8,
63 .flags = 0 },
64 { .bitrate = 240,
65 .hw_value = 9,
66 .flags = 0 },
67 { .bitrate = 360,
68 .hw_value = 10,
69 .flags = 0 },
70 { .bitrate = 480,
71 .hw_value = 11,
72 .flags = 0 },
73 { .bitrate = 540,
74 .hw_value = 12,
75 .flags = 0 },
76};
77
/* Workqueue handler for priv->cmd_work: processes a received command
 * response, handles command timeouts (retrying up to 10 times), and
 * kicks off the next pending command. Runs with driver_lock held
 * except while calling lbtf_process_rx_command(). */
static void lbtf_cmd_work(struct work_struct *work)
{
	struct lbtf_private *priv = container_of(work, struct lbtf_private,
					 cmd_work);
	spin_lock_irq(&priv->driver_lock);
	/* command response? */
	if (priv->cmd_response_rxed) {
		priv->cmd_response_rxed = 0;
		/* drop the lock: lbtf_process_rx_command takes it itself */
		spin_unlock_irq(&priv->driver_lock);
		lbtf_process_rx_command(priv);
		spin_lock_irq(&priv->driver_lock);
	}

	if (priv->cmd_timed_out && priv->cur_cmd) {
		struct cmd_ctrl_node *cmdnode = priv->cur_cmd;

		if (++priv->nr_retries > 10) {
			/* give up: complete the command with -ETIMEDOUT */
			lbtf_complete_command(priv, cmdnode,
					      -ETIMEDOUT);
			priv->nr_retries = 0;
		} else {
			priv->cur_cmd = NULL;

			/* Stick it back at the _top_ of the pending
			 * queue for immediate resubmission */
			list_add(&cmdnode->list, &priv->cmdpendingq);
		}
	}
	priv->cmd_timed_out = 0;
	spin_unlock_irq(&priv->driver_lock);

	/* no command traffic until the firmware is up */
	if (!priv->fw_ready)
		return;
	/* Execute the next command */
	if (!priv->cur_cmd)
		lbtf_execute_next_command(priv);
}
115
116/**
117 * lbtf_setup_firmware: initialize firmware.
118 *
119 * @priv A pointer to struct lbtf_private structure
120 *
121 * Returns: 0 on success.
122 */
123static int lbtf_setup_firmware(struct lbtf_private *priv)
124{
125 int ret = -1;
126
127 /*
128 * Read priv address from HW
129 */
130 memset(priv->current_addr, 0xff, ETH_ALEN);
131 ret = lbtf_update_hw_spec(priv);
132 if (ret) {
133 ret = -1;
134 goto done;
135 }
136
137 lbtf_set_mac_control(priv);
138 lbtf_set_radio_control(priv);
139
140 ret = 0;
141done:
142 return ret;
143}
144
145/**
146 * This function handles the timeout of command sending.
147 * It will re-send the same command again.
148 */
149static void command_timer_fn(unsigned long data)
150{
151 struct lbtf_private *priv = (struct lbtf_private *)data;
152 unsigned long flags;
153
154 spin_lock_irqsave(&priv->driver_lock, flags);
155
156 if (!priv->cur_cmd) {
157 printk(KERN_DEBUG "libertastf: command timer expired; "
158 "no pending command\n");
159 goto out;
160 }
161
162 printk(KERN_DEBUG "libertas: command %x timed out\n",
163 le16_to_cpu(priv->cur_cmd->cmdbuf->command));
164
165 priv->cmd_timed_out = 1;
166 queue_work(lbtf_wq, &priv->cmd_work);
167out:
168 spin_unlock_irqrestore(&priv->driver_lock, flags);
169}
170
/* One-time initialization of driver state: locks, queues, command
 * timer and command buffers. Returns 0 on success, -1 if the command
 * buffers cannot be allocated. */
static int lbtf_init_adapter(struct lbtf_private *priv)
{
	memset(priv->current_addr, 0xff, ETH_ALEN);
	mutex_init(&priv->lock);

	priv->vif = NULL;
	setup_timer(&priv->command_timer, command_timer_fn,
		(unsigned long)priv);

	INIT_LIST_HEAD(&priv->cmdfreeq);
	INIT_LIST_HEAD(&priv->cmdpendingq);

	spin_lock_init(&priv->driver_lock);

	/* Allocate the command buffers */
	if (lbtf_allocate_cmd_buffer(priv))
		return -1;

	return 0;
}
191
/* Tear down what lbtf_init_adapter() set up.
 * NOTE(review): buffers are freed before the command timer is
 * stopped, and del_timer() does not wait for a running callback —
 * confirm no timer can still be pending here (callers also call
 * del_timer() first in the remove path). */
static void lbtf_free_adapter(struct lbtf_private *priv)
{
	lbtf_free_cmd_buffer(priv);
	del_timer(&priv->command_timer);
}
197
/* mac80211 .tx callback: stash the frame and defer the actual send to
 * lbtf_tx_work on the driver workqueue. Only one frame is held at a
 * time because the queues are stopped until TX feedback arrives. */
static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct lbtf_private *priv = hw->priv;

	priv->skb_to_tx = skb;
	queue_work(lbtf_wq, &priv->tx_work);
	/*
	 * queue will be restarted when we receive transmission feedback if
	 * there are no buffered multicast frames to send
	 */
	ieee80211_stop_queues(priv->hw);
	return 0;
}
211
/* Workqueue handler for priv->tx_work: picks the next frame (buffered
 * broadcast PS frames take priority in AP mode, otherwise the frame
 * stashed by lbtf_op_tx), prepends a txpd descriptor and hands it to
 * the interface driver. The frame is held in priv->tx_skb until
 * lbtf_send_tx_feedback() completes it. */
static void lbtf_tx_work(struct work_struct *work)
{
	struct lbtf_private *priv = container_of(work, struct lbtf_private,
					 tx_work);
	unsigned int len;
	struct ieee80211_tx_info *info;
	struct txpd *txpd;
	struct sk_buff *skb = NULL;
	int err;

	/* buffered broadcast/multicast PS frames go out first in AP mode */
	if ((priv->vif->type == NL80211_IFTYPE_AP) &&
	    (!skb_queue_empty(&priv->bc_ps_buf)))
		skb = skb_dequeue(&priv->bc_ps_buf);
	else if (priv->skb_to_tx) {
		skb = priv->skb_to_tx;
		priv->skb_to_tx = NULL;
	} else
		return;

	len = skb->len;
	info  = IEEE80211_SKB_CB(skb);
	txpd = (struct txpd *)  skb_push(skb, sizeof(struct txpd));

	/* device gone: drop the frame rather than touching the hardware */
	if (priv->surpriseremoved) {
		dev_kfree_skb_any(skb);
		return;
	}

	memset(txpd, 0, sizeof(struct txpd));
	/* Activate per-packet rate selection */
	txpd->tx_control |= cpu_to_le32(MRVL_PER_PACKET_RATE |
			     ieee80211_get_tx_rate(priv->hw, info)->hw_value);

	/* copy destination address from 802.11 header */
	memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4,
		ETH_ALEN);
	txpd->tx_packet_length = cpu_to_le16(len);
	txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
	/* only one frame may be in flight; lbtf_op_tx stopped the queues */
	BUG_ON(priv->tx_skb);
	spin_lock_irq(&priv->driver_lock);
	priv->tx_skb = skb;
	err = priv->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len);
	spin_unlock_irq(&priv->driver_lock);
	if (err) {
		dev_kfree_skb_any(skb);
		priv->tx_skb = NULL;
	}
}
260
/* mac80211 .start callback: upload firmware on first start, then bring
 * up MAC/radio defaults and verify the firmware version is within the
 * supported thinfirm range. On any failure the device is reset.
 * Returns 0 on success, -1 on failure. */
static int lbtf_op_start(struct ieee80211_hw *hw)
{
	struct lbtf_private *priv = hw->priv;
	void *card = priv->card;
	int ret = -1;

	if (!priv->fw_ready)
		/* Upload firmware */
		if (priv->hw_prog_firmware(card))
			goto err_prog_firmware;

	/* poke the firmware */
	priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
	priv->radioon = RADIO_ON;
	priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
	ret = lbtf_setup_firmware(priv);
	if (ret)
		goto err_prog_firmware;

	/* only the 5.132.X thinfirm firmware range is supported */
	if ((priv->fwrelease < LBTF_FW_VER_MIN) ||
	    (priv->fwrelease > LBTF_FW_VER_MAX)) {
		ret = -1;
		goto err_prog_firmware;
	}

	printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
	return 0;

err_prog_firmware:
	priv->hw_reset_device(card);
	return ret;
}
293
/* mac80211 .stop callback: abort pending commands, flush both work
 * items, drop buffered PS frames and switch the radio off. */
static void lbtf_op_stop(struct ieee80211_hw *hw)
{
	struct lbtf_private *priv = hw->priv;
	unsigned long flags;
	struct sk_buff *skb;

	struct cmd_ctrl_node *cmdnode;
	/* Flush pending command nodes
	 * NOTE(review): nodes are woken with -ENOENT but not removed from
	 * cmdpendingq here — confirm the woken waiter is responsible for
	 * unlinking/recycling its node. */
	spin_lock_irqsave(&priv->driver_lock, flags);
	list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
		cmdnode->result = -ENOENT;
		cmdnode->cmdwaitqwoken = 1;
		wake_up_interruptible(&cmdnode->cmdwait_q);
	}

	spin_unlock_irqrestore(&priv->driver_lock, flags);
	cancel_work_sync(&priv->cmd_work);
	cancel_work_sync(&priv->tx_work);
	while ((skb = skb_dequeue(&priv->bc_ps_buf)))
		dev_kfree_skb_any(skb);
	priv->radioon = RADIO_OFF;
	lbtf_set_radio_control(priv);

	return;
}
319
/* mac80211 .add_interface callback: only a single interface is
 * supported at a time. Maps the mac80211 interface type onto the
 * firmware operating mode and programs the MAC address. */
static int lbtf_op_add_interface(struct ieee80211_hw *hw,
			struct ieee80211_if_init_conf *conf)
{
	struct lbtf_private *priv = hw->priv;
	/* one interface maximum */
	if (priv->vif != NULL)
		return -EOPNOTSUPP;

	priv->vif = conf->vif;
	switch (conf->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_AP:
		lbtf_set_mode(priv, LBTF_AP_MODE);
		break;
	case NL80211_IFTYPE_STATION:
		lbtf_set_mode(priv, LBTF_STA_MODE);
		break;
	default:
		priv->vif = NULL;
		return -EOPNOTSUPP;
	}
	lbtf_set_mac_address(priv, (u8 *) conf->mac_addr);
	return 0;
}
343
/* mac80211 .remove_interface callback: stop beaconing if this was an
 * AP/mesh interface, return the firmware to passive mode and clear the
 * BSSID and the cached vif pointer. */
static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
			struct ieee80211_if_init_conf *conf)
{
	struct lbtf_private *priv = hw->priv;

	if (priv->vif->type == NL80211_IFTYPE_AP ||
	    priv->vif->type == NL80211_IFTYPE_MESH_POINT)
		lbtf_beacon_ctrl(priv, 0, 0);
	lbtf_set_mode(priv, LBTF_PASSIVE_MODE);
	lbtf_set_bssid(priv, 0, NULL);
	priv->vif = NULL;
}
356
357static int lbtf_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
358{
359 struct lbtf_private *priv = hw->priv;
360 if (conf->channel->center_freq != priv->cur_freq) {
361 priv->cur_freq = conf->channel->center_freq;
362 lbtf_set_channel(priv, conf->channel->hw_value);
363 }
364 return 0;
365}
366
/* mac80211 .config_interface callback: refresh the beacon template
 * (AP/mesh only) and program or clear the BSSID. An all-zero BSSID is
 * treated as "deactivate". Always returns 0. */
static int lbtf_op_config_interface(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			struct ieee80211_if_conf *conf)
{
	struct lbtf_private *priv = hw->priv;
	struct sk_buff *beacon;

	switch (priv->vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
		/* push the current beacon template to the firmware */
		beacon = ieee80211_beacon_get(hw, vif);
		if (beacon) {
			lbtf_beacon_set(priv, beacon);
			kfree_skb(beacon);
			lbtf_beacon_ctrl(priv, 1, hw->conf.beacon_int);
		}
		break;
	default:
		break;
	}

	if (conf->bssid) {
		u8 null_bssid[ETH_ALEN] = {0};
		/* compare_ether_addr() returns nonzero when different,
		 * so activate is true for any non-zero BSSID */
		bool activate = compare_ether_addr(conf->bssid, null_bssid);
		lbtf_set_bssid(priv, activate, conf->bssid);
	}

	return 0;
}
396
/* Only these mac80211 filter flags are honored by the hardware. */
#define SUPPORTED_FIF_FLAGS  (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)
/* mac80211 .configure_filter callback: translate filter flags and the
 * multicast list into CMD_ACT_MAC_* control bits, falling back to
 * all-multicast when the list exceeds the hardware limit. Pushes
 * commands only when something actually changed. */
static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
			unsigned int changed_flags,
			unsigned int *new_flags,
			int mc_count, struct dev_mc_list *mclist)
{
	struct lbtf_private *priv = hw->priv;
	int old_mac_control = priv->mac_control;
	int i;
	changed_flags &= SUPPORTED_FIF_FLAGS;
	*new_flags &= SUPPORTED_FIF_FLAGS;

	if (!changed_flags)
		return;

	if (*new_flags & (FIF_PROMISC_IN_BSS))
		priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
	else
		priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
	if (*new_flags & (FIF_ALLMULTI) ||
	    mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE) {
		/* too many entries (or explicit request): accept all mcast */
		priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
		priv->mac_control &= ~CMD_ACT_MAC_MULTICAST_ENABLE;
	} else if (mc_count) {
		/* program the explicit multicast address list */
		priv->mac_control |= CMD_ACT_MAC_MULTICAST_ENABLE;
		priv->mac_control &= ~CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
		priv->nr_of_multicastmacaddr = mc_count;
		for (i = 0; i < mc_count; i++) {
			if (!mclist)
				break;
			memcpy(&priv->multicastlist[i], mclist->da_addr,
					ETH_ALEN);
			mclist = mclist->next;
		}
		lbtf_cmd_set_mac_multicast_addr(priv);
	} else {
		/* empty list: disable multicast, clearing any stale list */
		priv->mac_control &= ~(CMD_ACT_MAC_MULTICAST_ENABLE |
				       CMD_ACT_MAC_ALL_MULTICAST_ENABLE);
		if (priv->nr_of_multicastmacaddr) {
			priv->nr_of_multicastmacaddr = 0;
			lbtf_cmd_set_mac_multicast_addr(priv);
		}
	}


	if (priv->mac_control != old_mac_control)
		lbtf_set_mac_control(priv);
}
445
446static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
447 struct ieee80211_vif *vif,
448 struct ieee80211_bss_conf *bss_conf,
449 u32 changes)
450{
451 struct lbtf_private *priv = hw->priv;
452
453 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
454 if (bss_conf->use_short_preamble)
455 priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
456 else
457 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
458 lbtf_set_radio_control(priv);
459 }
460
461 return;
462}
463
/* mac80211 driver operations table registered in lbtf_add_card(). */
static const struct ieee80211_ops lbtf_ops = {
	.tx			= lbtf_op_tx,
	.start			= lbtf_op_start,
	.stop			= lbtf_op_stop,
	.add_interface		= lbtf_op_add_interface,
	.remove_interface	= lbtf_op_remove_interface,
	.config			= lbtf_op_config,
	.config_interface	= lbtf_op_config_interface,
	.configure_filter	= lbtf_op_configure_filter,
	.bss_info_changed	= lbtf_op_bss_info_changed,
};
475
476int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
477{
478 struct ieee80211_rx_status stats;
479 struct rxpd *prxpd;
480 int need_padding;
481 unsigned int flags;
482 struct ieee80211_hdr *hdr;
483
484 prxpd = (struct rxpd *) skb->data;
485
486 stats.flag = 0;
487 if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
488 stats.flag |= RX_FLAG_FAILED_FCS_CRC;
489 stats.freq = priv->cur_freq;
490 stats.band = IEEE80211_BAND_2GHZ;
491 stats.signal = prxpd->snr;
492 stats.noise = prxpd->nf;
493 stats.qual = prxpd->snr - prxpd->nf;
494 /* Marvell rate index has a hole at value 4 */
495 if (prxpd->rx_rate > 4)
496 --prxpd->rx_rate;
497 stats.rate_idx = prxpd->rx_rate;
498 skb_pull(skb, sizeof(struct rxpd));
499
500 hdr = (struct ieee80211_hdr *)skb->data;
501 flags = le32_to_cpu(*(__le32 *)(skb->data + 4));
502
503 need_padding = ieee80211_is_data_qos(hdr->frame_control);
504 need_padding ^= ieee80211_has_a4(hdr->frame_control);
505 need_padding ^= ieee80211_is_data_qos(hdr->frame_control) &&
506 (*ieee80211_get_qos_ctl(hdr) &
507 IEEE80211_QOS_CONTROL_A_MSDU_PRESENT);
508
509 if (need_padding) {
510 memmove(skb->data + 2, skb->data, skb->len);
511 skb_reserve(skb, 2);
512 }
513
514 ieee80211_rx_irqsafe(priv->hw, skb, &stats);
515 return 0;
516}
517EXPORT_SYMBOL_GPL(lbtf_rx);
518
519/**
520 * lbtf_add_card: Add and initialize the card, no fw upload yet.
521 *
522 * @card A pointer to card
523 *
524 * Returns: pointer to struct lbtf_priv.
525 */
/**
 * lbtf_add_card: Add and initialize the card, no fw upload yet.
 *
 * @card A pointer to card (interface-driver private data)
 * @dmdev The device to associate the ieee80211_hw with
 *
 * Allocates the mac80211 hw, initializes driver state, fills in the
 * supported band and registers with mac80211.
 *
 * Returns: pointer to struct lbtf_priv, or NULL on failure.
 */
struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
{
	struct ieee80211_hw *hw;
	struct lbtf_private *priv = NULL;

	hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops);
	if (!hw)
		goto done;

	priv = hw->priv;
	if (lbtf_init_adapter(priv))
		goto err_init_adapter;

	priv->hw = hw;
	priv->card = card;
	priv->tx_skb = NULL;

	hw->queues = 1;
	/* driver buffers broadcast PS frames itself (see lbtf_bcn_sent) */
	hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
	hw->extra_tx_headroom = sizeof(struct txpd);
	memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels));
	memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates));
	priv->band.n_bitrates = ARRAY_SIZE(lbtf_rates);
	priv->band.bitrates = priv->rates;
	priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
	priv->band.channels = priv->channels;
	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
	skb_queue_head_init(&priv->bc_ps_buf);

	SET_IEEE80211_DEV(hw, dmdev);

	INIT_WORK(&priv->cmd_work, lbtf_cmd_work);
	INIT_WORK(&priv->tx_work, lbtf_tx_work);
	if (ieee80211_register_hw(hw))
		goto err_init_adapter;

	goto done;

err_init_adapter:
	lbtf_free_adapter(priv);
	ieee80211_free_hw(hw);
	priv = NULL;

done:
	return priv;
}
EXPORT_SYMBOL_GPL(lbtf_add_card);
573
574
/* Undo lbtf_add_card(): flag the device as removed, stop the command
 * timer, free driver state and unregister/free the mac80211 hw.
 * Always returns 0. */
int lbtf_remove_card(struct lbtf_private *priv)
{
	struct ieee80211_hw *hw = priv->hw;

	priv->surpriseremoved = 1;
	del_timer(&priv->command_timer);
	lbtf_free_adapter(priv);
	/* clear the back-pointer before tearing down hw */
	priv->hw = NULL;
	ieee80211_unregister_hw(hw);
	ieee80211_free_hw(hw);

	return 0;
}
EXPORT_SYMBOL_GPL(lbtf_remove_card);
589
/* Called by the interface driver when the hardware reports TX
 * completion: fills in mac80211 TX status for the in-flight frame,
 * completes it, and either restarts the queues or reschedules tx_work
 * if more frames are waiting. */
void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb);
	memset(&info->status, 0, sizeof(info->status));
	/*
	 * Commented out, otherwise we never go beyond 1Mbit/s using mac80211
	 * default pid rc algorithm.
	 *
	 * info->status.retry_count = MRVL_DEFAULT_RETRIES - retrycnt;
	 */
	info->status.excessive_retries = fail ? 1 : 0;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !fail)
		info->flags |= IEEE80211_TX_STAT_ACK;
	/* strip the txpd descriptor added by lbtf_tx_work */
	skb_pull(priv->tx_skb, sizeof(struct txpd));
	ieee80211_tx_status_irqsafe(priv->hw, priv->tx_skb);
	priv->tx_skb = NULL;
	if (!priv->skb_to_tx && skb_queue_empty(&priv->bc_ps_buf))
		ieee80211_wake_queues(priv->hw);
	else
		/* more work pending: keep the queues stopped and resend */
		queue_work(lbtf_wq, &priv->tx_work);
}
EXPORT_SYMBOL_GPL(lbtf_send_tx_feedback);
612
/* Called by the interface driver on a beacon-sent event (AP mode
 * only): drains mac80211's buffered broadcast PS frames into
 * bc_ps_buf for transmission right after the beacon, then refreshes
 * the beacon template in the firmware. */
void lbtf_bcn_sent(struct lbtf_private *priv)
{
	struct sk_buff *skb = NULL;

	if (priv->vif->type != NL80211_IFTYPE_AP)
		return;

	if (skb_queue_empty(&priv->bc_ps_buf)) {
		bool tx_buff_bc = 0;

		while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
			skb_queue_tail(&priv->bc_ps_buf, skb);
			tx_buff_bc = 1;
		}
		if (tx_buff_bc) {
			/* hold normal traffic until PS frames are flushed */
			ieee80211_stop_queues(priv->hw);
			queue_work(lbtf_wq, &priv->tx_work);
		}
	}

	skb = ieee80211_beacon_get(priv->hw, priv->vif);

	if (skb) {
		lbtf_beacon_set(priv, skb);
		kfree_skb(skb);
	}
}
EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
641
642static int __init lbtf_init_module(void)
643{
644 lbtf_wq = create_workqueue("libertastf");
645 if (lbtf_wq == NULL) {
646 printk(KERN_ERR "libertastf: couldn't create workqueue\n");
647 return -ENOMEM;
648 }
649 return 0;
650}
651
/* Module exit point: flush and destroy the driver workqueue. */
static void __exit lbtf_exit_module(void)
{
	destroy_workqueue(lbtf_wq);
}
656
657module_init(lbtf_init_module);
658module_exit(lbtf_exit_module);
659
660MODULE_DESCRIPTION("Libertas WLAN Thinfirm Driver Library");
661MODULE_AUTHOR("Cozybit Inc.");
662MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 248d31a7aa33..c9e4a435b2fc 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -14,6 +14,8 @@
14 * - RX filtering based on filter configuration (data->rx_filter) 14 * - RX filtering based on filter configuration (data->rx_filter)
15 */ 15 */
16 16
17#include <linux/list.h>
18#include <linux/spinlock.h>
17#include <net/mac80211.h> 19#include <net/mac80211.h>
18#include <net/ieee80211_radiotap.h> 20#include <net/ieee80211_radiotap.h>
19#include <linux/if_arp.h> 21#include <linux/if_arp.h>
@@ -28,11 +30,56 @@ static int radios = 2;
28module_param(radios, int, 0444); 30module_param(radios, int, 0444);
29MODULE_PARM_DESC(radios, "Number of simulated radios"); 31MODULE_PARM_DESC(radios, "Number of simulated radios");
30 32
33struct hwsim_vif_priv {
34 u32 magic;
35};
36
37#define HWSIM_VIF_MAGIC 0x69537748
38
39static inline void hwsim_check_magic(struct ieee80211_vif *vif)
40{
41 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
42 WARN_ON(vp->magic != HWSIM_VIF_MAGIC);
43}
44
45static inline void hwsim_set_magic(struct ieee80211_vif *vif)
46{
47 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
48 vp->magic = HWSIM_VIF_MAGIC;
49}
50
51static inline void hwsim_clear_magic(struct ieee80211_vif *vif)
52{
53 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
54 vp->magic = 0;
55}
56
57struct hwsim_sta_priv {
58 u32 magic;
59};
60
61#define HWSIM_STA_MAGIC 0x6d537748
62
63static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta)
64{
65 struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
66 WARN_ON(sp->magic != HWSIM_VIF_MAGIC);
67}
68
69static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta)
70{
71 struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
72 sp->magic = HWSIM_VIF_MAGIC;
73}
74
75static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta)
76{
77 struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
78 sp->magic = 0;
79}
31 80
32static struct class *hwsim_class; 81static struct class *hwsim_class;
33 82
34static struct ieee80211_hw **hwsim_radios;
35static int hwsim_radio_count;
36static struct net_device *hwsim_mon; /* global monitor netdev */ 83static struct net_device *hwsim_mon; /* global monitor netdev */
37 84
38 85
@@ -68,7 +115,12 @@ static const struct ieee80211_rate hwsim_rates[] = {
68 { .bitrate = 540 } 115 { .bitrate = 540 }
69}; 116};
70 117
118static spinlock_t hwsim_radio_lock;
119static struct list_head hwsim_radios;
120
71struct mac80211_hwsim_data { 121struct mac80211_hwsim_data {
122 struct list_head list;
123 struct ieee80211_hw *hw;
72 struct device *dev; 124 struct device *dev;
73 struct ieee80211_supported_band band; 125 struct ieee80211_supported_band band;
74 struct ieee80211_channel channels[ARRAY_SIZE(hwsim_channels)]; 126 struct ieee80211_channel channels[ARRAY_SIZE(hwsim_channels)];
@@ -144,11 +196,11 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
144} 196}
145 197
146 198
147static int mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, 199static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
148 struct sk_buff *skb) 200 struct sk_buff *skb)
149{ 201{
150 struct mac80211_hwsim_data *data = hw->priv; 202 struct mac80211_hwsim_data *data = hw->priv, *data2;
151 int i, ack = 0; 203 bool ack = false;
152 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 204 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
153 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 205 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
154 struct ieee80211_rx_status rx_status; 206 struct ieee80211_rx_status rx_status;
@@ -161,13 +213,13 @@ static int mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
161 /* TODO: simulate signal strength (and optional packet drop) */ 213 /* TODO: simulate signal strength (and optional packet drop) */
162 214
163 /* Copy skb to all enabled radios that are on the current frequency */ 215 /* Copy skb to all enabled radios that are on the current frequency */
164 for (i = 0; i < hwsim_radio_count; i++) { 216 spin_lock(&hwsim_radio_lock);
165 struct mac80211_hwsim_data *data2; 217 list_for_each_entry(data2, &hwsim_radios, list) {
166 struct sk_buff *nskb; 218 struct sk_buff *nskb;
167 219
168 if (hwsim_radios[i] == NULL || hwsim_radios[i] == hw) 220 if (data == data2)
169 continue; 221 continue;
170 data2 = hwsim_radios[i]->priv; 222
171 if (!data2->started || !data2->radio_enabled || 223 if (!data2->started || !data2->radio_enabled ||
172 data->channel->center_freq != data2->channel->center_freq) 224 data->channel->center_freq != data2->channel->center_freq)
173 continue; 225 continue;
@@ -176,11 +228,12 @@ static int mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
176 if (nskb == NULL) 228 if (nskb == NULL)
177 continue; 229 continue;
178 230
179 if (memcmp(hdr->addr1, hwsim_radios[i]->wiphy->perm_addr, 231 if (memcmp(hdr->addr1, data2->hw->wiphy->perm_addr,
180 ETH_ALEN) == 0) 232 ETH_ALEN) == 0)
181 ack = 1; 233 ack = true;
182 ieee80211_rx_irqsafe(hwsim_radios[i], nskb, &rx_status); 234 ieee80211_rx_irqsafe(data2->hw, nskb, &rx_status);
183 } 235 }
236 spin_unlock(&hwsim_radio_lock);
184 237
185 return ack; 238 return ack;
186} 239}
@@ -189,7 +242,7 @@ static int mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
189static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 242static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
190{ 243{
191 struct mac80211_hwsim_data *data = hw->priv; 244 struct mac80211_hwsim_data *data = hw->priv;
192 int ack; 245 bool ack;
193 struct ieee80211_tx_info *txi; 246 struct ieee80211_tx_info *txi;
194 247
195 mac80211_hwsim_monitor_rx(hw, skb); 248 mac80211_hwsim_monitor_rx(hw, skb);
@@ -210,6 +263,12 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
210 ack = mac80211_hwsim_tx_frame(hw, skb); 263 ack = mac80211_hwsim_tx_frame(hw, skb);
211 264
212 txi = IEEE80211_SKB_CB(skb); 265 txi = IEEE80211_SKB_CB(skb);
266
267 if (txi->control.vif)
268 hwsim_check_magic(txi->control.vif);
269 if (txi->control.sta)
270 hwsim_check_sta_magic(txi->control.sta);
271
213 memset(&txi->status, 0, sizeof(txi->status)); 272 memset(&txi->status, 0, sizeof(txi->status));
214 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK)) { 273 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK)) {
215 if (ack) 274 if (ack)
@@ -246,6 +305,7 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
246 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n", 305 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n",
247 wiphy_name(hw->wiphy), __func__, conf->type, 306 wiphy_name(hw->wiphy), __func__, conf->type,
248 print_mac(mac, conf->mac_addr)); 307 print_mac(mac, conf->mac_addr));
308 hwsim_set_magic(conf->vif);
249 return 0; 309 return 0;
250} 310}
251 311
@@ -257,6 +317,8 @@ static void mac80211_hwsim_remove_interface(
257 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n", 317 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n",
258 wiphy_name(hw->wiphy), __func__, conf->type, 318 wiphy_name(hw->wiphy), __func__, conf->type,
259 print_mac(mac, conf->mac_addr)); 319 print_mac(mac, conf->mac_addr));
320 hwsim_check_magic(conf->vif);
321 hwsim_clear_magic(conf->vif);
260} 322}
261 323
262 324
@@ -267,7 +329,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
267 struct sk_buff *skb; 329 struct sk_buff *skb;
268 struct ieee80211_tx_info *info; 330 struct ieee80211_tx_info *info;
269 331
270 if (vif->type != IEEE80211_IF_TYPE_AP) 332 hwsim_check_magic(vif);
333
334 if (vif->type != NL80211_IFTYPE_AP)
271 return; 335 return;
272 336
273 skb = ieee80211_beacon_get(hw, vif); 337 skb = ieee80211_beacon_get(hw, vif);
@@ -341,7 +405,45 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
341 *total_flags = data->rx_filter; 405 *total_flags = data->rx_filter;
342} 406}
343 407
408static int mac80211_hwsim_config_interface(struct ieee80211_hw *hw,
409 struct ieee80211_vif *vif,
410 struct ieee80211_if_conf *conf)
411{
412 hwsim_check_magic(vif);
413 return 0;
414}
415
416static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
417 struct ieee80211_vif *vif,
418 struct ieee80211_bss_conf *info,
419 u32 changed)
420{
421 hwsim_check_magic(vif);
422}
423
424static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
425 struct ieee80211_vif *vif,
426 enum sta_notify_cmd cmd,
427 struct ieee80211_sta *sta)
428{
429 hwsim_check_magic(vif);
430 switch (cmd) {
431 case STA_NOTIFY_ADD:
432 hwsim_set_sta_magic(sta);
433 break;
434 case STA_NOTIFY_REMOVE:
435 hwsim_clear_sta_magic(sta);
436 break;
437 }
438}
344 439
440static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw,
441 struct ieee80211_sta *sta,
442 bool set)
443{
444 hwsim_check_sta_magic(sta);
445 return 0;
446}
345 447
346static const struct ieee80211_ops mac80211_hwsim_ops = 448static const struct ieee80211_ops mac80211_hwsim_ops =
347{ 449{
@@ -352,23 +454,30 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
352 .remove_interface = mac80211_hwsim_remove_interface, 454 .remove_interface = mac80211_hwsim_remove_interface,
353 .config = mac80211_hwsim_config, 455 .config = mac80211_hwsim_config,
354 .configure_filter = mac80211_hwsim_configure_filter, 456 .configure_filter = mac80211_hwsim_configure_filter,
457 .config_interface = mac80211_hwsim_config_interface,
458 .bss_info_changed = mac80211_hwsim_bss_info_changed,
459 .sta_notify = mac80211_hwsim_sta_notify,
460 .set_tim = mac80211_hwsim_set_tim,
355}; 461};
356 462
357 463
358static void mac80211_hwsim_free(void) 464static void mac80211_hwsim_free(void)
359{ 465{
360 int i; 466 struct list_head tmplist, *i, *tmp;
361 467 struct mac80211_hwsim_data *data;
362 for (i = 0; i < hwsim_radio_count; i++) { 468
363 if (hwsim_radios[i]) { 469 INIT_LIST_HEAD(&tmplist);
364 struct mac80211_hwsim_data *data; 470
365 data = hwsim_radios[i]->priv; 471 spin_lock_bh(&hwsim_radio_lock);
366 ieee80211_unregister_hw(hwsim_radios[i]); 472 list_for_each_safe(i, tmp, &hwsim_radios)
367 device_unregister(data->dev); 473 list_move(i, &tmplist);
368 ieee80211_free_hw(hwsim_radios[i]); 474 spin_unlock_bh(&hwsim_radio_lock);
369 } 475
476 list_for_each_entry(data, &tmplist, list) {
477 ieee80211_unregister_hw(data->hw);
478 device_unregister(data->dev);
479 ieee80211_free_hw(data->hw);
370 } 480 }
371 kfree(hwsim_radios);
372 class_destroy(hwsim_class); 481 class_destroy(hwsim_class);
373} 482}
374 483
@@ -398,37 +507,32 @@ static int __init init_mac80211_hwsim(void)
398 struct ieee80211_hw *hw; 507 struct ieee80211_hw *hw;
399 DECLARE_MAC_BUF(mac); 508 DECLARE_MAC_BUF(mac);
400 509
401 if (radios < 1 || radios > 65535) 510 if (radios < 1 || radios > 100)
402 return -EINVAL; 511 return -EINVAL;
403 512
404 hwsim_radio_count = radios; 513 spin_lock_init(&hwsim_radio_lock);
405 hwsim_radios = kcalloc(hwsim_radio_count, 514 INIT_LIST_HEAD(&hwsim_radios);
406 sizeof(struct ieee80211_hw *), GFP_KERNEL);
407 if (hwsim_radios == NULL)
408 return -ENOMEM;
409 515
410 hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); 516 hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
411 if (IS_ERR(hwsim_class)) { 517 if (IS_ERR(hwsim_class))
412 kfree(hwsim_radios);
413 return PTR_ERR(hwsim_class); 518 return PTR_ERR(hwsim_class);
414 }
415 519
416 memset(addr, 0, ETH_ALEN); 520 memset(addr, 0, ETH_ALEN);
417 addr[0] = 0x02; 521 addr[0] = 0x02;
418 522
419 for (i = 0; i < hwsim_radio_count; i++) { 523 for (i = 0; i < radios; i++) {
420 printk(KERN_DEBUG "mac80211_hwsim: Initializing radio %d\n", 524 printk(KERN_DEBUG "mac80211_hwsim: Initializing radio %d\n",
421 i); 525 i);
422 hw = ieee80211_alloc_hw(sizeof(*data), &mac80211_hwsim_ops); 526 hw = ieee80211_alloc_hw(sizeof(*data), &mac80211_hwsim_ops);
423 if (hw == NULL) { 527 if (!hw) {
424 printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw " 528 printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw "
425 "failed\n"); 529 "failed\n");
426 err = -ENOMEM; 530 err = -ENOMEM;
427 goto failed; 531 goto failed;
428 } 532 }
429 hwsim_radios[i] = hw;
430
431 data = hw->priv; 533 data = hw->priv;
534 data->hw = hw;
535
432 data->dev = device_create_drvdata(hwsim_class, NULL, 0, hw, 536 data->dev = device_create_drvdata(hwsim_class, NULL, 0, hw,
433 "hwsim%d", i); 537 "hwsim%d", i);
434 if (IS_ERR(data->dev)) { 538 if (IS_ERR(data->dev)) {
@@ -446,7 +550,15 @@ static int __init init_mac80211_hwsim(void)
446 SET_IEEE80211_PERM_ADDR(hw, addr); 550 SET_IEEE80211_PERM_ADDR(hw, addr);
447 551
448 hw->channel_change_time = 1; 552 hw->channel_change_time = 1;
449 hw->queues = 1; 553 hw->queues = 4;
554 hw->wiphy->interface_modes =
555 BIT(NL80211_IFTYPE_STATION) |
556 BIT(NL80211_IFTYPE_AP);
557 hw->ampdu_queues = 1;
558
559 /* ask mac80211 to reserve space for magic */
560 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
561 hw->sta_data_size = sizeof(struct hwsim_sta_priv);
450 562
451 memcpy(data->channels, hwsim_channels, sizeof(hwsim_channels)); 563 memcpy(data->channels, hwsim_channels, sizeof(hwsim_channels));
452 memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates)); 564 memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
@@ -454,6 +566,19 @@ static int __init init_mac80211_hwsim(void)
454 data->band.n_channels = ARRAY_SIZE(hwsim_channels); 566 data->band.n_channels = ARRAY_SIZE(hwsim_channels);
455 data->band.bitrates = data->rates; 567 data->band.bitrates = data->rates;
456 data->band.n_bitrates = ARRAY_SIZE(hwsim_rates); 568 data->band.n_bitrates = ARRAY_SIZE(hwsim_rates);
569 data->band.ht_info.ht_supported = 1;
570 data->band.ht_info.cap = IEEE80211_HT_CAP_SUP_WIDTH |
571 IEEE80211_HT_CAP_GRN_FLD |
572 IEEE80211_HT_CAP_SGI_40 |
573 IEEE80211_HT_CAP_DSSSCCK40;
574 data->band.ht_info.ampdu_factor = 0x3;
575 data->band.ht_info.ampdu_density = 0x6;
576 memset(data->band.ht_info.supp_mcs_set, 0,
577 sizeof(data->band.ht_info.supp_mcs_set));
578 data->band.ht_info.supp_mcs_set[0] = 0xff;
579 data->band.ht_info.supp_mcs_set[1] = 0xff;
580 data->band.ht_info.supp_mcs_set[12] =
581 IEEE80211_HT_CAP_MCS_TX_DEFINED;
457 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &data->band; 582 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &data->band;
458 583
459 err = ieee80211_register_hw(hw); 584 err = ieee80211_register_hw(hw);
@@ -469,6 +594,8 @@ static int __init init_mac80211_hwsim(void)
469 594
470 setup_timer(&data->beacon_timer, mac80211_hwsim_beacon, 595 setup_timer(&data->beacon_timer, mac80211_hwsim_beacon,
471 (unsigned long) hw); 596 (unsigned long) hw);
597
598 list_add_tail(&data->list, &hwsim_radios);
472 } 599 }
473 600
474 hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup); 601 hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
@@ -500,7 +627,6 @@ failed_hw:
500 device_unregister(data->dev); 627 device_unregister(data->dev);
501failed_drvdata: 628failed_drvdata:
502 ieee80211_free_hw(hw); 629 ieee80211_free_hw(hw);
503 hwsim_radios[i] = NULL;
504failed: 630failed:
505 mac80211_hwsim_free(); 631 mac80211_hwsim_free();
506 return err; 632 return err;
@@ -509,8 +635,7 @@ failed:
509 635
510static void __exit exit_mac80211_hwsim(void) 636static void __exit exit_mac80211_hwsim(void)
511{ 637{
512 printk(KERN_DEBUG "mac80211_hwsim: unregister %d radios\n", 638 printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n");
513 hwsim_radio_count);
514 639
515 unregister_netdev(hwsim_mon); 640 unregister_netdev(hwsim_mon);
516 mac80211_hwsim_free(); 641 mac80211_hwsim_free();
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 1ebcafe7ca5f..9a2fcc0163d6 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -79,15 +79,21 @@
79#include <linux/module.h> 79#include <linux/module.h>
80#include <linux/kernel.h> 80#include <linux/kernel.h>
81#include <linux/init.h> 81#include <linux/init.h>
82#include <linux/delay.h>
82#include <linux/netdevice.h> 83#include <linux/netdevice.h>
83#include <linux/etherdevice.h> 84#include <linux/etherdevice.h>
84#include <linux/ethtool.h> 85#include <linux/ethtool.h>
86#include <linux/firmware.h>
85#include <linux/if_arp.h> 87#include <linux/if_arp.h>
86#include <linux/wireless.h> 88#include <linux/wireless.h>
87#include <net/iw_handler.h> 89#include <net/iw_handler.h>
88#include <net/ieee80211.h> 90#include <net/ieee80211.h>
89 91
92#include <linux/scatterlist.h>
93#include <linux/crypto.h>
94
90#include "hermes_rid.h" 95#include "hermes_rid.h"
96#include "hermes_dld.h"
91#include "orinoco.h" 97#include "orinoco.h"
92 98
93/********************************************************************/ 99/********************************************************************/
@@ -241,6 +247,74 @@ static int __orinoco_program_rids(struct net_device *dev);
241static void __orinoco_set_multicast_list(struct net_device *dev); 247static void __orinoco_set_multicast_list(struct net_device *dev);
242 248
243/********************************************************************/ 249/********************************************************************/
250/* Michael MIC crypto setup */
251/********************************************************************/
252#define MICHAEL_MIC_LEN 8
253static int orinoco_mic_init(struct orinoco_private *priv)
254{
255 priv->tx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
256 if (IS_ERR(priv->tx_tfm_mic)) {
257 printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
258 "crypto API michael_mic\n");
259 priv->tx_tfm_mic = NULL;
260 return -ENOMEM;
261 }
262
263 priv->rx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
264 if (IS_ERR(priv->rx_tfm_mic)) {
265 printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
266 "crypto API michael_mic\n");
267 priv->rx_tfm_mic = NULL;
268 return -ENOMEM;
269 }
270
271 return 0;
272}
273
274static void orinoco_mic_free(struct orinoco_private *priv)
275{
276 if (priv->tx_tfm_mic)
277 crypto_free_hash(priv->tx_tfm_mic);
278 if (priv->rx_tfm_mic)
279 crypto_free_hash(priv->rx_tfm_mic);
280}
281
282static int michael_mic(struct crypto_hash *tfm_michael, u8 *key,
283 u8 *da, u8 *sa, u8 priority,
284 u8 *data, size_t data_len, u8 *mic)
285{
286 struct hash_desc desc;
287 struct scatterlist sg[2];
288 u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
289
290 if (tfm_michael == NULL) {
291 printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
292 return -1;
293 }
294
295 /* Copy header into buffer. We need the padding on the end zeroed */
296 memcpy(&hdr[0], da, ETH_ALEN);
297 memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
298 hdr[ETH_ALEN*2] = priority;
299 hdr[ETH_ALEN*2+1] = 0;
300 hdr[ETH_ALEN*2+2] = 0;
301 hdr[ETH_ALEN*2+3] = 0;
302
303 /* Use scatter gather to MIC header and data in one go */
304 sg_init_table(sg, 2);
305 sg_set_buf(&sg[0], hdr, sizeof(hdr));
306 sg_set_buf(&sg[1], data, data_len);
307
308 if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
309 return -1;
310
311 desc.tfm = tfm_michael;
312 desc.flags = 0;
313 return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr),
314 mic);
315}
316
317/********************************************************************/
244/* Internal helper functions */ 318/* Internal helper functions */
245/********************************************************************/ 319/********************************************************************/
246 320
@@ -273,12 +347,19 @@ static inline void set_port_type(struct orinoco_private *priv)
273#define ORINOCO_MAX_BSS_COUNT 64 347#define ORINOCO_MAX_BSS_COUNT 64
274static int orinoco_bss_data_allocate(struct orinoco_private *priv) 348static int orinoco_bss_data_allocate(struct orinoco_private *priv)
275{ 349{
276 if (priv->bss_data) 350 if (priv->bss_xbss_data)
277 return 0; 351 return 0;
278 352
279 priv->bss_data = 353 if (priv->has_ext_scan)
280 kzalloc(ORINOCO_MAX_BSS_COUNT * sizeof(bss_element), GFP_KERNEL); 354 priv->bss_xbss_data = kzalloc(ORINOCO_MAX_BSS_COUNT *
281 if (!priv->bss_data) { 355 sizeof(struct xbss_element),
356 GFP_KERNEL);
357 else
358 priv->bss_xbss_data = kzalloc(ORINOCO_MAX_BSS_COUNT *
359 sizeof(struct bss_element),
360 GFP_KERNEL);
361
362 if (!priv->bss_xbss_data) {
282 printk(KERN_WARNING "Out of memory allocating beacons"); 363 printk(KERN_WARNING "Out of memory allocating beacons");
283 return -ENOMEM; 364 return -ENOMEM;
284 } 365 }
@@ -287,18 +368,319 @@ static int orinoco_bss_data_allocate(struct orinoco_private *priv)
287 368
288static void orinoco_bss_data_free(struct orinoco_private *priv) 369static void orinoco_bss_data_free(struct orinoco_private *priv)
289{ 370{
290 kfree(priv->bss_data); 371 kfree(priv->bss_xbss_data);
291 priv->bss_data = NULL; 372 priv->bss_xbss_data = NULL;
292} 373}
293 374
375#define PRIV_BSS ((struct bss_element *)priv->bss_xbss_data)
376#define PRIV_XBSS ((struct xbss_element *)priv->bss_xbss_data)
294static void orinoco_bss_data_init(struct orinoco_private *priv) 377static void orinoco_bss_data_init(struct orinoco_private *priv)
295{ 378{
296 int i; 379 int i;
297 380
298 INIT_LIST_HEAD(&priv->bss_free_list); 381 INIT_LIST_HEAD(&priv->bss_free_list);
299 INIT_LIST_HEAD(&priv->bss_list); 382 INIT_LIST_HEAD(&priv->bss_list);
300 for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++) 383 if (priv->has_ext_scan)
301 list_add_tail(&priv->bss_data[i].list, &priv->bss_free_list); 384 for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
385 list_add_tail(&(PRIV_XBSS[i].list),
386 &priv->bss_free_list);
387 else
388 for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
389 list_add_tail(&(PRIV_BSS[i].list),
390 &priv->bss_free_list);
391
392}
393
394static inline u8 *orinoco_get_ie(u8 *data, size_t len,
395 enum ieee80211_mfie eid)
396{
397 u8 *p = data;
398 while ((p + 2) < (data + len)) {
399 if (p[0] == eid)
400 return p;
401 p += p[1] + 2;
402 }
403 return NULL;
404}
405
406#define WPA_OUI_TYPE "\x00\x50\xF2\x01"
407#define WPA_SELECTOR_LEN 4
408static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
409{
410 u8 *p = data;
411 while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
412 if ((p[0] == MFIE_TYPE_GENERIC) &&
413 (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
414 return p;
415 p += p[1] + 2;
416 }
417 return NULL;
418}
419
420
421/********************************************************************/
422/* Download functionality */
423/********************************************************************/
424
425struct fw_info {
426 char *pri_fw;
427 char *sta_fw;
428 char *ap_fw;
429 u32 pda_addr;
430 u16 pda_size;
431};
432
433const static struct fw_info orinoco_fw[] = {
434 { "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
435 { "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
436 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 0x100 }
437};
438
439/* Structure used to access fields in FW
440 * Make sure LE decoding macros are used
441 */
442struct orinoco_fw_header {
443 char hdr_vers[6]; /* ASCII string for header version */
444 __le16 headersize; /* Total length of header */
445 __le32 entry_point; /* NIC entry point */
446 __le32 blocks; /* Number of blocks to program */
447 __le32 block_offset; /* Offset of block data from eof header */
448 __le32 pdr_offset; /* Offset to PDR data from eof header */
449 __le32 pri_offset; /* Offset to primary plug data */
450 __le32 compat_offset; /* Offset to compatibility data*/
451 char signature[0]; /* FW signature length headersize-20 */
452} __attribute__ ((packed));
453
454/* Download either STA or AP firmware into the card. */
455static int
456orinoco_dl_firmware(struct orinoco_private *priv,
457 const struct fw_info *fw,
458 int ap)
459{
460 /* Plug Data Area (PDA) */
461 __le16 pda[512] = { 0 };
462
463 hermes_t *hw = &priv->hw;
464 const struct firmware *fw_entry;
465 const struct orinoco_fw_header *hdr;
466 const unsigned char *first_block;
467 const unsigned char *end;
468 const char *firmware;
469 struct net_device *dev = priv->ndev;
470 int err;
471
472 if (ap)
473 firmware = fw->ap_fw;
474 else
475 firmware = fw->sta_fw;
476
477 printk(KERN_DEBUG "%s: Attempting to download firmware %s\n",
478 dev->name, firmware);
479
480 /* Read current plug data */
481 err = hermes_read_pda(hw, pda, fw->pda_addr,
482 min_t(u16, fw->pda_size, sizeof(pda)), 0);
483 printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err);
484 if (err)
485 return err;
486
487 err = request_firmware(&fw_entry, firmware, priv->dev);
488 if (err) {
489 printk(KERN_ERR "%s: Cannot find firmware %s\n",
490 dev->name, firmware);
491 return -ENOENT;
492 }
493
494 hdr = (const struct orinoco_fw_header *) fw_entry->data;
495
496 /* Enable aux port to allow programming */
497 err = hermesi_program_init(hw, le32_to_cpu(hdr->entry_point));
498 printk(KERN_DEBUG "%s: Program init returned %d\n", dev->name, err);
499 if (err != 0)
500 goto abort;
501
502 /* Program data */
503 first_block = (fw_entry->data +
504 le16_to_cpu(hdr->headersize) +
505 le32_to_cpu(hdr->block_offset));
506 end = fw_entry->data + fw_entry->size;
507
508 err = hermes_program(hw, first_block, end);
509 printk(KERN_DEBUG "%s: Program returned %d\n", dev->name, err);
510 if (err != 0)
511 goto abort;
512
513 /* Update production data */
514 first_block = (fw_entry->data +
515 le16_to_cpu(hdr->headersize) +
516 le32_to_cpu(hdr->pdr_offset));
517
518 err = hermes_apply_pda_with_defaults(hw, first_block, pda);
519 printk(KERN_DEBUG "%s: Apply PDA returned %d\n", dev->name, err);
520 if (err)
521 goto abort;
522
523 /* Tell card we've finished */
524 err = hermesi_program_end(hw);
525 printk(KERN_DEBUG "%s: Program end returned %d\n", dev->name, err);
526 if (err != 0)
527 goto abort;
528
529 /* Check if we're running */
530 printk(KERN_DEBUG "%s: hermes_present returned %d\n",
531 dev->name, hermes_present(hw));
532
533abort:
534 release_firmware(fw_entry);
535 return err;
536}
537
538/* End markers */
539#define TEXT_END 0x1A /* End of text header */
540
541/*
542 * Process a firmware image - stop the card, load the firmware, reset
543 * the card and make sure it responds. For the secondary firmware take
544 * care of the PDA - read it and then write it on top of the firmware.
545 */
546static int
547symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
548 const unsigned char *image, const unsigned char *end,
549 int secondary)
550{
551 hermes_t *hw = &priv->hw;
552 int ret;
553 const unsigned char *ptr;
554 const unsigned char *first_block;
555
556 /* Plug Data Area (PDA) */
557 __le16 pda[256];
558
559 /* Binary block begins after the 0x1A marker */
560 ptr = image;
561 while (*ptr++ != TEXT_END);
562 first_block = ptr;
563
564 /* Read the PDA from EEPROM */
565 if (secondary) {
566 ret = hermes_read_pda(hw, pda, fw->pda_addr, sizeof(pda), 1);
567 if (ret)
568 return ret;
569 }
570
571 /* Stop the firmware, so that it can be safely rewritten */
572 if (priv->stop_fw) {
573 ret = priv->stop_fw(priv, 1);
574 if (ret)
575 return ret;
576 }
577
578 /* Program the adapter with new firmware */
579 ret = hermes_program(hw, first_block, end);
580 if (ret)
581 return ret;
582
583 /* Write the PDA to the adapter */
584 if (secondary) {
585 size_t len = hermes_blocks_length(first_block);
586 ptr = first_block + len;
587 ret = hermes_apply_pda(hw, ptr, pda);
588 if (ret)
589 return ret;
590 }
591
592 /* Run the firmware */
593 if (priv->stop_fw) {
594 ret = priv->stop_fw(priv, 0);
595 if (ret)
596 return ret;
597 }
598
599 /* Reset hermes chip and make sure it responds */
600 ret = hermes_init(hw);
601
602 /* hermes_reset() should return 0 with the secondary firmware */
603 if (secondary && ret != 0)
604 return -ENODEV;
605
606 /* And this should work with any firmware */
607 if (!hermes_present(hw))
608 return -ENODEV;
609
610 return 0;
611}
612
613
614/*
615 * Download the firmware into the card, this also does a PCMCIA soft
616 * reset on the card, to make sure it's in a sane state.
617 */
618static int
619symbol_dl_firmware(struct orinoco_private *priv,
620 const struct fw_info *fw)
621{
622 struct net_device *dev = priv->ndev;
623 int ret;
624 const struct firmware *fw_entry;
625
626 if (request_firmware(&fw_entry, fw->pri_fw,
627 priv->dev) != 0) {
628 printk(KERN_ERR "%s: Cannot find firmware: %s\n",
629 dev->name, fw->pri_fw);
630 return -ENOENT;
631 }
632
633 /* Load primary firmware */
634 ret = symbol_dl_image(priv, fw, fw_entry->data,
635 fw_entry->data + fw_entry->size, 0);
636 release_firmware(fw_entry);
637 if (ret) {
638 printk(KERN_ERR "%s: Primary firmware download failed\n",
639 dev->name);
640 return ret;
641 }
642
643 if (request_firmware(&fw_entry, fw->sta_fw,
644 priv->dev) != 0) {
645 printk(KERN_ERR "%s: Cannot find firmware: %s\n",
646 dev->name, fw->sta_fw);
647 return -ENOENT;
648 }
649
650 /* Load secondary firmware */
651 ret = symbol_dl_image(priv, fw, fw_entry->data,
652 fw_entry->data + fw_entry->size, 1);
653 release_firmware(fw_entry);
654 if (ret) {
655 printk(KERN_ERR "%s: Secondary firmware download failed\n",
656 dev->name);
657 }
658
659 return ret;
660}
661
662static int orinoco_download(struct orinoco_private *priv)
663{
664 int err = 0;
665 /* Reload firmware */
666 switch (priv->firmware_type) {
667 case FIRMWARE_TYPE_AGERE:
668 /* case FIRMWARE_TYPE_INTERSIL: */
669 err = orinoco_dl_firmware(priv,
670 &orinoco_fw[priv->firmware_type], 0);
671 break;
672
673 case FIRMWARE_TYPE_SYMBOL:
674 err = symbol_dl_firmware(priv,
675 &orinoco_fw[priv->firmware_type]);
676 break;
677 case FIRMWARE_TYPE_INTERSIL:
678 break;
679 }
680 /* TODO: if we fail we probably need to reinitialise
681 * the driver */
682
683 return err;
302} 684}
303 685
304/********************************************************************/ 686/********************************************************************/
@@ -453,8 +835,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
453 int err = 0; 835 int err = 0;
454 u16 txfid = priv->txfid; 836 u16 txfid = priv->txfid;
455 struct ethhdr *eh; 837 struct ethhdr *eh;
456 int data_off; 838 int tx_control;
457 struct hermes_tx_descriptor desc;
458 unsigned long flags; 839 unsigned long flags;
459 840
460 if (! netif_running(dev)) { 841 if (! netif_running(dev)) {
@@ -486,23 +867,54 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
486 if (skb->len < ETH_HLEN) 867 if (skb->len < ETH_HLEN)
487 goto drop; 868 goto drop;
488 869
489 eh = (struct ethhdr *)skb->data; 870 tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;
490 871
491 memset(&desc, 0, sizeof(desc)); 872 if (priv->encode_alg == IW_ENCODE_ALG_TKIP)
492 desc.tx_control = cpu_to_le16(HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX); 873 tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
493 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), txfid, 0); 874 HERMES_TXCTRL_MIC;
494 if (err) { 875
495 if (net_ratelimit()) 876 if (priv->has_alt_txcntl) {
496 printk(KERN_ERR "%s: Error %d writing Tx descriptor " 877 /* WPA enabled firmwares have tx_cntl at the end of
497 "to BAP\n", dev->name, err); 878 * the 802.11 header. So write zeroed descriptor and
498 goto busy; 879 * 802.11 header at the same time
880 */
881 char desc[HERMES_802_3_OFFSET];
882 __le16 *txcntl = (__le16 *) &desc[HERMES_TXCNTL2_OFFSET];
883
884 memset(&desc, 0, sizeof(desc));
885
886 *txcntl = cpu_to_le16(tx_control);
887 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
888 txfid, 0);
889 if (err) {
890 if (net_ratelimit())
891 printk(KERN_ERR "%s: Error %d writing Tx "
892 "descriptor to BAP\n", dev->name, err);
893 goto busy;
894 }
895 } else {
896 struct hermes_tx_descriptor desc;
897
898 memset(&desc, 0, sizeof(desc));
899
900 desc.tx_control = cpu_to_le16(tx_control);
901 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
902 txfid, 0);
903 if (err) {
904 if (net_ratelimit())
905 printk(KERN_ERR "%s: Error %d writing Tx "
906 "descriptor to BAP\n", dev->name, err);
907 goto busy;
908 }
909
910 /* Clear the 802.11 header and data length fields - some
911 * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
912 * if this isn't done. */
913 hermes_clear_words(hw, HERMES_DATA0,
914 HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
499 } 915 }
500 916
501 /* Clear the 802.11 header and data length fields - some 917 eh = (struct ethhdr *)skb->data;
502 * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
503 * if this isn't done. */
504 hermes_clear_words(hw, HERMES_DATA0,
505 HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
506 918
507 /* Encapsulate Ethernet-II frames */ 919 /* Encapsulate Ethernet-II frames */
508 if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */ 920 if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
@@ -513,33 +925,65 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
513 925
514 /* Strip destination and source from the data */ 926 /* Strip destination and source from the data */
515 skb_pull(skb, 2 * ETH_ALEN); 927 skb_pull(skb, 2 * ETH_ALEN);
516 data_off = HERMES_802_2_OFFSET + sizeof(encaps_hdr);
517 928
518 /* And move them to a separate header */ 929 /* And move them to a separate header */
519 memcpy(&hdr.eth, eh, 2 * ETH_ALEN); 930 memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
520 hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len); 931 hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
521 memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr)); 932 memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
522 933
523 err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr), 934 /* Insert the SNAP header */
524 txfid, HERMES_802_3_OFFSET); 935 if (skb_headroom(skb) < sizeof(hdr)) {
525 if (err) { 936 printk(KERN_ERR
526 if (net_ratelimit()) 937 "%s: Not enough headroom for 802.2 headers %d\n",
527 printk(KERN_ERR "%s: Error %d writing packet " 938 dev->name, skb_headroom(skb));
528 "header to BAP\n", dev->name, err); 939 goto drop;
529 goto busy;
530 } 940 }
531 } else { /* IEEE 802.3 frame */ 941 eh = (struct ethhdr *) skb_push(skb, sizeof(hdr));
532 data_off = HERMES_802_3_OFFSET; 942 memcpy(eh, &hdr, sizeof(hdr));
533 } 943 }
534 944
535 err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len, 945 err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
536 txfid, data_off); 946 txfid, HERMES_802_3_OFFSET);
537 if (err) { 947 if (err) {
538 printk(KERN_ERR "%s: Error %d writing packet to BAP\n", 948 printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
539 dev->name, err); 949 dev->name, err);
540 goto busy; 950 goto busy;
541 } 951 }
542 952
953 /* Calculate Michael MIC */
954 if (priv->encode_alg == IW_ENCODE_ALG_TKIP) {
955 u8 mic_buf[MICHAEL_MIC_LEN + 1];
956 u8 *mic;
957 size_t offset;
958 size_t len;
959
960 if (skb->len % 2) {
961 /* MIC start is on an odd boundary */
962 mic_buf[0] = skb->data[skb->len - 1];
963 mic = &mic_buf[1];
964 offset = skb->len - 1;
965 len = MICHAEL_MIC_LEN + 1;
966 } else {
967 mic = &mic_buf[0];
968 offset = skb->len;
969 len = MICHAEL_MIC_LEN;
970 }
971
972 michael_mic(priv->tx_tfm_mic,
973 priv->tkip_key[priv->tx_key].tx_mic,
974 eh->h_dest, eh->h_source, 0 /* priority */,
975 skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
976
977 /* Write the MIC */
978 err = hermes_bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
979 txfid, HERMES_802_3_OFFSET + offset);
980 if (err) {
981 printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
982 dev->name, err);
983 goto busy;
984 }
985 }
986
543 /* Finally, we actually initiate the send */ 987 /* Finally, we actually initiate the send */
544 netif_stop_queue(dev); 988 netif_stop_queue(dev);
545 989
@@ -554,7 +998,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
554 } 998 }
555 999
556 dev->trans_start = jiffies; 1000 dev->trans_start = jiffies;
557 stats->tx_bytes += data_off + skb->len; 1001 stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
558 goto ok; 1002 goto ok;
559 1003
560 drop: 1004 drop:
@@ -834,21 +1278,48 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
834 stats->rx_dropped++; 1278 stats->rx_dropped++;
835} 1279}
836 1280
1281/* Get tsc from the firmware */
1282static int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key,
1283 u8 *tsc)
1284{
1285 hermes_t *hw = &priv->hw;
1286 int err = 0;
1287 u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE];
1288
1289 if ((key < 0) || (key > 4))
1290 return -EINVAL;
1291
1292 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
1293 sizeof(tsc_arr), NULL, &tsc_arr);
1294 if (!err)
1295 memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
1296
1297 return err;
1298}
1299
837static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw) 1300static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
838{ 1301{
839 struct orinoco_private *priv = netdev_priv(dev); 1302 struct orinoco_private *priv = netdev_priv(dev);
840 struct net_device_stats *stats = &priv->stats; 1303 struct net_device_stats *stats = &priv->stats;
841 struct iw_statistics *wstats = &priv->wstats; 1304 struct iw_statistics *wstats = &priv->wstats;
842 struct sk_buff *skb = NULL; 1305 struct sk_buff *skb = NULL;
843 u16 rxfid, status, fc; 1306 u16 rxfid, status;
844 int length; 1307 int length;
845 struct hermes_rx_descriptor desc; 1308 struct hermes_rx_descriptor *desc;
846 struct ethhdr *hdr; 1309 struct orinoco_rx_data *rx_data;
847 int err; 1310 int err;
848 1311
1312 desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
1313 if (!desc) {
1314 printk(KERN_WARNING
1315 "%s: Can't allocate space for RX descriptor\n",
1316 dev->name);
1317 goto update_stats;
1318 }
1319
849 rxfid = hermes_read_regn(hw, RXFID); 1320 rxfid = hermes_read_regn(hw, RXFID);
850 1321
851 err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc), 1322 err = hermes_bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
852 rxfid, 0); 1323 rxfid, 0);
853 if (err) { 1324 if (err) {
854 printk(KERN_ERR "%s: error %d reading Rx descriptor. " 1325 printk(KERN_ERR "%s: error %d reading Rx descriptor. "
@@ -856,7 +1327,7 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
856 goto update_stats; 1327 goto update_stats;
857 } 1328 }
858 1329
859 status = le16_to_cpu(desc.status); 1330 status = le16_to_cpu(desc->status);
860 1331
861 if (status & HERMES_RXSTAT_BADCRC) { 1332 if (status & HERMES_RXSTAT_BADCRC) {
862 DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n", 1333 DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n",
@@ -867,8 +1338,8 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
867 1338
868 /* Handle frames in monitor mode */ 1339 /* Handle frames in monitor mode */
869 if (priv->iw_mode == IW_MODE_MONITOR) { 1340 if (priv->iw_mode == IW_MODE_MONITOR) {
870 orinoco_rx_monitor(dev, rxfid, &desc); 1341 orinoco_rx_monitor(dev, rxfid, desc);
871 return; 1342 goto out;
872 } 1343 }
873 1344
874 if (status & HERMES_RXSTAT_UNDECRYPTABLE) { 1345 if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
@@ -878,15 +1349,14 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
878 goto update_stats; 1349 goto update_stats;
879 } 1350 }
880 1351
881 length = le16_to_cpu(desc.data_len); 1352 length = le16_to_cpu(desc->data_len);
882 fc = le16_to_cpu(desc.frame_ctl);
883 1353
884 /* Sanity checks */ 1354 /* Sanity checks */
885 if (length < 3) { /* No for even an 802.2 LLC header */ 1355 if (length < 3) { /* No for even an 802.2 LLC header */
886 /* At least on Symbol firmware with PCF we get quite a 1356 /* At least on Symbol firmware with PCF we get quite a
887 lot of these legitimately - Poll frames with no 1357 lot of these legitimately - Poll frames with no
888 data. */ 1358 data. */
889 return; 1359 goto out;
890 } 1360 }
891 if (length > IEEE80211_DATA_LEN) { 1361 if (length > IEEE80211_DATA_LEN) {
892 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n", 1362 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
@@ -895,6 +1365,11 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
895 goto update_stats; 1365 goto update_stats;
896 } 1366 }
897 1367
1368 /* Payload size does not include Michael MIC. Increase payload
1369 * size to read it together with the data. */
1370 if (status & HERMES_RXSTAT_MIC)
1371 length += MICHAEL_MIC_LEN;
1372
898 /* We need space for the packet data itself, plus an ethernet 1373 /* We need space for the packet data itself, plus an ethernet
899 header, plus 2 bytes so we can align the IP header on a 1374 header, plus 2 bytes so we can align the IP header on a
900 32bit boundary, plus 1 byte so we can read in odd length 1375 32bit boundary, plus 1 byte so we can read in odd length
@@ -921,6 +1396,100 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
921 goto drop; 1396 goto drop;
922 } 1397 }
923 1398
1399 /* Add desc and skb to rx queue */
1400 rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC);
1401 if (!rx_data) {
1402 printk(KERN_WARNING "%s: Can't allocate RX packet\n",
1403 dev->name);
1404 goto drop;
1405 }
1406 rx_data->desc = desc;
1407 rx_data->skb = skb;
1408 list_add_tail(&rx_data->list, &priv->rx_list);
1409 tasklet_schedule(&priv->rx_tasklet);
1410
1411 return;
1412
1413drop:
1414 dev_kfree_skb_irq(skb);
1415update_stats:
1416 stats->rx_errors++;
1417 stats->rx_dropped++;
1418out:
1419 kfree(desc);
1420}
1421
1422static void orinoco_rx(struct net_device *dev,
1423 struct hermes_rx_descriptor *desc,
1424 struct sk_buff *skb)
1425{
1426 struct orinoco_private *priv = netdev_priv(dev);
1427 struct net_device_stats *stats = &priv->stats;
1428 u16 status, fc;
1429 int length;
1430 struct ethhdr *hdr;
1431
1432 status = le16_to_cpu(desc->status);
1433 length = le16_to_cpu(desc->data_len);
1434 fc = le16_to_cpu(desc->frame_ctl);
1435
1436 /* Calculate and check MIC */
1437 if (status & HERMES_RXSTAT_MIC) {
1438 int key_id = ((status & HERMES_RXSTAT_MIC_KEY_ID) >>
1439 HERMES_MIC_KEY_ID_SHIFT);
1440 u8 mic[MICHAEL_MIC_LEN];
1441 u8 *rxmic;
1442 u8 *src = (fc & IEEE80211_FCTL_FROMDS) ?
1443 desc->addr3 : desc->addr2;
1444
1445 /* Extract Michael MIC from payload */
1446 rxmic = skb->data + skb->len - MICHAEL_MIC_LEN;
1447
1448 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
1449 length -= MICHAEL_MIC_LEN;
1450
1451 michael_mic(priv->rx_tfm_mic,
1452 priv->tkip_key[key_id].rx_mic,
1453 desc->addr1,
1454 src,
1455 0, /* priority or QoS? */
1456 skb->data,
1457 skb->len,
1458 &mic[0]);
1459
1460 if (memcmp(mic, rxmic,
1461 MICHAEL_MIC_LEN)) {
1462 union iwreq_data wrqu;
1463 struct iw_michaelmicfailure wxmic;
1464 DECLARE_MAC_BUF(mac);
1465
1466 printk(KERN_WARNING "%s: "
1467 "Invalid Michael MIC in data frame from %s, "
1468 "using key %i\n",
1469 dev->name, print_mac(mac, src), key_id);
1470
1471 /* TODO: update stats */
1472
1473 /* Notify userspace */
1474 memset(&wxmic, 0, sizeof(wxmic));
1475 wxmic.flags = key_id & IW_MICFAILURE_KEY_ID;
1476 wxmic.flags |= (desc->addr1[0] & 1) ?
1477 IW_MICFAILURE_GROUP : IW_MICFAILURE_PAIRWISE;
1478 wxmic.src_addr.sa_family = ARPHRD_ETHER;
1479 memcpy(wxmic.src_addr.sa_data, src, ETH_ALEN);
1480
1481 (void) orinoco_hw_get_tkip_iv(priv, key_id,
1482 &wxmic.tsc[0]);
1483
1484 memset(&wrqu, 0, sizeof(wrqu));
1485 wrqu.data.length = sizeof(wxmic);
1486 wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu,
1487 (char *) &wxmic);
1488
1489 goto drop;
1490 }
1491 }
1492
924 /* Handle decapsulation 1493 /* Handle decapsulation
925 * In most cases, the firmware tell us about SNAP frames. 1494 * In most cases, the firmware tell us about SNAP frames.
926 * For some reason, the SNAP frames sent by LinkSys APs 1495 * For some reason, the SNAP frames sent by LinkSys APs
@@ -939,11 +1508,11 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
939 hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN); 1508 hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);
940 hdr->h_proto = htons(length); 1509 hdr->h_proto = htons(length);
941 } 1510 }
942 memcpy(hdr->h_dest, desc.addr1, ETH_ALEN); 1511 memcpy(hdr->h_dest, desc->addr1, ETH_ALEN);
943 if (fc & IEEE80211_FCTL_FROMDS) 1512 if (fc & IEEE80211_FCTL_FROMDS)
944 memcpy(hdr->h_source, desc.addr3, ETH_ALEN); 1513 memcpy(hdr->h_source, desc->addr3, ETH_ALEN);
945 else 1514 else
946 memcpy(hdr->h_source, desc.addr2, ETH_ALEN); 1515 memcpy(hdr->h_source, desc->addr2, ETH_ALEN);
947 1516
948 dev->last_rx = jiffies; 1517 dev->last_rx = jiffies;
949 skb->protocol = eth_type_trans(skb, dev); 1518 skb->protocol = eth_type_trans(skb, dev);
@@ -952,7 +1521,7 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
952 skb->pkt_type = PACKET_OTHERHOST; 1521 skb->pkt_type = PACKET_OTHERHOST;
953 1522
954 /* Process the wireless stats if needed */ 1523 /* Process the wireless stats if needed */
955 orinoco_stat_gather(dev, skb, &desc); 1524 orinoco_stat_gather(dev, skb, desc);
956 1525
957 /* Pass the packet to the networking stack */ 1526 /* Pass the packet to the networking stack */
958 netif_rx(skb); 1527 netif_rx(skb);
@@ -961,13 +1530,33 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
961 1530
962 return; 1531 return;
963 1532
964 drop: 1533 drop:
965 dev_kfree_skb_irq(skb); 1534 dev_kfree_skb(skb);
966 update_stats:
967 stats->rx_errors++; 1535 stats->rx_errors++;
968 stats->rx_dropped++; 1536 stats->rx_dropped++;
969} 1537}
970 1538
1539static void orinoco_rx_isr_tasklet(unsigned long data)
1540{
1541 struct net_device *dev = (struct net_device *) data;
1542 struct orinoco_private *priv = netdev_priv(dev);
1543 struct orinoco_rx_data *rx_data, *temp;
1544 struct hermes_rx_descriptor *desc;
1545 struct sk_buff *skb;
1546
1547 /* extract desc and skb from queue */
1548 list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
1549 desc = rx_data->desc;
1550 skb = rx_data->skb;
1551 list_del(&rx_data->list);
1552 kfree(rx_data);
1553
1554 orinoco_rx(dev, desc, skb);
1555
1556 kfree(desc);
1557 }
1558}
1559
971/********************************************************************/ 1560/********************************************************************/
972/* Rx path (info frames) */ 1561/* Rx path (info frames) */
973/********************************************************************/ 1562/********************************************************************/
@@ -1087,52 +1676,172 @@ static void orinoco_join_ap(struct work_struct *work)
1087} 1676}
1088 1677
1089/* Send new BSSID to userspace */ 1678/* Send new BSSID to userspace */
1090static void orinoco_send_wevents(struct work_struct *work) 1679static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
1091{ 1680{
1092 struct orinoco_private *priv =
1093 container_of(work, struct orinoco_private, wevent_work);
1094 struct net_device *dev = priv->ndev; 1681 struct net_device *dev = priv->ndev;
1095 struct hermes *hw = &priv->hw; 1682 struct hermes *hw = &priv->hw;
1096 union iwreq_data wrqu; 1683 union iwreq_data wrqu;
1097 int err; 1684 int err;
1098 unsigned long flags;
1099
1100 if (orinoco_lock(priv, &flags) != 0)
1101 return;
1102 1685
1103 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID, 1686 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID,
1104 ETH_ALEN, NULL, wrqu.ap_addr.sa_data); 1687 ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
1105 if (err != 0) 1688 if (err != 0)
1106 goto out; 1689 return;
1107 1690
1108 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1691 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1109 1692
1110 /* Send event to user space */ 1693 /* Send event to user space */
1111 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 1694 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
1695}
1112 1696
1113 out: 1697static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
1114 orinoco_unlock(priv, &flags); 1698{
1699 struct net_device *dev = priv->ndev;
1700 struct hermes *hw = &priv->hw;
1701 union iwreq_data wrqu;
1702 int err;
1703 u8 buf[88];
1704 u8 *ie;
1705
1706 if (!priv->has_wpa)
1707 return;
1708
1709 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
1710 sizeof(buf), NULL, &buf);
1711 if (err != 0)
1712 return;
1713
1714 ie = orinoco_get_wpa_ie(buf, sizeof(buf));
1715 if (ie) {
1716 int rem = sizeof(buf) - (ie - &buf[0]);
1717 wrqu.data.length = ie[1] + 2;
1718 if (wrqu.data.length > rem)
1719 wrqu.data.length = rem;
1720
1721 if (wrqu.data.length)
1722 /* Send event to user space */
1723 wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, ie);
1724 }
1115} 1725}
1116 1726
1727static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
1728{
1729 struct net_device *dev = priv->ndev;
1730 struct hermes *hw = &priv->hw;
1731 union iwreq_data wrqu;
1732 int err;
1733 u8 buf[88]; /* TODO: verify max size or IW_GENERIC_IE_MAX */
1734 u8 *ie;
1735
1736 if (!priv->has_wpa)
1737 return;
1738
1739 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO,
1740 sizeof(buf), NULL, &buf);
1741 if (err != 0)
1742 return;
1743
1744 ie = orinoco_get_wpa_ie(buf, sizeof(buf));
1745 if (ie) {
1746 int rem = sizeof(buf) - (ie - &buf[0]);
1747 wrqu.data.length = ie[1] + 2;
1748 if (wrqu.data.length > rem)
1749 wrqu.data.length = rem;
1750
1751 if (wrqu.data.length)
1752 /* Send event to user space */
1753 wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, ie);
1754 }
1755}
1756
1757static void orinoco_send_wevents(struct work_struct *work)
1758{
1759 struct orinoco_private *priv =
1760 container_of(work, struct orinoco_private, wevent_work);
1761 unsigned long flags;
1762
1763 if (orinoco_lock(priv, &flags) != 0)
1764 return;
1765
1766 orinoco_send_assocreqie_wevent(priv);
1767 orinoco_send_assocrespie_wevent(priv);
1768 orinoco_send_bssid_wevent(priv);
1769
1770 orinoco_unlock(priv, &flags);
1771}
1117 1772
1118static inline void orinoco_clear_scan_results(struct orinoco_private *priv, 1773static inline void orinoco_clear_scan_results(struct orinoco_private *priv,
1119 unsigned long scan_age) 1774 unsigned long scan_age)
1120{ 1775{
1121 bss_element *bss; 1776 if (priv->has_ext_scan) {
1122 bss_element *tmp_bss; 1777 struct xbss_element *bss;
1123 1778 struct xbss_element *tmp_bss;
1124 /* Blow away current list of scan results */ 1779
1125 list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) { 1780 /* Blow away current list of scan results */
1126 if (!scan_age || 1781 list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
1127 time_after(jiffies, bss->last_scanned + scan_age)) { 1782 if (!scan_age ||
1128 list_move_tail(&bss->list, &priv->bss_free_list); 1783 time_after(jiffies, bss->last_scanned + scan_age)) {
1129 /* Don't blow away ->list, just BSS data */ 1784 list_move_tail(&bss->list,
1130 memset(bss, 0, sizeof(bss->bss)); 1785 &priv->bss_free_list);
1131 bss->last_scanned = 0; 1786 /* Don't blow away ->list, just BSS data */
1787 memset(&bss->bss, 0, sizeof(bss->bss));
1788 bss->last_scanned = 0;
1789 }
1790 }
1791 } else {
1792 struct bss_element *bss;
1793 struct bss_element *tmp_bss;
1794
1795 /* Blow away current list of scan results */
1796 list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
1797 if (!scan_age ||
1798 time_after(jiffies, bss->last_scanned + scan_age)) {
1799 list_move_tail(&bss->list,
1800 &priv->bss_free_list);
1801 /* Don't blow away ->list, just BSS data */
1802 memset(&bss->bss, 0, sizeof(bss->bss));
1803 bss->last_scanned = 0;
1804 }
1132 } 1805 }
1133 } 1806 }
1134} 1807}
1135 1808
1809static void orinoco_add_ext_scan_result(struct orinoco_private *priv,
1810 struct agere_ext_scan_info *atom)
1811{
1812 struct xbss_element *bss = NULL;
1813 int found = 0;
1814
1815 /* Try to update an existing bss first */
1816 list_for_each_entry(bss, &priv->bss_list, list) {
1817 if (compare_ether_addr(bss->bss.bssid, atom->bssid))
1818 continue;
1819 /* ESSID lengths */
1820 if (bss->bss.data[1] != atom->data[1])
1821 continue;
1822 if (memcmp(&bss->bss.data[2], &atom->data[2],
1823 atom->data[1]))
1824 continue;
1825 found = 1;
1826 break;
1827 }
1828
1829 /* Grab a bss off the free list */
1830 if (!found && !list_empty(&priv->bss_free_list)) {
1831 bss = list_entry(priv->bss_free_list.next,
1832 struct xbss_element, list);
1833 list_del(priv->bss_free_list.next);
1834
1835 list_add_tail(&bss->list, &priv->bss_list);
1836 }
1837
1838 if (bss) {
1839 /* Always update the BSS to get latest beacon info */
1840 memcpy(&bss->bss, atom, sizeof(bss->bss));
1841 bss->last_scanned = jiffies;
1842 }
1843}
1844
1136static int orinoco_process_scan_results(struct net_device *dev, 1845static int orinoco_process_scan_results(struct net_device *dev,
1137 unsigned char *buf, 1846 unsigned char *buf,
1138 int len) 1847 int len)
@@ -1194,7 +1903,7 @@ static int orinoco_process_scan_results(struct net_device *dev,
1194 /* Read the entries one by one */ 1903 /* Read the entries one by one */
1195 for (; offset + atom_len <= len; offset += atom_len) { 1904 for (; offset + atom_len <= len; offset += atom_len) {
1196 int found = 0; 1905 int found = 0;
1197 bss_element *bss = NULL; 1906 struct bss_element *bss = NULL;
1198 1907
1199 /* Get next atom */ 1908 /* Get next atom */
1200 atom = (union hermes_scan_info *) (buf + offset); 1909 atom = (union hermes_scan_info *) (buf + offset);
@@ -1216,7 +1925,7 @@ static int orinoco_process_scan_results(struct net_device *dev,
1216 /* Grab a bss off the free list */ 1925 /* Grab a bss off the free list */
1217 if (!found && !list_empty(&priv->bss_free_list)) { 1926 if (!found && !list_empty(&priv->bss_free_list)) {
1218 bss = list_entry(priv->bss_free_list.next, 1927 bss = list_entry(priv->bss_free_list.next,
1219 bss_element, list); 1928 struct bss_element, list);
1220 list_del(priv->bss_free_list.next); 1929 list_del(priv->bss_free_list.next);
1221 1930
1222 list_add_tail(&bss->list, &priv->bss_list); 1931 list_add_tail(&bss->list, &priv->bss_list);
@@ -1404,6 +2113,63 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1404 kfree(buf); 2113 kfree(buf);
1405 } 2114 }
1406 break; 2115 break;
2116 case HERMES_INQ_CHANNELINFO:
2117 {
2118 struct agere_ext_scan_info *bss;
2119
2120 if (!priv->scan_inprogress) {
2121 printk(KERN_DEBUG "%s: Got chaninfo without scan, "
2122 "len=%d\n", dev->name, len);
2123 break;
2124 }
2125
2126 /* An empty result indicates that the scan is complete */
2127 if (len == 0) {
2128 union iwreq_data wrqu;
2129
2130 /* Scan is no longer in progress */
2131 priv->scan_inprogress = 0;
2132
2133 wrqu.data.length = 0;
2134 wrqu.data.flags = 0;
2135 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
2136 break;
2137 }
2138
2139 /* Sanity check */
2140 else if (len > sizeof(*bss)) {
2141 printk(KERN_WARNING
2142 "%s: Ext scan results too large (%d bytes). "
2143 "Truncating results to %zd bytes.\n",
2144 dev->name, len, sizeof(*bss));
2145 len = sizeof(*bss);
2146 } else if (len < (offsetof(struct agere_ext_scan_info,
2147 data) + 2)) {
2148 /* Drop this result now so we don't have to
2149 * keep checking later */
2150 printk(KERN_WARNING
2151 "%s: Ext scan results too short (%d bytes)\n",
2152 dev->name, len);
2153 break;
2154 }
2155
2156 bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
2157 if (bss == NULL)
2158 break;
2159
2160 /* Read scan data */
2161 err = hermes_bap_pread(hw, IRQ_BAP, (void *) bss, len,
2162 infofid, sizeof(info));
2163 if (err) {
2164 kfree(bss);
2165 break;
2166 }
2167
2168 orinoco_add_ext_scan_result(priv, bss);
2169
2170 kfree(bss);
2171 break;
2172 }
1407 case HERMES_INQ_SEC_STAT_AGERE: 2173 case HERMES_INQ_SEC_STAT_AGERE:
1408 /* Security status (Agere specific) */ 2174 /* Security status (Agere specific) */
1409 /* Ignore this frame for now */ 2175 /* Ignore this frame for now */
@@ -1586,7 +2352,7 @@ static int __orinoco_hw_set_wap(struct orinoco_private *priv)
1586} 2352}
1587 2353
1588/* Change the WEP keys and/or the current keys. Can be called 2354/* Change the WEP keys and/or the current keys. Can be called
1589 * either from __orinoco_hw_setup_wep() or directly from 2355 * either from __orinoco_hw_setup_enc() or directly from
1590 * orinoco_ioctl_setiwencode(). In the later case the association 2356 * orinoco_ioctl_setiwencode(). In the later case the association
1591 * with the AP is not broken (if the firmware can handle it), 2357 * with the AP is not broken (if the firmware can handle it),
1592 * which is needed for 802.1x implementations. */ 2358 * which is needed for 802.1x implementations. */
@@ -1646,14 +2412,16 @@ static int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
1646 return 0; 2412 return 0;
1647} 2413}
1648 2414
1649static int __orinoco_hw_setup_wep(struct orinoco_private *priv) 2415static int __orinoco_hw_setup_enc(struct orinoco_private *priv)
1650{ 2416{
1651 hermes_t *hw = &priv->hw; 2417 hermes_t *hw = &priv->hw;
1652 int err = 0; 2418 int err = 0;
1653 int master_wep_flag; 2419 int master_wep_flag;
1654 int auth_flag; 2420 int auth_flag;
2421 int enc_flag;
1655 2422
1656 if (priv->wep_on) 2423 /* Setup WEP keys for WEP and WPA */
2424 if (priv->encode_alg)
1657 __orinoco_hw_setup_wepkeys(priv); 2425 __orinoco_hw_setup_wepkeys(priv);
1658 2426
1659 if (priv->wep_restrict) 2427 if (priv->wep_restrict)
@@ -1661,9 +2429,16 @@ static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
1661 else 2429 else
1662 auth_flag = HERMES_AUTH_OPEN; 2430 auth_flag = HERMES_AUTH_OPEN;
1663 2431
2432 if (priv->wpa_enabled)
2433 enc_flag = 2;
2434 else if (priv->encode_alg == IW_ENCODE_ALG_WEP)
2435 enc_flag = 1;
2436 else
2437 enc_flag = 0;
2438
1664 switch (priv->firmware_type) { 2439 switch (priv->firmware_type) {
1665 case FIRMWARE_TYPE_AGERE: /* Agere style WEP */ 2440 case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
1666 if (priv->wep_on) { 2441 if (priv->encode_alg == IW_ENCODE_ALG_WEP) {
1667 /* Enable the shared-key authentication. */ 2442 /* Enable the shared-key authentication. */
1668 err = hermes_write_wordrec(hw, USER_BAP, 2443 err = hermes_write_wordrec(hw, USER_BAP,
1669 HERMES_RID_CNFAUTHENTICATION_AGERE, 2444 HERMES_RID_CNFAUTHENTICATION_AGERE,
@@ -1671,14 +2446,24 @@ static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
1671 } 2446 }
1672 err = hermes_write_wordrec(hw, USER_BAP, 2447 err = hermes_write_wordrec(hw, USER_BAP,
1673 HERMES_RID_CNFWEPENABLED_AGERE, 2448 HERMES_RID_CNFWEPENABLED_AGERE,
1674 priv->wep_on); 2449 enc_flag);
1675 if (err) 2450 if (err)
1676 return err; 2451 return err;
2452
2453 if (priv->has_wpa) {
2454 /* Set WPA key management */
2455 err = hermes_write_wordrec(hw, USER_BAP,
2456 HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE,
2457 priv->key_mgmt);
2458 if (err)
2459 return err;
2460 }
2461
1677 break; 2462 break;
1678 2463
1679 case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */ 2464 case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
1680 case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */ 2465 case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
1681 if (priv->wep_on) { 2466 if (priv->encode_alg == IW_ENCODE_ALG_WEP) {
1682 if (priv->wep_restrict || 2467 if (priv->wep_restrict ||
1683 (priv->firmware_type == FIRMWARE_TYPE_SYMBOL)) 2468 (priv->firmware_type == FIRMWARE_TYPE_SYMBOL))
1684 master_wep_flag = HERMES_WEP_PRIVACY_INVOKED | 2469 master_wep_flag = HERMES_WEP_PRIVACY_INVOKED |
@@ -1710,6 +2495,84 @@ static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
1710 return 0; 2495 return 0;
1711} 2496}
1712 2497
2498/* key must be 32 bytes, including the tx and rx MIC keys.
2499 * rsc must be 8 bytes
2500 * tsc must be 8 bytes or NULL
2501 */
2502static int __orinoco_hw_set_tkip_key(hermes_t *hw, int key_idx, int set_tx,
2503 u8 *key, u8 *rsc, u8 *tsc)
2504{
2505 struct {
2506 __le16 idx;
2507 u8 rsc[IW_ENCODE_SEQ_MAX_SIZE];
2508 u8 key[TKIP_KEYLEN];
2509 u8 tx_mic[MIC_KEYLEN];
2510 u8 rx_mic[MIC_KEYLEN];
2511 u8 tsc[IW_ENCODE_SEQ_MAX_SIZE];
2512 } __attribute__ ((packed)) buf;
2513 int ret;
2514 int err;
2515 int k;
2516 u16 xmitting;
2517
2518 key_idx &= 0x3;
2519
2520 if (set_tx)
2521 key_idx |= 0x8000;
2522
2523 buf.idx = cpu_to_le16(key_idx);
2524 memcpy(buf.key, key,
2525 sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic));
2526
2527 if (rsc == NULL)
2528 memset(buf.rsc, 0, sizeof(buf.rsc));
2529 else
2530 memcpy(buf.rsc, rsc, sizeof(buf.rsc));
2531
2532 if (tsc == NULL) {
2533 memset(buf.tsc, 0, sizeof(buf.tsc));
2534 buf.tsc[4] = 0x10;
2535 } else {
2536 memcpy(buf.tsc, tsc, sizeof(buf.tsc));
2537 }
2538
2539 /* Wait upto 100ms for tx queue to empty */
2540 k = 100;
2541 do {
2542 k--;
2543 udelay(1000);
2544 ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY,
2545 &xmitting);
2546 if (ret)
2547 break;
2548 } while ((k > 0) && xmitting);
2549
2550 if (k == 0)
2551 ret = -ETIMEDOUT;
2552
2553 err = HERMES_WRITE_RECORD(hw, USER_BAP,
2554 HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE,
2555 &buf);
2556
2557 return ret ? ret : err;
2558}
2559
2560static int orinoco_clear_tkip_key(struct orinoco_private *priv,
2561 int key_idx)
2562{
2563 hermes_t *hw = &priv->hw;
2564 int err;
2565
2566 memset(&priv->tkip_key[key_idx], 0, sizeof(priv->tkip_key[key_idx]));
2567 err = hermes_write_wordrec(hw, USER_BAP,
2568 HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE,
2569 key_idx);
2570 if (err)
2571 printk(KERN_WARNING "%s: Error %d clearing TKIP key %d\n",
2572 priv->ndev->name, err, key_idx);
2573 return err;
2574}
2575
1713static int __orinoco_program_rids(struct net_device *dev) 2576static int __orinoco_program_rids(struct net_device *dev)
1714{ 2577{
1715 struct orinoco_private *priv = netdev_priv(dev); 2578 struct orinoco_private *priv = netdev_priv(dev);
@@ -1906,10 +2769,10 @@ static int __orinoco_program_rids(struct net_device *dev)
1906 } 2769 }
1907 2770
1908 /* Set up encryption */ 2771 /* Set up encryption */
1909 if (priv->has_wep) { 2772 if (priv->has_wep || priv->has_wpa) {
1910 err = __orinoco_hw_setup_wep(priv); 2773 err = __orinoco_hw_setup_enc(priv);
1911 if (err) { 2774 if (err) {
1912 printk(KERN_ERR "%s: Error %d activating WEP\n", 2775 printk(KERN_ERR "%s: Error %d activating encryption\n",
1913 dev->name, err); 2776 dev->name, err);
1914 return err; 2777 return err;
1915 } 2778 }
@@ -1970,6 +2833,9 @@ __orinoco_set_multicast_list(struct net_device *dev)
1970 priv->promiscuous = promisc; 2833 priv->promiscuous = promisc;
1971 } 2834 }
1972 2835
2836 /* If we're not in promiscuous mode, then we need to set the
2837 * group address if either we want to multicast, or if we were
2838 * multicasting and want to stop */
1973 if (! promisc && (mc_count || priv->mc_count) ) { 2839 if (! promisc && (mc_count || priv->mc_count) ) {
1974 struct dev_mc_list *p = dev->mc_list; 2840 struct dev_mc_list *p = dev->mc_list;
1975 struct hermes_multicast mclist; 2841 struct hermes_multicast mclist;
@@ -1989,9 +2855,10 @@ __orinoco_set_multicast_list(struct net_device *dev)
1989 printk(KERN_WARNING "%s: Multicast list is " 2855 printk(KERN_WARNING "%s: Multicast list is "
1990 "longer than mc_count\n", dev->name); 2856 "longer than mc_count\n", dev->name);
1991 2857
1992 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFGROUPADDRESSES, 2858 err = hermes_write_ltv(hw, USER_BAP,
1993 HERMES_BYTES_TO_RECLEN(priv->mc_count * ETH_ALEN), 2859 HERMES_RID_CNFGROUPADDRESSES,
1994 &mclist); 2860 HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
2861 &mclist);
1995 if (err) 2862 if (err)
1996 printk(KERN_ERR "%s: Error %d setting multicast list.\n", 2863 printk(KERN_ERR "%s: Error %d setting multicast list.\n",
1997 dev->name, err); 2864 dev->name, err);
@@ -2043,6 +2910,12 @@ static void orinoco_reset(struct work_struct *work)
2043 } 2910 }
2044 } 2911 }
2045 2912
2913 if (priv->do_fw_download) {
2914 err = orinoco_download(priv);
2915 if (err)
2916 priv->do_fw_download = 0;
2917 }
2918
2046 err = orinoco_reinit_firmware(dev); 2919 err = orinoco_reinit_firmware(dev);
2047 if (err) { 2920 if (err) {
2048 printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n", 2921 printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
@@ -2254,6 +3127,10 @@ static int determine_firmware(struct net_device *dev)
2254 priv->has_ibss = 1; 3127 priv->has_ibss = 1;
2255 priv->has_wep = 0; 3128 priv->has_wep = 0;
2256 priv->has_big_wep = 0; 3129 priv->has_big_wep = 0;
3130 priv->has_alt_txcntl = 0;
3131 priv->has_ext_scan = 0;
3132 priv->has_wpa = 0;
3133 priv->do_fw_download = 0;
2257 3134
2258 /* Determine capabilities from the firmware version */ 3135 /* Determine capabilities from the firmware version */
2259 switch (priv->firmware_type) { 3136 switch (priv->firmware_type) {
@@ -2273,8 +3150,11 @@ static int determine_firmware(struct net_device *dev)
2273 priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */ 3150 priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */
2274 priv->ibss_port = 1; 3151 priv->ibss_port = 1;
2275 priv->has_hostscan = (firmver >= 0x8000a); 3152 priv->has_hostscan = (firmver >= 0x8000a);
3153 priv->do_fw_download = 1;
2276 priv->broken_monitor = (firmver >= 0x80000); 3154 priv->broken_monitor = (firmver >= 0x80000);
2277 3155 priv->has_alt_txcntl = (firmver >= 0x90000); /* All 9.x ? */
3156 priv->has_ext_scan = (firmver >= 0x90000); /* All 9.x ? */
3157 priv->has_wpa = (firmver >= 0x9002a);
2278 /* Tested with Agere firmware : 3158 /* Tested with Agere firmware :
2279 * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II 3159 * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
2280 * Tested CableTron firmware : 4.32 => Anton */ 3160 * Tested CableTron firmware : 4.32 => Anton */
@@ -2317,6 +3197,21 @@ static int determine_firmware(struct net_device *dev)
2317 firmver >= 0x31000; 3197 firmver >= 0x31000;
2318 priv->has_preamble = (firmver >= 0x20000); 3198 priv->has_preamble = (firmver >= 0x20000);
2319 priv->ibss_port = 4; 3199 priv->ibss_port = 4;
3200
3201 /* Symbol firmware is found on various cards, but
3202 * there has been no attempt to check firmware
3203 * download on non-spectrum_cs based cards.
3204 *
3205 * Given that the Agere firmware download works
3206 * differently, we should avoid doing a firmware
3207 * download with the Symbol algorithm on non-spectrum
3208 * cards.
3209 *
3210 * For now we can identify a spectrum_cs based card
3211 * because it has a firmware reset function.
3212 */
3213 priv->do_fw_download = (priv->stop_fw != NULL);
3214
2320 priv->broken_disableport = (firmver == 0x25013) || 3215 priv->broken_disableport = (firmver == 0x25013) ||
2321 (firmver >= 0x30000 && firmver <= 0x31000); 3216 (firmver >= 0x30000 && firmver <= 0x31000);
2322 priv->has_hostscan = (firmver >= 0x31001) || 3217 priv->has_hostscan = (firmver >= 0x31001) ||
@@ -2387,6 +3282,20 @@ static int orinoco_init(struct net_device *dev)
2387 goto out; 3282 goto out;
2388 } 3283 }
2389 3284
3285 if (priv->do_fw_download) {
3286 err = orinoco_download(priv);
3287 if (err)
3288 priv->do_fw_download = 0;
3289
3290 /* Check firmware version again */
3291 err = determine_firmware(dev);
3292 if (err != 0) {
3293 printk(KERN_ERR "%s: Incompatible firmware, aborting\n",
3294 dev->name);
3295 goto out;
3296 }
3297 }
3298
2390 if (priv->has_port3) 3299 if (priv->has_port3)
2391 printk(KERN_DEBUG "%s: Ad-hoc demo mode supported\n", dev->name); 3300 printk(KERN_DEBUG "%s: Ad-hoc demo mode supported\n", dev->name);
2392 if (priv->has_ibss) 3301 if (priv->has_ibss)
@@ -2399,6 +3308,20 @@ static int orinoco_init(struct net_device *dev)
2399 else 3308 else
2400 printk("40-bit key\n"); 3309 printk("40-bit key\n");
2401 } 3310 }
3311 if (priv->has_wpa) {
3312 printk(KERN_DEBUG "%s: WPA-PSK supported\n", dev->name);
3313 if (orinoco_mic_init(priv)) {
3314 printk(KERN_ERR "%s: Failed to setup MIC crypto "
3315 "algorithm. Disabling WPA support\n", dev->name);
3316 priv->has_wpa = 0;
3317 }
3318 }
3319
3320 /* Now we have the firmware capabilities, allocate appropiate
3321 * sized scan buffers */
3322 if (orinoco_bss_data_allocate(priv))
3323 goto out;
3324 orinoco_bss_data_init(priv);
2402 3325
2403 /* Get the MAC address */ 3326 /* Get the MAC address */
2404 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, 3327 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
@@ -2514,8 +3437,13 @@ static int orinoco_init(struct net_device *dev)
2514 priv->channel = 0; /* use firmware default */ 3437 priv->channel = 0; /* use firmware default */
2515 3438
2516 priv->promiscuous = 0; 3439 priv->promiscuous = 0;
2517 priv->wep_on = 0; 3440 priv->encode_alg = IW_ENCODE_ALG_NONE;
2518 priv->tx_key = 0; 3441 priv->tx_key = 0;
3442 priv->wpa_enabled = 0;
3443 priv->tkip_cm_active = 0;
3444 priv->key_mgmt = 0;
3445 priv->wpa_ie_len = 0;
3446 priv->wpa_ie = NULL;
2519 3447
2520 /* Make the hardware available, as long as it hasn't been 3448 /* Make the hardware available, as long as it hasn't been
2521 * removed elsewhere (e.g. by PCMCIA hot unplug) */ 3449 * removed elsewhere (e.g. by PCMCIA hot unplug) */
@@ -2529,8 +3457,11 @@ static int orinoco_init(struct net_device *dev)
2529 return err; 3457 return err;
2530} 3458}
2531 3459
2532struct net_device *alloc_orinocodev(int sizeof_card, 3460struct net_device
2533 int (*hard_reset)(struct orinoco_private *)) 3461*alloc_orinocodev(int sizeof_card,
3462 struct device *device,
3463 int (*hard_reset)(struct orinoco_private *),
3464 int (*stop_fw)(struct orinoco_private *, int))
2534{ 3465{
2535 struct net_device *dev; 3466 struct net_device *dev;
2536 struct orinoco_private *priv; 3467 struct orinoco_private *priv;
@@ -2545,10 +3476,7 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2545 + sizeof(struct orinoco_private)); 3476 + sizeof(struct orinoco_private));
2546 else 3477 else
2547 priv->card = NULL; 3478 priv->card = NULL;
2548 3479 priv->dev = device;
2549 if (orinoco_bss_data_allocate(priv))
2550 goto err_out_free;
2551 orinoco_bss_data_init(priv);
2552 3480
2553 /* Setup / override net_device fields */ 3481 /* Setup / override net_device fields */
2554 dev->init = orinoco_init; 3482 dev->init = orinoco_init;
@@ -2566,10 +3494,14 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2566 dev->set_multicast_list = orinoco_set_multicast_list; 3494 dev->set_multicast_list = orinoco_set_multicast_list;
2567 /* we use the default eth_mac_addr for setting the MAC addr */ 3495 /* we use the default eth_mac_addr for setting the MAC addr */
2568 3496
3497 /* Reserve space in skb for the SNAP header */
3498 dev->hard_header_len += ENCAPS_OVERHEAD;
3499
2569 /* Set up default callbacks */ 3500 /* Set up default callbacks */
2570 dev->open = orinoco_open; 3501 dev->open = orinoco_open;
2571 dev->stop = orinoco_stop; 3502 dev->stop = orinoco_stop;
2572 priv->hard_reset = hard_reset; 3503 priv->hard_reset = hard_reset;
3504 priv->stop_fw = stop_fw;
2573 3505
2574 spin_lock_init(&priv->lock); 3506 spin_lock_init(&priv->lock);
2575 priv->open = 0; 3507 priv->open = 0;
@@ -2580,20 +3512,27 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2580 INIT_WORK(&priv->join_work, orinoco_join_ap); 3512 INIT_WORK(&priv->join_work, orinoco_join_ap);
2581 INIT_WORK(&priv->wevent_work, orinoco_send_wevents); 3513 INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
2582 3514
3515 INIT_LIST_HEAD(&priv->rx_list);
3516 tasklet_init(&priv->rx_tasklet, orinoco_rx_isr_tasklet,
3517 (unsigned long) dev);
3518
2583 netif_carrier_off(dev); 3519 netif_carrier_off(dev);
2584 priv->last_linkstatus = 0xffff; 3520 priv->last_linkstatus = 0xffff;
2585 3521
2586 return dev; 3522 return dev;
2587
2588err_out_free:
2589 free_netdev(dev);
2590 return NULL;
2591} 3523}
2592 3524
2593void free_orinocodev(struct net_device *dev) 3525void free_orinocodev(struct net_device *dev)
2594{ 3526{
2595 struct orinoco_private *priv = netdev_priv(dev); 3527 struct orinoco_private *priv = netdev_priv(dev);
2596 3528
3529 /* No need to empty priv->rx_list: if the tasklet is scheduled
3530 * when we call tasklet_kill it will run one final time,
3531 * emptying the list */
3532 tasklet_kill(&priv->rx_tasklet);
3533 priv->wpa_ie_len = 0;
3534 kfree(priv->wpa_ie);
3535 orinoco_mic_free(priv);
2597 orinoco_bss_data_free(priv); 3536 orinoco_bss_data_free(priv);
2598 free_netdev(dev); 3537 free_netdev(dev);
2599} 3538}
@@ -2905,7 +3844,7 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
2905 memset(range, 0, sizeof(struct iw_range)); 3844 memset(range, 0, sizeof(struct iw_range));
2906 3845
2907 range->we_version_compiled = WIRELESS_EXT; 3846 range->we_version_compiled = WIRELESS_EXT;
2908 range->we_version_source = 14; 3847 range->we_version_source = 22;
2909 3848
2910 /* Set available channels/frequencies */ 3849 /* Set available channels/frequencies */
2911 range->num_channels = NUM_CHANNELS; 3850 range->num_channels = NUM_CHANNELS;
@@ -2935,6 +3874,9 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
2935 } 3874 }
2936 } 3875 }
2937 3876
3877 if (priv->has_wpa)
3878 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_CIPHER_TKIP;
3879
2938 if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))){ 3880 if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))){
2939 /* Quality stats meaningless in ad-hoc mode */ 3881 /* Quality stats meaningless in ad-hoc mode */
2940 } else { 3882 } else {
@@ -2982,6 +3924,11 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
2982 range->min_r_time = 0; 3924 range->min_r_time = 0;
2983 range->max_r_time = 65535 * 1000; /* ??? */ 3925 range->max_r_time = 65535 * 1000; /* ??? */
2984 3926
3927 if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
3928 range->scan_capa = IW_SCAN_CAPA_ESSID;
3929 else
3930 range->scan_capa = IW_SCAN_CAPA_NONE;
3931
2985 /* Event capability (kernel) */ 3932 /* Event capability (kernel) */
2986 IW_EVENT_CAPA_SET_KERNEL(range->event_capa); 3933 IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
2987 /* Event capability (driver) */ 3934 /* Event capability (driver) */
@@ -3001,7 +3948,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
3001 struct orinoco_private *priv = netdev_priv(dev); 3948 struct orinoco_private *priv = netdev_priv(dev);
3002 int index = (erq->flags & IW_ENCODE_INDEX) - 1; 3949 int index = (erq->flags & IW_ENCODE_INDEX) - 1;
3003 int setindex = priv->tx_key; 3950 int setindex = priv->tx_key;
3004 int enable = priv->wep_on; 3951 int encode_alg = priv->encode_alg;
3005 int restricted = priv->wep_restrict; 3952 int restricted = priv->wep_restrict;
3006 u16 xlen = 0; 3953 u16 xlen = 0;
3007 int err = -EINPROGRESS; /* Call commit handler */ 3954 int err = -EINPROGRESS; /* Call commit handler */
@@ -3022,6 +3969,10 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
3022 if (orinoco_lock(priv, &flags) != 0) 3969 if (orinoco_lock(priv, &flags) != 0)
3023 return -EBUSY; 3970 return -EBUSY;
3024 3971
3972 /* Clear any TKIP key we have */
3973 if ((priv->has_wpa) && (priv->encode_alg == IW_ENCODE_ALG_TKIP))
3974 (void) orinoco_clear_tkip_key(priv, setindex);
3975
3025 if (erq->length > 0) { 3976 if (erq->length > 0) {
3026 if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) 3977 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
3027 index = priv->tx_key; 3978 index = priv->tx_key;
@@ -3035,9 +3986,9 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
3035 xlen = 0; 3986 xlen = 0;
3036 3987
3037 /* Switch on WEP if off */ 3988 /* Switch on WEP if off */
3038 if ((!enable) && (xlen > 0)) { 3989 if ((encode_alg != IW_ENCODE_ALG_WEP) && (xlen > 0)) {
3039 setindex = index; 3990 setindex = index;
3040 enable = 1; 3991 encode_alg = IW_ENCODE_ALG_WEP;
3041 } 3992 }
3042 } else { 3993 } else {
3043 /* Important note : if the user do "iwconfig eth0 enc off", 3994 /* Important note : if the user do "iwconfig eth0 enc off",
@@ -3059,7 +4010,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
3059 } 4010 }
3060 4011
3061 if (erq->flags & IW_ENCODE_DISABLED) 4012 if (erq->flags & IW_ENCODE_DISABLED)
3062 enable = 0; 4013 encode_alg = IW_ENCODE_ALG_NONE;
3063 if (erq->flags & IW_ENCODE_OPEN) 4014 if (erq->flags & IW_ENCODE_OPEN)
3064 restricted = 0; 4015 restricted = 0;
3065 if (erq->flags & IW_ENCODE_RESTRICTED) 4016 if (erq->flags & IW_ENCODE_RESTRICTED)
@@ -3074,14 +4025,15 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
3074 priv->tx_key = setindex; 4025 priv->tx_key = setindex;
3075 4026
3076 /* Try fast key change if connected and only keys are changed */ 4027 /* Try fast key change if connected and only keys are changed */
3077 if (priv->wep_on && enable && (priv->wep_restrict == restricted) && 4028 if ((priv->encode_alg == encode_alg) &&
4029 (priv->wep_restrict == restricted) &&
3078 netif_carrier_ok(dev)) { 4030 netif_carrier_ok(dev)) {
3079 err = __orinoco_hw_setup_wepkeys(priv); 4031 err = __orinoco_hw_setup_wepkeys(priv);
3080 /* No need to commit if successful */ 4032 /* No need to commit if successful */
3081 goto out; 4033 goto out;
3082 } 4034 }
3083 4035
3084 priv->wep_on = enable; 4036 priv->encode_alg = encode_alg;
3085 priv->wep_restrict = restricted; 4037 priv->wep_restrict = restricted;
3086 4038
3087 out: 4039 out:
@@ -3110,7 +4062,7 @@ static int orinoco_ioctl_getiwencode(struct net_device *dev,
3110 index = priv->tx_key; 4062 index = priv->tx_key;
3111 4063
3112 erq->flags = 0; 4064 erq->flags = 0;
3113 if (! priv->wep_on) 4065 if (!priv->encode_alg)
3114 erq->flags |= IW_ENCODE_DISABLED; 4066 erq->flags |= IW_ENCODE_DISABLED;
3115 erq->flags |= index + 1; 4067 erq->flags |= index + 1;
3116 4068
@@ -3685,6 +4637,399 @@ static int orinoco_ioctl_getpower(struct net_device *dev,
3685 return err; 4637 return err;
3686} 4638}
3687 4639
4640static int orinoco_ioctl_set_encodeext(struct net_device *dev,
4641 struct iw_request_info *info,
4642 union iwreq_data *wrqu,
4643 char *extra)
4644{
4645 struct orinoco_private *priv = netdev_priv(dev);
4646 struct iw_point *encoding = &wrqu->encoding;
4647 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
4648 int idx, alg = ext->alg, set_key = 1;
4649 unsigned long flags;
4650 int err = -EINVAL;
4651 u16 key_len;
4652
4653 if (orinoco_lock(priv, &flags) != 0)
4654 return -EBUSY;
4655
4656 /* Determine and validate the key index */
4657 idx = encoding->flags & IW_ENCODE_INDEX;
4658 if (idx) {
4659 if ((idx < 1) || (idx > WEP_KEYS))
4660 goto out;
4661 idx--;
4662 } else
4663 idx = priv->tx_key;
4664
4665 if (encoding->flags & IW_ENCODE_DISABLED)
4666 alg = IW_ENCODE_ALG_NONE;
4667
4668 if (priv->has_wpa && (alg != IW_ENCODE_ALG_TKIP)) {
4669 /* Clear any TKIP TX key we had */
4670 (void) orinoco_clear_tkip_key(priv, priv->tx_key);
4671 }
4672
4673 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
4674 priv->tx_key = idx;
4675 set_key = ((alg == IW_ENCODE_ALG_TKIP) ||
4676 (ext->key_len > 0)) ? 1 : 0;
4677 }
4678
4679 if (set_key) {
4680 /* Set the requested key first */
4681 switch (alg) {
4682 case IW_ENCODE_ALG_NONE:
4683 priv->encode_alg = alg;
4684 priv->keys[idx].len = 0;
4685 break;
4686
4687 case IW_ENCODE_ALG_WEP:
4688 if (ext->key_len > SMALL_KEY_SIZE)
4689 key_len = LARGE_KEY_SIZE;
4690 else if (ext->key_len > 0)
4691 key_len = SMALL_KEY_SIZE;
4692 else
4693 goto out;
4694
4695 priv->encode_alg = alg;
4696 priv->keys[idx].len = cpu_to_le16(key_len);
4697
4698 key_len = min(ext->key_len, key_len);
4699
4700 memset(priv->keys[idx].data, 0, ORINOCO_MAX_KEY_SIZE);
4701 memcpy(priv->keys[idx].data, ext->key, key_len);
4702 break;
4703
4704 case IW_ENCODE_ALG_TKIP:
4705 {
4706 hermes_t *hw = &priv->hw;
4707 u8 *tkip_iv = NULL;
4708
4709 if (!priv->has_wpa ||
4710 (ext->key_len > sizeof(priv->tkip_key[0])))
4711 goto out;
4712
4713 priv->encode_alg = alg;
4714 memset(&priv->tkip_key[idx], 0,
4715 sizeof(priv->tkip_key[idx]));
4716 memcpy(&priv->tkip_key[idx], ext->key, ext->key_len);
4717
4718 if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
4719 tkip_iv = &ext->rx_seq[0];
4720
4721 err = __orinoco_hw_set_tkip_key(hw, idx,
4722 ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
4723 (u8 *) &priv->tkip_key[idx],
4724 tkip_iv, NULL);
4725 if (err)
4726 printk(KERN_ERR "%s: Error %d setting TKIP key"
4727 "\n", dev->name, err);
4728
4729 goto out;
4730 }
4731 default:
4732 goto out;
4733 }
4734 }
4735 err = -EINPROGRESS;
4736 out:
4737 orinoco_unlock(priv, &flags);
4738
4739 return err;
4740}
4741
4742static int orinoco_ioctl_get_encodeext(struct net_device *dev,
4743 struct iw_request_info *info,
4744 union iwreq_data *wrqu,
4745 char *extra)
4746{
4747 struct orinoco_private *priv = netdev_priv(dev);
4748 struct iw_point *encoding = &wrqu->encoding;
4749 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
4750 int idx, max_key_len;
4751 unsigned long flags;
4752 int err;
4753
4754 if (orinoco_lock(priv, &flags) != 0)
4755 return -EBUSY;
4756
4757 err = -EINVAL;
4758 max_key_len = encoding->length - sizeof(*ext);
4759 if (max_key_len < 0)
4760 goto out;
4761
4762 idx = encoding->flags & IW_ENCODE_INDEX;
4763 if (idx) {
4764 if ((idx < 1) || (idx > WEP_KEYS))
4765 goto out;
4766 idx--;
4767 } else
4768 idx = priv->tx_key;
4769
4770 encoding->flags = idx + 1;
4771 memset(ext, 0, sizeof(*ext));
4772
4773 ext->alg = priv->encode_alg;
4774 switch (priv->encode_alg) {
4775 case IW_ENCODE_ALG_NONE:
4776 ext->key_len = 0;
4777 encoding->flags |= IW_ENCODE_DISABLED;
4778 break;
4779 case IW_ENCODE_ALG_WEP:
4780 ext->key_len = min_t(u16, le16_to_cpu(priv->keys[idx].len),
4781 max_key_len);
4782 memcpy(ext->key, priv->keys[idx].data, ext->key_len);
4783 encoding->flags |= IW_ENCODE_ENABLED;
4784 break;
4785 case IW_ENCODE_ALG_TKIP:
4786 ext->key_len = min_t(u16, sizeof(struct orinoco_tkip_key),
4787 max_key_len);
4788 memcpy(ext->key, &priv->tkip_key[idx], ext->key_len);
4789 encoding->flags |= IW_ENCODE_ENABLED;
4790 break;
4791 }
4792
4793 err = 0;
4794 out:
4795 orinoco_unlock(priv, &flags);
4796
4797 return err;
4798}
4799
4800static int orinoco_ioctl_set_auth(struct net_device *dev,
4801 struct iw_request_info *info,
4802 union iwreq_data *wrqu, char *extra)
4803{
4804 struct orinoco_private *priv = netdev_priv(dev);
4805 hermes_t *hw = &priv->hw;
4806 struct iw_param *param = &wrqu->param;
4807 unsigned long flags;
4808 int ret = -EINPROGRESS;
4809
4810 if (orinoco_lock(priv, &flags) != 0)
4811 return -EBUSY;
4812
4813 switch (param->flags & IW_AUTH_INDEX) {
4814 case IW_AUTH_WPA_VERSION:
4815 case IW_AUTH_CIPHER_PAIRWISE:
4816 case IW_AUTH_CIPHER_GROUP:
4817 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
4818 case IW_AUTH_PRIVACY_INVOKED:
4819 case IW_AUTH_DROP_UNENCRYPTED:
4820 /*
4821 * orinoco does not use these parameters
4822 */
4823 break;
4824
4825 case IW_AUTH_KEY_MGMT:
4826 /* wl_lkm implies value 2 == PSK for Hermes I
4827 * which ties in with WEXT
4828 * no other hints tho :(
4829 */
4830 priv->key_mgmt = param->value;
4831 break;
4832
4833 case IW_AUTH_TKIP_COUNTERMEASURES:
4834 /* When countermeasures are enabled, shut down the
4835 * card; when disabled, re-enable the card. This must
4836 * take effect immediately.
4837 *
4838 * TODO: Make sure that the EAPOL message is getting
4839 * out before card disabled
4840 */
4841 if (param->value) {
4842 priv->tkip_cm_active = 1;
4843 ret = hermes_enable_port(hw, 0);
4844 } else {
4845 priv->tkip_cm_active = 0;
4846 ret = hermes_disable_port(hw, 0);
4847 }
4848 break;
4849
4850 case IW_AUTH_80211_AUTH_ALG:
4851 if (param->value & IW_AUTH_ALG_SHARED_KEY)
4852 priv->wep_restrict = 1;
4853 else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM)
4854 priv->wep_restrict = 0;
4855 else
4856 ret = -EINVAL;
4857 break;
4858
4859 case IW_AUTH_WPA_ENABLED:
4860 if (priv->has_wpa) {
4861 priv->wpa_enabled = param->value ? 1 : 0;
4862 } else {
4863 if (param->value)
4864 ret = -EOPNOTSUPP;
4865 /* else silently accept disable of WPA */
4866 priv->wpa_enabled = 0;
4867 }
4868 break;
4869
4870 default:
4871 ret = -EOPNOTSUPP;
4872 }
4873
4874 orinoco_unlock(priv, &flags);
4875 return ret;
4876}
4877
4878static int orinoco_ioctl_get_auth(struct net_device *dev,
4879 struct iw_request_info *info,
4880 union iwreq_data *wrqu, char *extra)
4881{
4882 struct orinoco_private *priv = netdev_priv(dev);
4883 struct iw_param *param = &wrqu->param;
4884 unsigned long flags;
4885 int ret = 0;
4886
4887 if (orinoco_lock(priv, &flags) != 0)
4888 return -EBUSY;
4889
4890 switch (param->flags & IW_AUTH_INDEX) {
4891 case IW_AUTH_KEY_MGMT:
4892 param->value = priv->key_mgmt;
4893 break;
4894
4895 case IW_AUTH_TKIP_COUNTERMEASURES:
4896 param->value = priv->tkip_cm_active;
4897 break;
4898
4899 case IW_AUTH_80211_AUTH_ALG:
4900 if (priv->wep_restrict)
4901 param->value = IW_AUTH_ALG_SHARED_KEY;
4902 else
4903 param->value = IW_AUTH_ALG_OPEN_SYSTEM;
4904 break;
4905
4906 case IW_AUTH_WPA_ENABLED:
4907 param->value = priv->wpa_enabled;
4908 break;
4909
4910 default:
4911 ret = -EOPNOTSUPP;
4912 }
4913
4914 orinoco_unlock(priv, &flags);
4915 return ret;
4916}
4917
4918static int orinoco_ioctl_set_genie(struct net_device *dev,
4919 struct iw_request_info *info,
4920 union iwreq_data *wrqu, char *extra)
4921{
4922 struct orinoco_private *priv = netdev_priv(dev);
4923 u8 *buf;
4924 unsigned long flags;
4925 int err = 0;
4926
4927 if ((wrqu->data.length > MAX_WPA_IE_LEN) ||
4928 (wrqu->data.length && (extra == NULL)))
4929 return -EINVAL;
4930
4931 if (orinoco_lock(priv, &flags) != 0)
4932 return -EBUSY;
4933
4934 if (wrqu->data.length) {
4935 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
4936 if (buf == NULL) {
4937 err = -ENOMEM;
4938 goto out;
4939 }
4940
4941 memcpy(buf, extra, wrqu->data.length);
4942 kfree(priv->wpa_ie);
4943 priv->wpa_ie = buf;
4944 priv->wpa_ie_len = wrqu->data.length;
4945 } else {
4946 kfree(priv->wpa_ie);
4947 priv->wpa_ie = NULL;
4948 priv->wpa_ie_len = 0;
4949 }
4950
4951 if (priv->wpa_ie) {
4952 /* Looks like wl_lkm wants to check the auth alg, and
4953 * somehow pass it to the firmware.
4954 * Instead it just calls the key mgmt rid
4955 * - we do this in set auth.
4956 */
4957 }
4958
4959out:
4960 orinoco_unlock(priv, &flags);
4961 return err;
4962}
4963
4964static int orinoco_ioctl_get_genie(struct net_device *dev,
4965 struct iw_request_info *info,
4966 union iwreq_data *wrqu, char *extra)
4967{
4968 struct orinoco_private *priv = netdev_priv(dev);
4969 unsigned long flags;
4970 int err = 0;
4971
4972 if (orinoco_lock(priv, &flags) != 0)
4973 return -EBUSY;
4974
4975 if ((priv->wpa_ie_len == 0) || (priv->wpa_ie == NULL)) {
4976 wrqu->data.length = 0;
4977 goto out;
4978 }
4979
4980 if (wrqu->data.length < priv->wpa_ie_len) {
4981 err = -E2BIG;
4982 goto out;
4983 }
4984
4985 wrqu->data.length = priv->wpa_ie_len;
4986 memcpy(extra, priv->wpa_ie, priv->wpa_ie_len);
4987
4988out:
4989 orinoco_unlock(priv, &flags);
4990 return err;
4991}
4992
4993static int orinoco_ioctl_set_mlme(struct net_device *dev,
4994 struct iw_request_info *info,
4995 union iwreq_data *wrqu, char *extra)
4996{
4997 struct orinoco_private *priv = netdev_priv(dev);
4998 hermes_t *hw = &priv->hw;
4999 struct iw_mlme *mlme = (struct iw_mlme *)extra;
5000 unsigned long flags;
5001 int ret = 0;
5002
5003 if (orinoco_lock(priv, &flags) != 0)
5004 return -EBUSY;
5005
5006 switch (mlme->cmd) {
5007 case IW_MLME_DEAUTH:
5008 /* silently ignore */
5009 break;
5010
5011 case IW_MLME_DISASSOC:
5012 {
5013 struct {
5014 u8 addr[ETH_ALEN];
5015 __le16 reason_code;
5016 } __attribute__ ((packed)) buf;
5017
5018 memcpy(buf.addr, mlme->addr.sa_data, ETH_ALEN);
5019 buf.reason_code = cpu_to_le16(mlme->reason_code);
5020 ret = HERMES_WRITE_RECORD(hw, USER_BAP,
5021 HERMES_RID_CNFDISASSOCIATE,
5022 &buf);
5023 break;
5024 }
5025 default:
5026 ret = -EOPNOTSUPP;
5027 }
5028
5029 orinoco_unlock(priv, &flags);
5030 return ret;
5031}
5032
3688static int orinoco_ioctl_getretry(struct net_device *dev, 5033static int orinoco_ioctl_getretry(struct net_device *dev,
3689 struct iw_request_info *info, 5034 struct iw_request_info *info,
3690 struct iw_param *rrq, 5035 struct iw_param *rrq,
@@ -3943,7 +5288,7 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
3943 return err; 5288 return err;
3944} 5289}
3945 5290
3946/* Trigger a scan (look for other cells in the vicinity */ 5291/* Trigger a scan (look for other cells in the vicinity) */
3947static int orinoco_ioctl_setscan(struct net_device *dev, 5292static int orinoco_ioctl_setscan(struct net_device *dev,
3948 struct iw_request_info *info, 5293 struct iw_request_info *info,
3949 struct iw_param *srq, 5294 struct iw_param *srq,
@@ -3951,6 +5296,7 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
3951{ 5296{
3952 struct orinoco_private *priv = netdev_priv(dev); 5297 struct orinoco_private *priv = netdev_priv(dev);
3953 hermes_t *hw = &priv->hw; 5298 hermes_t *hw = &priv->hw;
5299 struct iw_scan_req *si = (struct iw_scan_req *) extra;
3954 int err = 0; 5300 int err = 0;
3955 unsigned long flags; 5301 unsigned long flags;
3956 5302
@@ -3982,7 +5328,6 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
3982 * we access scan variables in priv is critical. 5328 * we access scan variables in priv is critical.
3983 * o scan_inprogress : not touched by irq handler 5329 * o scan_inprogress : not touched by irq handler
3984 * o scan_mode : not touched by irq handler 5330 * o scan_mode : not touched by irq handler
3985 * o scan_len : synchronised with scan_result
3986 * Before modifying anything on those variables, please think hard ! 5331 * Before modifying anything on those variables, please think hard !
3987 * Jean II */ 5332 * Jean II */
3988 5333
@@ -4012,13 +5357,43 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
4012 } 5357 }
4013 break; 5358 break;
4014 case FIRMWARE_TYPE_AGERE: 5359 case FIRMWARE_TYPE_AGERE:
4015 err = hermes_write_wordrec(hw, USER_BAP, 5360 if (priv->scan_mode & IW_SCAN_THIS_ESSID) {
5361 struct hermes_idstring idbuf;
5362 size_t len = min(sizeof(idbuf.val),
5363 (size_t) si->essid_len);
5364 idbuf.len = cpu_to_le16(len);
5365 memcpy(idbuf.val, si->essid, len);
5366
5367 err = hermes_write_ltv(hw, USER_BAP,
5368 HERMES_RID_CNFSCANSSID_AGERE,
5369 HERMES_BYTES_TO_RECLEN(len + 2),
5370 &idbuf);
5371 } else
5372 err = hermes_write_wordrec(hw, USER_BAP,
4016 HERMES_RID_CNFSCANSSID_AGERE, 5373 HERMES_RID_CNFSCANSSID_AGERE,
4017 0); /* Any ESSID */ 5374 0); /* Any ESSID */
4018 if (err) 5375 if (err)
4019 break; 5376 break;
4020 5377
4021 err = hermes_inquire(hw, HERMES_INQ_SCAN); 5378 if (priv->has_ext_scan) {
5379 /* Clear scan results at the start of
5380 * an extended scan */
5381 orinoco_clear_scan_results(priv,
5382 msecs_to_jiffies(15000));
5383
5384 /* TODO: Is this available on older firmware?
5385 * Can we use it to scan specific channels
5386 * for IW_SCAN_THIS_FREQ? */
5387 err = hermes_write_wordrec(hw, USER_BAP,
5388 HERMES_RID_CNFSCANCHANNELS2GHZ,
5389 0x7FFF);
5390 if (err)
5391 goto out;
5392
5393 err = hermes_inquire(hw,
5394 HERMES_INQ_CHANNELINFO);
5395 } else
5396 err = hermes_inquire(hw, HERMES_INQ_SCAN);
4022 break; 5397 break;
4023 } 5398 }
4024 } else 5399 } else
@@ -4036,8 +5411,7 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
4036#define MAX_CUSTOM_LEN 64 5411#define MAX_CUSTOM_LEN 64
4037 5412
4038/* Translate scan data returned from the card to a card independant 5413/* Translate scan data returned from the card to a card independant
4039 * format that the Wireless Tools will understand - Jean II 5414 * format that the Wireless Tools will understand - Jean II */
4040 * Return message length or -errno for fatal errors */
4041static inline char *orinoco_translate_scan(struct net_device *dev, 5415static inline char *orinoco_translate_scan(struct net_device *dev,
4042 struct iw_request_info *info, 5416 struct iw_request_info *info,
4043 char *current_ev, 5417 char *current_ev,
@@ -4049,9 +5423,10 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4049 u16 capabilities; 5423 u16 capabilities;
4050 u16 channel; 5424 u16 channel;
4051 struct iw_event iwe; /* Temporary buffer */ 5425 struct iw_event iwe; /* Temporary buffer */
4052 char *p;
4053 char custom[MAX_CUSTOM_LEN]; 5426 char custom[MAX_CUSTOM_LEN];
4054 5427
5428 memset(&iwe, 0, sizeof(iwe));
5429
4055 /* First entry *MUST* be the AP MAC address */ 5430 /* First entry *MUST* be the AP MAC address */
4056 iwe.cmd = SIOCGIWAP; 5431 iwe.cmd = SIOCGIWAP;
4057 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 5432 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
@@ -4073,8 +5448,8 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4073 /* Add mode */ 5448 /* Add mode */
4074 iwe.cmd = SIOCGIWMODE; 5449 iwe.cmd = SIOCGIWMODE;
4075 capabilities = le16_to_cpu(bss->a.capabilities); 5450 capabilities = le16_to_cpu(bss->a.capabilities);
4076 if (capabilities & 0x3) { 5451 if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
4077 if (capabilities & 0x1) 5452 if (capabilities & WLAN_CAPABILITY_ESS)
4078 iwe.u.mode = IW_MODE_MASTER; 5453 iwe.u.mode = IW_MODE_MASTER;
4079 else 5454 else
4080 iwe.u.mode = IW_MODE_ADHOC; 5455 iwe.u.mode = IW_MODE_ADHOC;
@@ -4084,17 +5459,22 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4084 5459
4085 channel = bss->s.channel; 5460 channel = bss->s.channel;
4086 if ((channel >= 1) && (channel <= NUM_CHANNELS)) { 5461 if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
4087 /* Add frequency */ 5462 /* Add channel and frequency */
4088 iwe.cmd = SIOCGIWFREQ; 5463 iwe.cmd = SIOCGIWFREQ;
5464 iwe.u.freq.m = channel;
5465 iwe.u.freq.e = 0;
5466 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5467 &iwe, IW_EV_FREQ_LEN);
5468
4089 iwe.u.freq.m = channel_frequency[channel-1] * 100000; 5469 iwe.u.freq.m = channel_frequency[channel-1] * 100000;
4090 iwe.u.freq.e = 1; 5470 iwe.u.freq.e = 1;
4091 current_ev = iwe_stream_add_event(info, current_ev, end_buf, 5471 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
4092 &iwe, IW_EV_FREQ_LEN); 5472 &iwe, IW_EV_FREQ_LEN);
4093 } 5473 }
4094 5474
4095 /* Add quality statistics */ 5475 /* Add quality statistics. level and noise in dB. No link quality */
4096 iwe.cmd = IWEVQUAL; 5476 iwe.cmd = IWEVQUAL;
4097 iwe.u.qual.updated = 0x10; /* no link quality */ 5477 iwe.u.qual.updated = IW_QUAL_DBM | IW_QUAL_QUAL_INVALID;
4098 iwe.u.qual.level = (__u8) le16_to_cpu(bss->a.level) - 0x95; 5478 iwe.u.qual.level = (__u8) le16_to_cpu(bss->a.level) - 0x95;
4099 iwe.u.qual.noise = (__u8) le16_to_cpu(bss->a.noise) - 0x95; 5479 iwe.u.qual.noise = (__u8) le16_to_cpu(bss->a.noise) - 0x95;
4100 /* Wireless tools prior to 27.pre22 will show link quality 5480 /* Wireless tools prior to 27.pre22 will show link quality
@@ -4108,25 +5488,13 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4108 5488
4109 /* Add encryption capability */ 5489 /* Add encryption capability */
4110 iwe.cmd = SIOCGIWENCODE; 5490 iwe.cmd = SIOCGIWENCODE;
4111 if (capabilities & 0x10) 5491 if (capabilities & WLAN_CAPABILITY_PRIVACY)
4112 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; 5492 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
4113 else 5493 else
4114 iwe.u.data.flags = IW_ENCODE_DISABLED; 5494 iwe.u.data.flags = IW_ENCODE_DISABLED;
4115 iwe.u.data.length = 0; 5495 iwe.u.data.length = 0;
4116 current_ev = iwe_stream_add_point(info, current_ev, end_buf, 5496 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4117 &iwe, bss->a.essid); 5497 &iwe, NULL);
4118
4119 /* Add EXTRA: Age to display seconds since last beacon/probe response
4120 * for given network. */
4121 iwe.cmd = IWEVCUSTOM;
4122 p = custom;
4123 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
4124 " Last beacon: %dms ago",
4125 jiffies_to_msecs(jiffies - last_scanned));
4126 iwe.u.data.length = p - custom;
4127 if (iwe.u.data.length)
4128 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4129 &iwe, custom);
4130 5498
4131 /* Bit rate is not available in Lucent/Agere firmwares */ 5499 /* Bit rate is not available in Lucent/Agere firmwares */
4132 if (priv->firmware_type != FIRMWARE_TYPE_AGERE) { 5500 if (priv->firmware_type != FIRMWARE_TYPE_AGERE) {
@@ -4148,7 +5516,8 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4148 if (bss->p.rates[i] == 0x0) 5516 if (bss->p.rates[i] == 0x0)
4149 break; 5517 break;
4150 /* Bit rate given in 500 kb/s units (+ 0x80) */ 5518 /* Bit rate given in 500 kb/s units (+ 0x80) */
4151 iwe.u.bitrate.value = ((bss->p.rates[i] & 0x7f) * 500000); 5519 iwe.u.bitrate.value =
5520 ((bss->p.rates[i] & 0x7f) * 500000);
4152 current_val = iwe_stream_add_value(info, current_ev, 5521 current_val = iwe_stream_add_value(info, current_ev,
4153 current_val, 5522 current_val,
4154 end_buf, &iwe, 5523 end_buf, &iwe,
@@ -4159,6 +5528,199 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4159 current_ev = current_val; 5528 current_ev = current_val;
4160 } 5529 }
4161 5530
5531 /* Beacon interval */
5532 iwe.cmd = IWEVCUSTOM;
5533 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5534 "bcn_int=%d",
5535 le16_to_cpu(bss->a.beacon_interv));
5536 if (iwe.u.data.length)
5537 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5538 &iwe, custom);
5539
5540 /* Capabilites */
5541 iwe.cmd = IWEVCUSTOM;
5542 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5543 "capab=0x%04x",
5544 capabilities);
5545 if (iwe.u.data.length)
5546 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5547 &iwe, custom);
5548
5549 /* Add EXTRA: Age to display seconds since last beacon/probe response
5550 * for given network. */
5551 iwe.cmd = IWEVCUSTOM;
5552 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5553 " Last beacon: %dms ago",
5554 jiffies_to_msecs(jiffies - last_scanned));
5555 if (iwe.u.data.length)
5556 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5557 &iwe, custom);
5558
5559 return current_ev;
5560}
5561
5562static inline char *orinoco_translate_ext_scan(struct net_device *dev,
5563 struct iw_request_info *info,
5564 char *current_ev,
5565 char *end_buf,
5566 struct agere_ext_scan_info *bss,
5567 unsigned int last_scanned)
5568{
5569 u16 capabilities;
5570 u16 channel;
5571 struct iw_event iwe; /* Temporary buffer */
5572 char custom[MAX_CUSTOM_LEN];
5573 u8 *ie;
5574
5575 memset(&iwe, 0, sizeof(iwe));
5576
5577 /* First entry *MUST* be the AP MAC address */
5578 iwe.cmd = SIOCGIWAP;
5579 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
5580 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
5581 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5582 &iwe, IW_EV_ADDR_LEN);
5583
5584 /* Other entries will be displayed in the order we give them */
5585
5586 /* Add the ESSID */
5587 ie = bss->data;
5588 iwe.u.data.length = ie[1];
5589 if (iwe.u.data.length) {
5590 if (iwe.u.data.length > 32)
5591 iwe.u.data.length = 32;
5592 iwe.cmd = SIOCGIWESSID;
5593 iwe.u.data.flags = 1;
5594 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5595 &iwe, &ie[2]);
5596 }
5597
5598 /* Add mode */
5599 capabilities = le16_to_cpu(bss->capabilities);
5600 if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
5601 iwe.cmd = SIOCGIWMODE;
5602 if (capabilities & WLAN_CAPABILITY_ESS)
5603 iwe.u.mode = IW_MODE_MASTER;
5604 else
5605 iwe.u.mode = IW_MODE_ADHOC;
5606 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5607 &iwe, IW_EV_UINT_LEN);
5608 }
5609
5610 ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_DS_SET);
5611 channel = ie ? ie[2] : 0;
5612 if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
5613 /* Add channel and frequency */
5614 iwe.cmd = SIOCGIWFREQ;
5615 iwe.u.freq.m = channel;
5616 iwe.u.freq.e = 0;
5617 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5618 &iwe, IW_EV_FREQ_LEN);
5619
5620 iwe.u.freq.m = channel_frequency[channel-1] * 100000;
5621 iwe.u.freq.e = 1;
5622 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5623 &iwe, IW_EV_FREQ_LEN);
5624 }
5625
5626 /* Add quality statistics. level and noise in dB. No link quality */
5627 iwe.cmd = IWEVQUAL;
5628 iwe.u.qual.updated = IW_QUAL_DBM | IW_QUAL_QUAL_INVALID;
5629 iwe.u.qual.level = bss->level - 0x95;
5630 iwe.u.qual.noise = bss->noise - 0x95;
5631 /* Wireless tools prior to 27.pre22 will show link quality
5632 * anyway, so we provide a reasonable value. */
5633 if (iwe.u.qual.level > iwe.u.qual.noise)
5634 iwe.u.qual.qual = iwe.u.qual.level - iwe.u.qual.noise;
5635 else
5636 iwe.u.qual.qual = 0;
5637 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5638 &iwe, IW_EV_QUAL_LEN);
5639
5640 /* Add encryption capability */
5641 iwe.cmd = SIOCGIWENCODE;
5642 if (capabilities & WLAN_CAPABILITY_PRIVACY)
5643 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
5644 else
5645 iwe.u.data.flags = IW_ENCODE_DISABLED;
5646 iwe.u.data.length = 0;
5647 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5648 &iwe, NULL);
5649
5650 /* WPA IE */
5651 ie = orinoco_get_wpa_ie(bss->data, sizeof(bss->data));
5652 if (ie) {
5653 iwe.cmd = IWEVGENIE;
5654 iwe.u.data.length = ie[1] + 2;
5655 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5656 &iwe, ie);
5657 }
5658
5659 /* RSN IE */
5660 ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_RSN);
5661 if (ie) {
5662 iwe.cmd = IWEVGENIE;
5663 iwe.u.data.length = ie[1] + 2;
5664 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5665 &iwe, ie);
5666 }
5667
5668 ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_RATES);
5669 if (ie) {
5670 char *p = current_ev + iwe_stream_lcp_len(info);
5671 int i;
5672
5673 iwe.cmd = SIOCGIWRATE;
5674 /* Those two flags are ignored... */
5675 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
5676
5677 for (i = 2; i < (ie[1] + 2); i++) {
5678 iwe.u.bitrate.value = ((ie[i] & 0x7F) * 500000);
5679 p = iwe_stream_add_value(info, current_ev, p, end_buf,
5680 &iwe, IW_EV_PARAM_LEN);
5681 }
5682 /* Check if we added any event */
5683 if (p > (current_ev + iwe_stream_lcp_len(info)))
5684 current_ev = p;
5685 }
5686
5687 /* Timestamp */
5688 iwe.cmd = IWEVCUSTOM;
5689 iwe.u.data.length =
5690 snprintf(custom, MAX_CUSTOM_LEN, "tsf=%016llx",
5691 (unsigned long long) le64_to_cpu(bss->timestamp));
5692 if (iwe.u.data.length)
5693 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5694 &iwe, custom);
5695
5696 /* Beacon interval */
5697 iwe.cmd = IWEVCUSTOM;
5698 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5699 "bcn_int=%d",
5700 le16_to_cpu(bss->beacon_interval));
5701 if (iwe.u.data.length)
5702 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5703 &iwe, custom);
5704
5705 /* Capabilites */
5706 iwe.cmd = IWEVCUSTOM;
5707 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5708 "capab=0x%04x",
5709 capabilities);
5710 if (iwe.u.data.length)
5711 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5712 &iwe, custom);
5713
5714 /* Add EXTRA: Age to display seconds since last beacon/probe response
5715 * for given network. */
5716 iwe.cmd = IWEVCUSTOM;
5717 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5718 " Last beacon: %dms ago",
5719 jiffies_to_msecs(jiffies - last_scanned));
5720 if (iwe.u.data.length)
5721 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5722 &iwe, custom);
5723
4162 return current_ev; 5724 return current_ev;
4163} 5725}
4164 5726
@@ -4169,7 +5731,6 @@ static int orinoco_ioctl_getscan(struct net_device *dev,
4169 char *extra) 5731 char *extra)
4170{ 5732{
4171 struct orinoco_private *priv = netdev_priv(dev); 5733 struct orinoco_private *priv = netdev_priv(dev);
4172 bss_element *bss;
4173 int err = 0; 5734 int err = 0;
4174 unsigned long flags; 5735 unsigned long flags;
4175 char *current_ev = extra; 5736 char *current_ev = extra;
@@ -4189,18 +5750,47 @@ static int orinoco_ioctl_getscan(struct net_device *dev,
4189 goto out; 5750 goto out;
4190 } 5751 }
4191 5752
4192 list_for_each_entry(bss, &priv->bss_list, list) { 5753 if (priv->has_ext_scan) {
4193 /* Translate to WE format this entry */ 5754 struct xbss_element *bss;
4194 current_ev = orinoco_translate_scan(dev, info, current_ev, 5755
4195 extra + srq->length, 5756 list_for_each_entry(bss, &priv->bss_list, list) {
4196 &bss->bss, 5757 /* Translate this entry to WE format */
4197 bss->last_scanned); 5758 current_ev =
4198 5759 orinoco_translate_ext_scan(dev, info,
4199 /* Check if there is space for one more entry */ 5760 current_ev,
4200 if ((extra + srq->length - current_ev) <= IW_EV_ADDR_LEN) { 5761 extra + srq->length,
4201 /* Ask user space to try again with a bigger buffer */ 5762 &bss->bss,
4202 err = -E2BIG; 5763 bss->last_scanned);
4203 goto out; 5764
5765 /* Check if there is space for one more entry */
5766 if ((extra + srq->length - current_ev)
5767 <= IW_EV_ADDR_LEN) {
5768 /* Ask user space to try again with a
5769 * bigger buffer */
5770 err = -E2BIG;
5771 goto out;
5772 }
5773 }
5774
5775 } else {
5776 struct bss_element *bss;
5777
5778 list_for_each_entry(bss, &priv->bss_list, list) {
5779 /* Translate this entry to WE format */
5780 current_ev = orinoco_translate_scan(dev, info,
5781 current_ev,
5782 extra + srq->length,
5783 &bss->bss,
5784 bss->last_scanned);
5785
5786 /* Check if there is space for one more entry */
5787 if ((extra + srq->length - current_ev)
5788 <= IW_EV_ADDR_LEN) {
5789 /* Ask user space to try again with a
5790 * bigger buffer */
5791 err = -E2BIG;
5792 goto out;
5793 }
4204 } 5794 }
4205 } 5795 }
4206 5796
@@ -4291,39 +5881,48 @@ static const struct iw_priv_args orinoco_privtab[] = {
4291 * Structures to export the Wireless Handlers 5881 * Structures to export the Wireless Handlers
4292 */ 5882 */
4293 5883
5884#define STD_IW_HANDLER(id, func) \
5885 [IW_IOCTL_IDX(id)] = (iw_handler) func
4294static const iw_handler orinoco_handler[] = { 5886static const iw_handler orinoco_handler[] = {
4295 [SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_commit, 5887 STD_IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit),
4296 [SIOCGIWNAME -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getname, 5888 STD_IW_HANDLER(SIOCGIWNAME, orinoco_ioctl_getname),
4297 [SIOCSIWFREQ -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfreq, 5889 STD_IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq),
4298 [SIOCGIWFREQ -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfreq, 5890 STD_IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq),
4299 [SIOCSIWMODE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setmode, 5891 STD_IW_HANDLER(SIOCSIWMODE, orinoco_ioctl_setmode),
4300 [SIOCGIWMODE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getmode, 5892 STD_IW_HANDLER(SIOCGIWMODE, orinoco_ioctl_getmode),
4301 [SIOCSIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens, 5893 STD_IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens),
4302 [SIOCGIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens, 5894 STD_IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens),
4303 [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange, 5895 STD_IW_HANDLER(SIOCGIWRANGE, orinoco_ioctl_getiwrange),
4304 [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_set_spy, 5896 STD_IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
4305 [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_get_spy, 5897 STD_IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
4306 [SIOCSIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy, 5898 STD_IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
4307 [SIOCGIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy, 5899 STD_IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
4308 [SIOCSIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap, 5900 STD_IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap),
4309 [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap, 5901 STD_IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap),
4310 [SIOCSIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan, 5902 STD_IW_HANDLER(SIOCSIWSCAN, orinoco_ioctl_setscan),
4311 [SIOCGIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getscan, 5903 STD_IW_HANDLER(SIOCGIWSCAN, orinoco_ioctl_getscan),
4312 [SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setessid, 5904 STD_IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid),
4313 [SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getessid, 5905 STD_IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid),
4314 [SIOCSIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setnick, 5906 STD_IW_HANDLER(SIOCSIWNICKN, orinoco_ioctl_setnick),
4315 [SIOCGIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getnick, 5907 STD_IW_HANDLER(SIOCGIWNICKN, orinoco_ioctl_getnick),
4316 [SIOCSIWRATE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrate, 5908 STD_IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate),
4317 [SIOCGIWRATE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrate, 5909 STD_IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate),
4318 [SIOCSIWRTS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrts, 5910 STD_IW_HANDLER(SIOCSIWRTS, orinoco_ioctl_setrts),
4319 [SIOCGIWRTS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrts, 5911 STD_IW_HANDLER(SIOCGIWRTS, orinoco_ioctl_getrts),
4320 [SIOCSIWFRAG -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfrag, 5912 STD_IW_HANDLER(SIOCSIWFRAG, orinoco_ioctl_setfrag),
4321 [SIOCGIWFRAG -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfrag, 5913 STD_IW_HANDLER(SIOCGIWFRAG, orinoco_ioctl_getfrag),
4322 [SIOCGIWRETRY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getretry, 5914 STD_IW_HANDLER(SIOCGIWRETRY, orinoco_ioctl_getretry),
4323 [SIOCSIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setiwencode, 5915 STD_IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode),
4324 [SIOCGIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwencode, 5916 STD_IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode),
4325 [SIOCSIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setpower, 5917 STD_IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower),
4326 [SIOCGIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getpower, 5918 STD_IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower),
5919 STD_IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
5920 STD_IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
5921 STD_IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
5922 STD_IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
5923 STD_IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
5924 STD_IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
5925 STD_IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
4327}; 5926};
4328 5927
4329 5928
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
index c6b1858abde8..981570bd3b9d 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco.h
@@ -9,6 +9,7 @@
9 9
10#define DRIVER_VERSION "0.15" 10#define DRIVER_VERSION "0.15"
11 11
12#include <linux/interrupt.h>
12#include <linux/netdevice.h> 13#include <linux/netdevice.h>
13#include <linux/wireless.h> 14#include <linux/wireless.h>
14#include <net/iw_handler.h> 15#include <net/iw_handler.h>
@@ -30,27 +31,57 @@ struct orinoco_key {
30 char data[ORINOCO_MAX_KEY_SIZE]; 31 char data[ORINOCO_MAX_KEY_SIZE];
31} __attribute__ ((packed)); 32} __attribute__ ((packed));
32 33
34#define TKIP_KEYLEN 16
35#define MIC_KEYLEN 8
36
37struct orinoco_tkip_key {
38 u8 tkip[TKIP_KEYLEN];
39 u8 tx_mic[MIC_KEYLEN];
40 u8 rx_mic[MIC_KEYLEN];
41};
42
33typedef enum { 43typedef enum {
34 FIRMWARE_TYPE_AGERE, 44 FIRMWARE_TYPE_AGERE,
35 FIRMWARE_TYPE_INTERSIL, 45 FIRMWARE_TYPE_INTERSIL,
36 FIRMWARE_TYPE_SYMBOL 46 FIRMWARE_TYPE_SYMBOL
37} fwtype_t; 47} fwtype_t;
38 48
39typedef struct { 49struct bss_element {
40 union hermes_scan_info bss; 50 union hermes_scan_info bss;
41 unsigned long last_scanned; 51 unsigned long last_scanned;
42 struct list_head list; 52 struct list_head list;
43} bss_element; 53};
54
55struct xbss_element {
56 struct agere_ext_scan_info bss;
57 unsigned long last_scanned;
58 struct list_head list;
59};
60
61struct hermes_rx_descriptor;
62
63struct orinoco_rx_data {
64 struct hermes_rx_descriptor *desc;
65 struct sk_buff *skb;
66 struct list_head list;
67};
44 68
45struct orinoco_private { 69struct orinoco_private {
46 void *card; /* Pointer to card dependent structure */ 70 void *card; /* Pointer to card dependent structure */
71 struct device *dev;
47 int (*hard_reset)(struct orinoco_private *); 72 int (*hard_reset)(struct orinoco_private *);
73 int (*stop_fw)(struct orinoco_private *, int);
48 74
49 /* Synchronisation stuff */ 75 /* Synchronisation stuff */
50 spinlock_t lock; 76 spinlock_t lock;
51 int hw_unavailable; 77 int hw_unavailable;
52 struct work_struct reset_work; 78 struct work_struct reset_work;
53 79
80 /* Interrupt tasklets */
81 struct tasklet_struct rx_tasklet;
82 struct list_head rx_list;
83 struct orinoco_rx_data *rx_data;
84
54 /* driver state */ 85 /* driver state */
55 int open; 86 int open;
56 u16 last_linkstatus; 87 u16 last_linkstatus;
@@ -83,13 +114,17 @@ struct orinoco_private {
83 unsigned int has_preamble:1; 114 unsigned int has_preamble:1;
84 unsigned int has_sensitivity:1; 115 unsigned int has_sensitivity:1;
85 unsigned int has_hostscan:1; 116 unsigned int has_hostscan:1;
117 unsigned int has_alt_txcntl:1;
118 unsigned int has_ext_scan:1;
119 unsigned int has_wpa:1;
120 unsigned int do_fw_download:1;
86 unsigned int broken_disableport:1; 121 unsigned int broken_disableport:1;
87 unsigned int broken_monitor:1; 122 unsigned int broken_monitor:1;
88 123
89 /* Configuration paramaters */ 124 /* Configuration paramaters */
90 u32 iw_mode; 125 u32 iw_mode;
91 int prefer_port3; 126 int prefer_port3;
92 u16 wep_on, wep_restrict, tx_key; 127 u16 encode_alg, wep_restrict, tx_key;
93 struct orinoco_key keys[ORINOCO_MAX_KEYS]; 128 struct orinoco_key keys[ORINOCO_MAX_KEYS];
94 int bitratemode; 129 int bitratemode;
95 char nick[IW_ESSID_MAX_SIZE+1]; 130 char nick[IW_ESSID_MAX_SIZE+1];
@@ -113,10 +148,22 @@ struct orinoco_private {
113 /* Scanning support */ 148 /* Scanning support */
114 struct list_head bss_list; 149 struct list_head bss_list;
115 struct list_head bss_free_list; 150 struct list_head bss_free_list;
116 bss_element *bss_data; 151 void *bss_xbss_data;
117 152
118 int scan_inprogress; /* Scan pending... */ 153 int scan_inprogress; /* Scan pending... */
119 u32 scan_mode; /* Type of scan done */ 154 u32 scan_mode; /* Type of scan done */
155
156 /* WPA support */
157 u8 *wpa_ie;
158 int wpa_ie_len;
159
160 struct orinoco_tkip_key tkip_key[ORINOCO_MAX_KEYS];
161 struct crypto_hash *rx_tfm_mic;
162 struct crypto_hash *tx_tfm_mic;
163
164 unsigned int wpa_enabled:1;
165 unsigned int tkip_cm_active:1;
166 unsigned int key_mgmt:3;
120}; 167};
121 168
122#ifdef ORINOCO_DEBUG 169#ifdef ORINOCO_DEBUG
@@ -130,8 +177,10 @@ extern int orinoco_debug;
130/* Exported prototypes */ 177/* Exported prototypes */
131/********************************************************************/ 178/********************************************************************/
132 179
133extern struct net_device *alloc_orinocodev(int sizeof_card, 180extern struct net_device *alloc_orinocodev(
134 int (*hard_reset)(struct orinoco_private *)); 181 int sizeof_card, struct device *device,
182 int (*hard_reset)(struct orinoco_private *),
183 int (*stop_fw)(struct orinoco_private *, int));
135extern void free_orinocodev(struct net_device *dev); 184extern void free_orinocodev(struct net_device *dev);
136extern int __orinoco_up(struct net_device *dev); 185extern int __orinoco_up(struct net_device *dev);
137extern int __orinoco_down(struct net_device *dev); 186extern int __orinoco_down(struct net_device *dev);
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index 1c216e015f64..1ccf5a40cf06 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -109,7 +109,8 @@ orinoco_cs_probe(struct pcmcia_device *link)
109 struct orinoco_private *priv; 109 struct orinoco_private *priv;
110 struct orinoco_pccard *card; 110 struct orinoco_pccard *card;
111 111
112 dev = alloc_orinocodev(sizeof(*card), orinoco_cs_hard_reset); 112 dev = alloc_orinocodev(sizeof(*card), &handle_to_dev(link),
113 orinoco_cs_hard_reset, NULL);
113 if (! dev) 114 if (! dev)
114 return -ENOMEM; 115 return -ENOMEM;
115 priv = netdev_priv(dev); 116 priv = netdev_priv(dev);
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
index 35ec5fcf81a6..2fc86596302e 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -182,7 +182,8 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
182 } 182 }
183 183
184 /* Allocate network device */ 184 /* Allocate network device */
185 dev = alloc_orinocodev(sizeof(*card), orinoco_nortel_cor_reset); 185 dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
186 orinoco_nortel_cor_reset, NULL);
186 if (!dev) { 187 if (!dev) {
187 printk(KERN_ERR PFX "Cannot allocate network device\n"); 188 printk(KERN_ERR PFX "Cannot allocate network device\n");
188 err = -ENOMEM; 189 err = -ENOMEM;
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 2547d5dac0d3..4ebd638a073e 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -139,7 +139,8 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
139 } 139 }
140 140
141 /* Allocate network device */ 141 /* Allocate network device */
142 dev = alloc_orinocodev(sizeof(*card), orinoco_pci_cor_reset); 142 dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
143 orinoco_pci_cor_reset, NULL);
143 if (!dev) { 144 if (!dev) {
144 printk(KERN_ERR PFX "Cannot allocate network device\n"); 145 printk(KERN_ERR PFX "Cannot allocate network device\n");
145 err = -ENOMEM; 146 err = -ENOMEM;
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
index 98fe165337d1..ef761857bb38 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -221,7 +221,8 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
221 } 221 }
222 222
223 /* Allocate network device */ 223 /* Allocate network device */
224 dev = alloc_orinocodev(sizeof(*card), orinoco_plx_cor_reset); 224 dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
225 orinoco_plx_cor_reset, NULL);
225 if (!dev) { 226 if (!dev) {
226 printk(KERN_ERR PFX "Cannot allocate network device\n"); 227 printk(KERN_ERR PFX "Cannot allocate network device\n");
227 err = -ENOMEM; 228 err = -ENOMEM;
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
index df493185a4af..ede24ec309c0 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -124,7 +124,8 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
124 } 124 }
125 125
126 /* Allocate network device */ 126 /* Allocate network device */
127 dev = alloc_orinocodev(sizeof(*card), orinoco_tmd_cor_reset); 127 dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
128 orinoco_tmd_cor_reset, NULL);
128 if (!dev) { 129 if (!dev) {
129 printk(KERN_ERR PFX "Cannot allocate network device\n"); 130 printk(KERN_ERR PFX "Cannot allocate network device\n");
130 err = -ENOMEM; 131 err = -ENOMEM;
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 4801a363507b..1d0704fe146f 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -1,5 +1,5 @@
1#ifndef PRISM54_H 1#ifndef P54_H
2#define PRISM54_H 2#define P54_H
3 3
4/* 4/*
5 * Shared defines for all mac80211 Prism54 code 5 * Shared defines for all mac80211 Prism54 code
@@ -19,13 +19,24 @@ enum control_frame_types {
19 P54_CONTROL_TYPE_CHANNEL_CHANGE, 19 P54_CONTROL_TYPE_CHANNEL_CHANGE,
20 P54_CONTROL_TYPE_FREQDONE, 20 P54_CONTROL_TYPE_FREQDONE,
21 P54_CONTROL_TYPE_DCFINIT, 21 P54_CONTROL_TYPE_DCFINIT,
22 P54_CONTROL_TYPE_FREEQUEUE = 7, 22 P54_CONTROL_TYPE_ENCRYPTION,
23 P54_CONTROL_TYPE_TIM,
24 P54_CONTROL_TYPE_POWERMGT,
25 P54_CONTROL_TYPE_FREEQUEUE,
23 P54_CONTROL_TYPE_TXDONE, 26 P54_CONTROL_TYPE_TXDONE,
24 P54_CONTROL_TYPE_PING, 27 P54_CONTROL_TYPE_PING,
25 P54_CONTROL_TYPE_STAT_READBACK, 28 P54_CONTROL_TYPE_STAT_READBACK,
26 P54_CONTROL_TYPE_BBP, 29 P54_CONTROL_TYPE_BBP,
27 P54_CONTROL_TYPE_EEPROM_READBACK, 30 P54_CONTROL_TYPE_EEPROM_READBACK,
28 P54_CONTROL_TYPE_LED 31 P54_CONTROL_TYPE_LED,
32 P54_CONTROL_TYPE_GPIO,
33 P54_CONTROL_TYPE_TIMER,
34 P54_CONTROL_TYPE_MODULATION,
35 P54_CONTROL_TYPE_SYNTH_CONFIG,
36 P54_CONTROL_TYPE_DETECTOR_VALUE,
37 P54_CONTROL_TYPE_XBOW_SYNTH_CFG,
38 P54_CONTROL_TYPE_CCE_QUIET,
39 P54_CONTROL_TYPE_PSM_STA_UNLOCK,
29}; 40};
30 41
31struct p54_control_hdr { 42struct p54_control_hdr {
@@ -38,11 +49,15 @@ struct p54_control_hdr {
38 u8 data[0]; 49 u8 data[0];
39} __attribute__ ((packed)); 50} __attribute__ ((packed));
40 51
41#define EEPROM_READBACK_LEN (sizeof(struct p54_control_hdr) + 4 /* p54_eeprom_lm86 */) 52#define EEPROM_READBACK_LEN 0x3fc
42#define MAX_RX_SIZE (IEEE80211_MAX_RTS_THRESHOLD + sizeof(struct p54_control_hdr) + 20 /* length of struct p54_rx_hdr */ + 16 )
43 53
44#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000 54#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000
45 55
56#define FW_FMAC 0x464d4143
57#define FW_LM86 0x4c4d3836
58#define FW_LM87 0x4c4d3837
59#define FW_LM20 0x4c4d3230
60
46struct p54_common { 61struct p54_common {
47 u32 rx_start; 62 u32 rx_start;
48 u32 rx_end; 63 u32 rx_end;
@@ -53,27 +68,43 @@ struct p54_common {
53 void (*stop)(struct ieee80211_hw *dev); 68 void (*stop)(struct ieee80211_hw *dev);
54 int mode; 69 int mode;
55 u16 seqno; 70 u16 seqno;
71 u16 rx_mtu;
72 u8 headroom;
73 u8 tailroom;
56 struct mutex conf_mutex; 74 struct mutex conf_mutex;
57 u8 mac_addr[ETH_ALEN]; 75 u8 mac_addr[ETH_ALEN];
58 u8 bssid[ETH_ALEN]; 76 u8 bssid[ETH_ALEN];
77 __le16 filter_type;
59 struct pda_iq_autocal_entry *iq_autocal; 78 struct pda_iq_autocal_entry *iq_autocal;
60 unsigned int iq_autocal_len; 79 unsigned int iq_autocal_len;
61 struct pda_channel_output_limit *output_limit; 80 struct pda_channel_output_limit *output_limit;
62 unsigned int output_limit_len; 81 unsigned int output_limit_len;
63 struct pda_pa_curve_data *curve_data; 82 struct pda_pa_curve_data *curve_data;
64 __le16 rxhw; 83 unsigned int filter_flags;
84 u16 rxhw;
65 u8 version; 85 u8 version;
86 u8 rx_antenna;
66 unsigned int tx_hdr_len; 87 unsigned int tx_hdr_len;
67 void *cached_vdcf; 88 void *cached_vdcf;
68 unsigned int fw_var; 89 unsigned int fw_var;
69 struct ieee80211_tx_queue_stats tx_stats[4]; 90 unsigned int fw_interface;
91 unsigned int output_power;
92 u32 tsf_low32;
93 u32 tsf_high32;
94 struct ieee80211_tx_queue_stats tx_stats[8];
95 struct ieee80211_low_level_stats stats;
96 struct timer_list stats_timer;
97 struct completion stats_comp;
98 void *cached_stats;
99 int noise;
100 void *eeprom;
101 struct completion eeprom_comp;
70}; 102};
71 103
72int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb); 104int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb);
73void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw); 105int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw);
74int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len); 106int p54_read_eeprom(struct ieee80211_hw *dev);
75void p54_fill_eeprom_readback(struct p54_control_hdr *hdr);
76struct ieee80211_hw *p54_init_common(size_t priv_data_len); 107struct ieee80211_hw *p54_init_common(size_t priv_data_len);
77void p54_free_common(struct ieee80211_hw *dev); 108void p54_free_common(struct ieee80211_hw *dev);
78 109
79#endif /* PRISM54_H */ 110#endif /* P54_H */
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 29be3dc8ee09..bac58ed03e5c 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("Softmac Prism54 common code");
27MODULE_LICENSE("GPL"); 27MODULE_LICENSE("GPL");
28MODULE_ALIAS("prism54common"); 28MODULE_ALIAS("prism54common");
29 29
30static struct ieee80211_rate p54_rates[] = { 30static struct ieee80211_rate p54_bgrates[] = {
31 { .bitrate = 10, .hw_value = 0, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 31 { .bitrate = 10, .hw_value = 0, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
32 { .bitrate = 20, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 32 { .bitrate = 20, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
33 { .bitrate = 55, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 33 { .bitrate = 55, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
@@ -42,7 +42,7 @@ static struct ieee80211_rate p54_rates[] = {
42 { .bitrate = 540, .hw_value = 11, }, 42 { .bitrate = 540, .hw_value = 11, },
43}; 43};
44 44
45static struct ieee80211_channel p54_channels[] = { 45static struct ieee80211_channel p54_bgchannels[] = {
46 { .center_freq = 2412, .hw_value = 1, }, 46 { .center_freq = 2412, .hw_value = 1, },
47 { .center_freq = 2417, .hw_value = 2, }, 47 { .center_freq = 2417, .hw_value = 2, },
48 { .center_freq = 2422, .hw_value = 3, }, 48 { .center_freq = 2422, .hw_value = 3, },
@@ -60,14 +60,69 @@ static struct ieee80211_channel p54_channels[] = {
60}; 60};
61 61
62static struct ieee80211_supported_band band_2GHz = { 62static struct ieee80211_supported_band band_2GHz = {
63 .channels = p54_channels, 63 .channels = p54_bgchannels,
64 .n_channels = ARRAY_SIZE(p54_channels), 64 .n_channels = ARRAY_SIZE(p54_bgchannels),
65 .bitrates = p54_rates, 65 .bitrates = p54_bgrates,
66 .n_bitrates = ARRAY_SIZE(p54_rates), 66 .n_bitrates = ARRAY_SIZE(p54_bgrates),
67}; 67};
68 68
69static struct ieee80211_rate p54_arates[] = {
70 { .bitrate = 60, .hw_value = 4, },
71 { .bitrate = 90, .hw_value = 5, },
72 { .bitrate = 120, .hw_value = 6, },
73 { .bitrate = 180, .hw_value = 7, },
74 { .bitrate = 240, .hw_value = 8, },
75 { .bitrate = 360, .hw_value = 9, },
76 { .bitrate = 480, .hw_value = 10, },
77 { .bitrate = 540, .hw_value = 11, },
78};
79
80static struct ieee80211_channel p54_achannels[] = {
81 { .center_freq = 4920 },
82 { .center_freq = 4940 },
83 { .center_freq = 4960 },
84 { .center_freq = 4980 },
85 { .center_freq = 5040 },
86 { .center_freq = 5060 },
87 { .center_freq = 5080 },
88 { .center_freq = 5170 },
89 { .center_freq = 5180 },
90 { .center_freq = 5190 },
91 { .center_freq = 5200 },
92 { .center_freq = 5210 },
93 { .center_freq = 5220 },
94 { .center_freq = 5230 },
95 { .center_freq = 5240 },
96 { .center_freq = 5260 },
97 { .center_freq = 5280 },
98 { .center_freq = 5300 },
99 { .center_freq = 5320 },
100 { .center_freq = 5500 },
101 { .center_freq = 5520 },
102 { .center_freq = 5540 },
103 { .center_freq = 5560 },
104 { .center_freq = 5580 },
105 { .center_freq = 5600 },
106 { .center_freq = 5620 },
107 { .center_freq = 5640 },
108 { .center_freq = 5660 },
109 { .center_freq = 5680 },
110 { .center_freq = 5700 },
111 { .center_freq = 5745 },
112 { .center_freq = 5765 },
113 { .center_freq = 5785 },
114 { .center_freq = 5805 },
115 { .center_freq = 5825 },
116};
117
118static struct ieee80211_supported_band band_5GHz = {
119 .channels = p54_achannels,
120 .n_channels = ARRAY_SIZE(p54_achannels),
121 .bitrates = p54_arates,
122 .n_bitrates = ARRAY_SIZE(p54_arates),
123};
69 124
70void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) 125int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
71{ 126{
72 struct p54_common *priv = dev->priv; 127 struct p54_common *priv = dev->priv;
73 struct bootrec_exp_if *exp_if; 128 struct bootrec_exp_if *exp_if;
@@ -79,7 +134,7 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
79 int i; 134 int i;
80 135
81 if (priv->rx_start) 136 if (priv->rx_start)
82 return; 137 return 0;
83 138
84 while (data < end_data && *data) 139 while (data < end_data && *data)
85 data++; 140 data++;
@@ -94,7 +149,8 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
94 u32 code = le32_to_cpu(bootrec->code); 149 u32 code = le32_to_cpu(bootrec->code);
95 switch (code) { 150 switch (code) {
96 case BR_CODE_COMPONENT_ID: 151 case BR_CODE_COMPONENT_ID:
97 switch (be32_to_cpu(*(__be32 *)bootrec->data)) { 152 priv->fw_interface = be32_to_cpup(bootrec->data);
153 switch (priv->fw_interface) {
98 case FW_FMAC: 154 case FW_FMAC:
99 printk(KERN_INFO "p54: FreeMAC firmware\n"); 155 printk(KERN_INFO "p54: FreeMAC firmware\n");
100 break; 156 break;
@@ -105,7 +161,7 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
105 printk(KERN_INFO "p54: LM86 firmware\n"); 161 printk(KERN_INFO "p54: LM86 firmware\n");
106 break; 162 break;
107 case FW_LM87: 163 case FW_LM87:
108 printk(KERN_INFO "p54: LM87 firmware - not supported yet!\n"); 164 printk(KERN_INFO "p54: LM87 firmware\n");
109 break; 165 break;
110 default: 166 default:
111 printk(KERN_INFO "p54: unknown firmware\n"); 167 printk(KERN_INFO "p54: unknown firmware\n");
@@ -117,11 +173,22 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
117 if (strnlen((unsigned char*)bootrec->data, 24) < 24) 173 if (strnlen((unsigned char*)bootrec->data, 24) < 24)
118 fw_version = (unsigned char*)bootrec->data; 174 fw_version = (unsigned char*)bootrec->data;
119 break; 175 break;
120 case BR_CODE_DESCR: 176 case BR_CODE_DESCR: {
121 priv->rx_start = le32_to_cpu(((__le32 *)bootrec->data)[1]); 177 struct bootrec_desc *desc =
178 (struct bootrec_desc *)bootrec->data;
179 priv->rx_start = le32_to_cpu(desc->rx_start);
122 /* FIXME add sanity checking */ 180 /* FIXME add sanity checking */
123 priv->rx_end = le32_to_cpu(((__le32 *)bootrec->data)[2]) - 0x3500; 181 priv->rx_end = le32_to_cpu(desc->rx_end) - 0x3500;
182 priv->headroom = desc->headroom;
183 priv->tailroom = desc->tailroom;
184 if (bootrec->len == 11)
185 priv->rx_mtu = (size_t) le16_to_cpu(
186 (__le16)bootrec->data[10]);
187 else
188 priv->rx_mtu = (size_t)
189 0x620 - priv->tx_hdr_len;
124 break; 190 break;
191 }
125 case BR_CODE_EXPOSED_IF: 192 case BR_CODE_EXPOSED_IF:
126 exp_if = (struct bootrec_exp_if *) bootrec->data; 193 exp_if = (struct bootrec_exp_if *) bootrec->data;
127 for (i = 0; i < (len * sizeof(*exp_if) / 4); i++) 194 for (i = 0; i < (len * sizeof(*exp_if) / 4); i++)
@@ -146,23 +213,25 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
146 213
147 if (priv->fw_var >= 0x300) { 214 if (priv->fw_var >= 0x300) {
148 /* Firmware supports QoS, use it! */ 215 /* Firmware supports QoS, use it! */
149 priv->tx_stats[0].limit = 3; 216 priv->tx_stats[4].limit = 3;
150 priv->tx_stats[1].limit = 4; 217 priv->tx_stats[5].limit = 4;
151 priv->tx_stats[2].limit = 3; 218 priv->tx_stats[6].limit = 3;
152 priv->tx_stats[3].limit = 1; 219 priv->tx_stats[7].limit = 1;
153 dev->queues = 4; 220 dev->queues = 4;
154 } 221 }
222
223 return 0;
155} 224}
156EXPORT_SYMBOL_GPL(p54_parse_firmware); 225EXPORT_SYMBOL_GPL(p54_parse_firmware);
157 226
158static int p54_convert_rev0_to_rev1(struct ieee80211_hw *dev, 227static int p54_convert_rev0(struct ieee80211_hw *dev,
159 struct pda_pa_curve_data *curve_data) 228 struct pda_pa_curve_data *curve_data)
160{ 229{
161 struct p54_common *priv = dev->priv; 230 struct p54_common *priv = dev->priv;
162 struct pda_pa_curve_data_sample_rev1 *rev1; 231 struct p54_pa_curve_data_sample *dst;
163 struct pda_pa_curve_data_sample_rev0 *rev0; 232 struct pda_pa_curve_data_sample_rev0 *src;
164 size_t cd_len = sizeof(*curve_data) + 233 size_t cd_len = sizeof(*curve_data) +
165 (curve_data->points_per_channel*sizeof(*rev1) + 2) * 234 (curve_data->points_per_channel*sizeof(*dst) + 2) *
166 curve_data->channels; 235 curve_data->channels;
167 unsigned int i, j; 236 unsigned int i, j;
168 void *source, *target; 237 void *source, *target;
@@ -180,27 +249,67 @@ static int p54_convert_rev0_to_rev1(struct ieee80211_hw *dev,
180 *((__le16 *)target) = *freq; 249 *((__le16 *)target) = *freq;
181 target += sizeof(__le16); 250 target += sizeof(__le16);
182 for (j = 0; j < curve_data->points_per_channel; j++) { 251 for (j = 0; j < curve_data->points_per_channel; j++) {
183 rev1 = target; 252 dst = target;
184 rev0 = source; 253 src = source;
185 254
186 rev1->rf_power = rev0->rf_power; 255 dst->rf_power = src->rf_power;
187 rev1->pa_detector = rev0->pa_detector; 256 dst->pa_detector = src->pa_detector;
188 rev1->data_64qam = rev0->pcv; 257 dst->data_64qam = src->pcv;
189 /* "invent" the points for the other modulations */ 258 /* "invent" the points for the other modulations */
190#define SUB(x,y) (u8)((x) - (y)) > (x) ? 0 : (x) - (y) 259#define SUB(x,y) (u8)((x) - (y)) > (x) ? 0 : (x) - (y)
191 rev1->data_16qam = SUB(rev0->pcv, 12); 260 dst->data_16qam = SUB(src->pcv, 12);
192 rev1->data_qpsk = SUB(rev1->data_16qam, 12); 261 dst->data_qpsk = SUB(dst->data_16qam, 12);
193 rev1->data_bpsk = SUB(rev1->data_qpsk, 12); 262 dst->data_bpsk = SUB(dst->data_qpsk, 12);
194 rev1->data_barker= SUB(rev1->data_bpsk, 14); 263 dst->data_barker = SUB(dst->data_bpsk, 14);
195#undef SUB 264#undef SUB
196 target += sizeof(*rev1); 265 target += sizeof(*dst);
197 source += sizeof(*rev0); 266 source += sizeof(*src);
198 } 267 }
199 } 268 }
200 269
201 return 0; 270 return 0;
202} 271}
203 272
273static int p54_convert_rev1(struct ieee80211_hw *dev,
274 struct pda_pa_curve_data *curve_data)
275{
276 struct p54_common *priv = dev->priv;
277 struct p54_pa_curve_data_sample *dst;
278 struct pda_pa_curve_data_sample_rev1 *src;
279 size_t cd_len = sizeof(*curve_data) +
280 (curve_data->points_per_channel*sizeof(*dst) + 2) *
281 curve_data->channels;
282 unsigned int i, j;
283 void *source, *target;
284
285 priv->curve_data = kmalloc(cd_len, GFP_KERNEL);
286 if (!priv->curve_data)
287 return -ENOMEM;
288
289 memcpy(priv->curve_data, curve_data, sizeof(*curve_data));
290 source = curve_data->data;
291 target = priv->curve_data->data;
292 for (i = 0; i < curve_data->channels; i++) {
293 __le16 *freq = source;
294 source += sizeof(__le16);
295 *((__le16 *)target) = *freq;
296 target += sizeof(__le16);
297 for (j = 0; j < curve_data->points_per_channel; j++) {
298 memcpy(target, source, sizeof(*src));
299
300 target += sizeof(*dst);
301 source += sizeof(*src);
302 }
303 source++;
304 }
305
306 return 0;
307}
308
309const char* p54_rf_chips[] = { "NULL", "Indigo?", "Duette",
310 "Frisbee", "Xbow", "Longbow" };
311static int p54_init_xbow_synth(struct ieee80211_hw *dev);
312
204int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) 313int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
205{ 314{
206 struct p54_common *priv = dev->priv; 315 struct p54_common *priv = dev->priv;
@@ -210,6 +319,7 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
210 void *tmp; 319 void *tmp;
211 int err; 320 int err;
212 u8 *end = (u8 *)eeprom + len; 321 u8 *end = (u8 *)eeprom + len;
322 DECLARE_MAC_BUF(mac);
213 323
214 wrap = (struct eeprom_pda_wrap *) eeprom; 324 wrap = (struct eeprom_pda_wrap *) eeprom;
215 entry = (void *)wrap->data + le16_to_cpu(wrap->len); 325 entry = (void *)wrap->data + le16_to_cpu(wrap->len);
@@ -250,27 +360,32 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
250 entry->data[1]*sizeof(*priv->output_limit)); 360 entry->data[1]*sizeof(*priv->output_limit));
251 priv->output_limit_len = entry->data[1]; 361 priv->output_limit_len = entry->data[1];
252 break; 362 break;
253 case PDR_PRISM_PA_CAL_CURVE_DATA: 363 case PDR_PRISM_PA_CAL_CURVE_DATA: {
254 if (data_len < sizeof(struct pda_pa_curve_data)) { 364 struct pda_pa_curve_data *curve_data =
365 (struct pda_pa_curve_data *)entry->data;
366 if (data_len < sizeof(*curve_data)) {
255 err = -EINVAL; 367 err = -EINVAL;
256 goto err; 368 goto err;
257 } 369 }
258 370
259 if (((struct pda_pa_curve_data *)entry->data)->cal_method_rev) { 371 switch (curve_data->cal_method_rev) {
260 priv->curve_data = kmalloc(data_len, GFP_KERNEL); 372 case 0:
261 if (!priv->curve_data) { 373 err = p54_convert_rev0(dev, curve_data);
262 err = -ENOMEM; 374 break;
263 goto err; 375 case 1:
264 } 376 err = p54_convert_rev1(dev, curve_data);
265 377 break;
266 memcpy(priv->curve_data, entry->data, data_len); 378 default:
267 } else { 379 printk(KERN_ERR "p54: unknown curve data "
268 err = p54_convert_rev0_to_rev1(dev, (struct pda_pa_curve_data *)entry->data); 380 "revision %d\n",
269 if (err) 381 curve_data->cal_method_rev);
270 goto err; 382 err = -ENODEV;
383 break;
271 } 384 }
385 if (err)
386 goto err;
272 387
273 break; 388 }
274 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: 389 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION:
275 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); 390 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL);
276 if (!priv->iq_autocal) { 391 if (!priv->iq_autocal) {
@@ -286,7 +401,7 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
286 while ((u8 *)tmp < entry->data + data_len) { 401 while ((u8 *)tmp < entry->data + data_len) {
287 struct bootrec_exp_if *exp_if = tmp; 402 struct bootrec_exp_if *exp_if = tmp;
288 if (le16_to_cpu(exp_if->if_id) == 0xF) 403 if (le16_to_cpu(exp_if->if_id) == 0xF)
289 priv->rxhw = exp_if->variant & cpu_to_le16(0x07); 404 priv->rxhw = le16_to_cpu(exp_if->variant) & 0x07;
290 tmp += sizeof(struct bootrec_exp_if); 405 tmp += sizeof(struct bootrec_exp_if);
291 } 406 }
292 break; 407 break;
@@ -312,6 +427,37 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
312 goto err; 427 goto err;
313 } 428 }
314 429
430 switch (priv->rxhw) {
431 case 4: /* XBow */
432 p54_init_xbow_synth(dev);
433 case 1: /* Indigo? */
434 case 2: /* Duette */
435 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
436 case 3: /* Frisbee */
437 case 5: /* Longbow */
438 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
439 break;
440 default:
441 printk(KERN_ERR "%s: unsupported RF-Chip\n",
442 wiphy_name(dev->wiphy));
443 err = -EINVAL;
444 goto err;
445 }
446
447 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
448 u8 perm_addr[ETH_ALEN];
449
450 printk(KERN_WARNING "%s: Invalid hwaddr! Using randomly generated MAC addr\n",
451 wiphy_name(dev->wiphy));
452 random_ether_addr(perm_addr);
453 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
454 }
455
456 printk(KERN_INFO "%s: hwaddr %s, MAC:isl38%02x RF:%s\n",
457 wiphy_name(dev->wiphy),
458 print_mac(mac, dev->wiphy->perm_addr),
459 priv->version, p54_rf_chips[priv->rxhw]);
460
315 return 0; 461 return 0;
316 462
317 err: 463 err:
@@ -335,40 +481,54 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
335} 481}
336EXPORT_SYMBOL_GPL(p54_parse_eeprom); 482EXPORT_SYMBOL_GPL(p54_parse_eeprom);
337 483
338void p54_fill_eeprom_readback(struct p54_control_hdr *hdr) 484static int p54_rssi_to_dbm(struct ieee80211_hw *dev, int rssi)
339{ 485{
340 struct p54_eeprom_lm86 *eeprom_hdr; 486 /* TODO: get the rssi_add & rssi_mul data from the eeprom */
341 487 return ((rssi * 0x83) / 64 - 400) / 4;
342 hdr->magic1 = cpu_to_le16(0x8000);
343 hdr->len = cpu_to_le16(sizeof(*eeprom_hdr) + 0x2000);
344 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_EEPROM_READBACK);
345 hdr->retry1 = hdr->retry2 = 0;
346 eeprom_hdr = (struct p54_eeprom_lm86 *) hdr->data;
347 eeprom_hdr->offset = 0x0;
348 eeprom_hdr->len = cpu_to_le16(0x2000);
349} 488}
350EXPORT_SYMBOL_GPL(p54_fill_eeprom_readback);
351 489
352static void p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb) 490static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
353{ 491{
492 struct p54_common *priv = dev->priv;
354 struct p54_rx_hdr *hdr = (struct p54_rx_hdr *) skb->data; 493 struct p54_rx_hdr *hdr = (struct p54_rx_hdr *) skb->data;
355 struct ieee80211_rx_status rx_status = {0}; 494 struct ieee80211_rx_status rx_status = {0};
356 u16 freq = le16_to_cpu(hdr->freq); 495 u16 freq = le16_to_cpu(hdr->freq);
496 size_t header_len = sizeof(*hdr);
497 u32 tsf32;
498
499 if (!(hdr->magic & cpu_to_le16(0x0001))) {
500 if (priv->filter_flags & FIF_FCSFAIL)
501 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
502 else
503 return 0;
504 }
357 505
358 rx_status.signal = hdr->rssi; 506 rx_status.signal = p54_rssi_to_dbm(dev, hdr->rssi);
507 rx_status.noise = priv->noise;
359 /* XX correct? */ 508 /* XX correct? */
360 rx_status.qual = (100 * hdr->rssi) / 127; 509 rx_status.qual = (100 * hdr->rssi) / 127;
361 rx_status.rate_idx = hdr->rate & 0xf; 510 rx_status.rate_idx = hdr->rate & 0xf;
362 rx_status.freq = freq; 511 rx_status.freq = freq;
363 rx_status.band = IEEE80211_BAND_2GHZ; 512 rx_status.band = IEEE80211_BAND_2GHZ;
364 rx_status.antenna = hdr->antenna; 513 rx_status.antenna = hdr->antenna;
365 rx_status.mactime = le64_to_cpu(hdr->timestamp); 514
515 tsf32 = le32_to_cpu(hdr->tsf32);
516 if (tsf32 < priv->tsf_low32)
517 priv->tsf_high32++;
518 rx_status.mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
519 priv->tsf_low32 = tsf32;
520
366 rx_status.flag |= RX_FLAG_TSFT; 521 rx_status.flag |= RX_FLAG_TSFT;
367 522
368 skb_pull(skb, sizeof(*hdr)); 523 if (hdr->magic & cpu_to_le16(0x4000))
524 header_len += hdr->align[0];
525
526 skb_pull(skb, header_len);
369 skb_trim(skb, le16_to_cpu(hdr->len)); 527 skb_trim(skb, le16_to_cpu(hdr->len));
370 528
371 ieee80211_rx_irqsafe(dev, skb, &rx_status); 529 ieee80211_rx_irqsafe(dev, skb, &rx_status);
530
531 return -1;
372} 532}
373 533
374static void inline p54_wake_free_queues(struct ieee80211_hw *dev) 534static void inline p54_wake_free_queues(struct ieee80211_hw *dev)
@@ -377,7 +537,7 @@ static void inline p54_wake_free_queues(struct ieee80211_hw *dev)
377 int i; 537 int i;
378 538
379 for (i = 0; i < dev->queues; i++) 539 for (i = 0; i < dev->queues; i++)
380 if (priv->tx_stats[i].len < priv->tx_stats[i].limit) 540 if (priv->tx_stats[i + 4].len < priv->tx_stats[i + 4].limit)
381 ieee80211_wake_queue(dev, i); 541 ieee80211_wake_queue(dev, i);
382} 542}
383 543
@@ -387,11 +547,13 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
387 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; 547 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
388 struct p54_frame_sent_hdr *payload = (struct p54_frame_sent_hdr *) hdr->data; 548 struct p54_frame_sent_hdr *payload = (struct p54_frame_sent_hdr *) hdr->data;
389 struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next; 549 struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next;
390 u32 addr = le32_to_cpu(hdr->req_id) - 0x70; 550 u32 addr = le32_to_cpu(hdr->req_id) - priv->headroom;
391 struct memrecord *range = NULL; 551 struct memrecord *range = NULL;
392 u32 freed = 0; 552 u32 freed = 0;
393 u32 last_addr = priv->rx_start; 553 u32 last_addr = priv->rx_start;
554 unsigned long flags;
394 555
556 spin_lock_irqsave(&priv->tx_queue.lock, flags);
395 while (entry != (struct sk_buff *)&priv->tx_queue) { 557 while (entry != (struct sk_buff *)&priv->tx_queue) {
396 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 558 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
397 range = (void *)info->driver_data; 559 range = (void *)info->driver_data;
@@ -412,13 +574,15 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
412 574
413 last_addr = range->end_addr; 575 last_addr = range->end_addr;
414 __skb_unlink(entry, &priv->tx_queue); 576 __skb_unlink(entry, &priv->tx_queue);
577 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
578
415 memset(&info->status, 0, sizeof(info->status)); 579 memset(&info->status, 0, sizeof(info->status));
416 entry_hdr = (struct p54_control_hdr *) entry->data; 580 entry_hdr = (struct p54_control_hdr *) entry->data;
417 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; 581 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data;
418 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) 582 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0)
419 pad = entry_data->align[0]; 583 pad = entry_data->align[0];
420 584
421 priv->tx_stats[entry_data->hw_queue - 4].len--; 585 priv->tx_stats[entry_data->hw_queue].len--;
422 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 586 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
423 if (!(payload->status & 0x01)) 587 if (!(payload->status & 0x01))
424 info->flags |= IEEE80211_TX_STAT_ACK; 588 info->flags |= IEEE80211_TX_STAT_ACK;
@@ -426,21 +590,60 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
426 info->status.excessive_retries = 1; 590 info->status.excessive_retries = 1;
427 } 591 }
428 info->status.retry_count = payload->retries - 1; 592 info->status.retry_count = payload->retries - 1;
429 info->status.ack_signal = le16_to_cpu(payload->ack_rssi); 593 info->status.ack_signal = p54_rssi_to_dbm(dev,
594 le16_to_cpu(payload->ack_rssi));
430 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); 595 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
431 ieee80211_tx_status_irqsafe(dev, entry); 596 ieee80211_tx_status_irqsafe(dev, entry);
432 break; 597 goto out;
433 } else 598 } else
434 last_addr = range->end_addr; 599 last_addr = range->end_addr;
435 entry = entry->next; 600 entry = entry->next;
436 } 601 }
602 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
437 603
604out:
438 if (freed >= IEEE80211_MAX_RTS_THRESHOLD + 0x170 + 605 if (freed >= IEEE80211_MAX_RTS_THRESHOLD + 0x170 +
439 sizeof(struct p54_control_hdr)) 606 sizeof(struct p54_control_hdr))
440 p54_wake_free_queues(dev); 607 p54_wake_free_queues(dev);
441} 608}
442 609
443static void p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb) 610static void p54_rx_eeprom_readback(struct ieee80211_hw *dev,
611 struct sk_buff *skb)
612{
613 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
614 struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data;
615 struct p54_common *priv = dev->priv;
616
617 if (!priv->eeprom)
618 return ;
619
620 memcpy(priv->eeprom, eeprom->data, eeprom->len);
621
622 complete(&priv->eeprom_comp);
623}
624
625static void p54_rx_stats(struct ieee80211_hw *dev, struct sk_buff *skb)
626{
627 struct p54_common *priv = dev->priv;
628 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
629 struct p54_statistics *stats = (struct p54_statistics *) hdr->data;
630 u32 tsf32 = le32_to_cpu(stats->tsf32);
631
632 if (tsf32 < priv->tsf_low32)
633 priv->tsf_high32++;
634 priv->tsf_low32 = tsf32;
635
636 priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail);
637 priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success);
638 priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs);
639
640 priv->noise = p54_rssi_to_dbm(dev, le32_to_cpu(stats->noise));
641 complete(&priv->stats_comp);
642
643 mod_timer(&priv->stats_timer, jiffies + 5 * HZ);
644}
645
646static int p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb)
444{ 647{
445 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; 648 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
446 649
@@ -450,36 +653,30 @@ static void p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb)
450 break; 653 break;
451 case P54_CONTROL_TYPE_BBP: 654 case P54_CONTROL_TYPE_BBP:
452 break; 655 break;
656 case P54_CONTROL_TYPE_STAT_READBACK:
657 p54_rx_stats(dev, skb);
658 break;
659 case P54_CONTROL_TYPE_EEPROM_READBACK:
660 p54_rx_eeprom_readback(dev, skb);
661 break;
453 default: 662 default:
454 printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n", 663 printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n",
455 wiphy_name(dev->wiphy), le16_to_cpu(hdr->type)); 664 wiphy_name(dev->wiphy), le16_to_cpu(hdr->type));
456 break; 665 break;
457 } 666 }
667
668 return 0;
458} 669}
459 670
460/* returns zero if skb can be reused */ 671/* returns zero if skb can be reused */
461int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb) 672int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
462{ 673{
463 u8 type = le16_to_cpu(*((__le16 *)skb->data)) >> 8; 674 u8 type = le16_to_cpu(*((__le16 *)skb->data)) >> 8;
464 switch (type) { 675
465 case 0x00: 676 if (type == 0x80)
466 case 0x01: 677 return p54_rx_control(dev, skb);
467 p54_rx_data(dev, skb); 678 else
468 return -1; 679 return p54_rx_data(dev, skb);
469 case 0x4d:
470 /* TODO: do something better... but then again, I've never seen this happen */
471 printk(KERN_ERR "%s: Received fault. Probably need to restart hardware now..\n",
472 wiphy_name(dev->wiphy));
473 break;
474 case 0x80:
475 p54_rx_control(dev, skb);
476 break;
477 default:
478 printk(KERN_ERR "%s: unknown frame RXed (0x%02x)\n",
479 wiphy_name(dev->wiphy), type);
480 break;
481 }
482 return 0;
483} 680}
484EXPORT_SYMBOL_GPL(p54_rx); 681EXPORT_SYMBOL_GPL(p54_rx);
485 682
@@ -503,7 +700,7 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
503 u32 target_addr = priv->rx_start; 700 u32 target_addr = priv->rx_start;
504 unsigned long flags; 701 unsigned long flags;
505 unsigned int left; 702 unsigned int left;
506 len = (len + 0x170 + 3) & ~0x3; /* 0x70 headroom, 0x100 tailroom */ 703 len = (len + priv->headroom + priv->tailroom + 3) & ~0x3;
507 704
508 spin_lock_irqsave(&priv->tx_queue.lock, flags); 705 spin_lock_irqsave(&priv->tx_queue.lock, flags);
509 left = skb_queue_len(&priv->tx_queue); 706 left = skb_queue_len(&priv->tx_queue);
@@ -538,14 +735,73 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
538 range->start_addr = target_addr; 735 range->start_addr = target_addr;
539 range->end_addr = target_addr + len; 736 range->end_addr = target_addr + len;
540 __skb_queue_after(&priv->tx_queue, target_skb, skb); 737 __skb_queue_after(&priv->tx_queue, target_skb, skb);
541 if (largest_hole < IEEE80211_MAX_RTS_THRESHOLD + 0x170 + 738 if (largest_hole < priv->rx_mtu + priv->headroom +
739 priv->tailroom +
542 sizeof(struct p54_control_hdr)) 740 sizeof(struct p54_control_hdr))
543 ieee80211_stop_queues(dev); 741 ieee80211_stop_queues(dev);
544 } 742 }
545 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 743 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
546 744
547 data->req_id = cpu_to_le32(target_addr + 0x70); 745 data->req_id = cpu_to_le32(target_addr + priv->headroom);
746}
747
748int p54_read_eeprom(struct ieee80211_hw *dev)
749{
750 struct p54_common *priv = dev->priv;
751 struct p54_control_hdr *hdr = NULL;
752 struct p54_eeprom_lm86 *eeprom_hdr;
753 size_t eeprom_size = 0x2020, offset = 0, blocksize;
754 int ret = -ENOMEM;
755 void *eeprom = NULL;
756
757 hdr = (struct p54_control_hdr *)kzalloc(sizeof(*hdr) +
758 sizeof(*eeprom_hdr) + EEPROM_READBACK_LEN, GFP_KERNEL);
759 if (!hdr)
760 goto free;
761
762 priv->eeprom = kzalloc(EEPROM_READBACK_LEN, GFP_KERNEL);
763 if (!priv->eeprom)
764 goto free;
765
766 eeprom = kzalloc(eeprom_size, GFP_KERNEL);
767 if (!eeprom)
768 goto free;
769
770 hdr->magic1 = cpu_to_le16(0x8000);
771 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_EEPROM_READBACK);
772 hdr->retry1 = hdr->retry2 = 0;
773 eeprom_hdr = (struct p54_eeprom_lm86 *) hdr->data;
774
775 while (eeprom_size) {
776 blocksize = min(eeprom_size, (size_t)EEPROM_READBACK_LEN);
777 hdr->len = cpu_to_le16(blocksize + sizeof(*eeprom_hdr));
778 eeprom_hdr->offset = cpu_to_le16(offset);
779 eeprom_hdr->len = cpu_to_le16(blocksize);
780 p54_assign_address(dev, NULL, hdr, hdr->len + sizeof(*hdr));
781 priv->tx(dev, hdr, hdr->len + sizeof(*hdr), 0);
782
783 if (!wait_for_completion_interruptible_timeout(&priv->eeprom_comp, HZ)) {
784 printk(KERN_ERR "%s: device does not respond!\n",
785 wiphy_name(dev->wiphy));
786 ret = -EBUSY;
787 goto free;
788 }
789
790 memcpy(eeprom + offset, priv->eeprom, blocksize);
791 offset += blocksize;
792 eeprom_size -= blocksize;
793 }
794
795 ret = p54_parse_eeprom(dev, eeprom, offset);
796free:
797 kfree(priv->eeprom);
798 priv->eeprom = NULL;
799 kfree(hdr);
800 kfree(eeprom);
801
802 return ret;
548} 803}
804EXPORT_SYMBOL_GPL(p54_read_eeprom);
549 805
550static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 806static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
551{ 807{
@@ -559,7 +815,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
559 u8 rate; 815 u8 rate;
560 u8 cts_rate = 0x20; 816 u8 cts_rate = 0x20;
561 817
562 current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)]; 818 current_queue = &priv->tx_stats[skb_get_queue_mapping(skb) + 4];
563 if (unlikely(current_queue->len > current_queue->limit)) 819 if (unlikely(current_queue->len > current_queue->limit))
564 return NETDEV_TX_BUSY; 820 return NETDEV_TX_BUSY;
565 current_queue->len++; 821 current_queue->len++;
@@ -601,7 +857,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
601 txhdr->hw_queue = skb_get_queue_mapping(skb) + 4; 857 txhdr->hw_queue = skb_get_queue_mapping(skb) + 4;
602 txhdr->tx_antenna = (info->antenna_sel_tx == 0) ? 858 txhdr->tx_antenna = (info->antenna_sel_tx == 0) ?
603 2 : info->antenna_sel_tx - 1; 859 2 : info->antenna_sel_tx - 1;
604 txhdr->output_power = 0x7f; // HW Maximum 860 txhdr->output_power = priv->output_power;
605 txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 861 txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
606 0 : cts_rate; 862 0 : cts_rate;
607 if (padding) 863 if (padding)
@@ -628,12 +884,12 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
628} 884}
629 885
630static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type, 886static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type,
631 const u8 *dst, const u8 *src, u8 antenna, 887 const u8 *bssid)
632 u32 magic3, u32 magic8, u32 magic9)
633{ 888{
634 struct p54_common *priv = dev->priv; 889 struct p54_common *priv = dev->priv;
635 struct p54_control_hdr *hdr; 890 struct p54_control_hdr *hdr;
636 struct p54_tx_control_filter *filter; 891 struct p54_tx_control_filter *filter;
892 size_t data_len;
637 893
638 hdr = kzalloc(sizeof(*hdr) + sizeof(*filter) + 894 hdr = kzalloc(sizeof(*hdr) + sizeof(*filter) +
639 priv->tx_hdr_len, GFP_ATOMIC); 895 priv->tx_hdr_len, GFP_ATOMIC);
@@ -644,25 +900,35 @@ static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type,
644 900
645 filter = (struct p54_tx_control_filter *) hdr->data; 901 filter = (struct p54_tx_control_filter *) hdr->data;
646 hdr->magic1 = cpu_to_le16(0x8001); 902 hdr->magic1 = cpu_to_le16(0x8001);
647 hdr->len = cpu_to_le16(sizeof(*filter));
648 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*filter));
649 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET); 903 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET);
650 904
651 filter->filter_type = cpu_to_le16(filter_type); 905 priv->filter_type = filter->filter_type = cpu_to_le16(filter_type);
652 memcpy(filter->dst, dst, ETH_ALEN); 906 memcpy(filter->mac_addr, priv->mac_addr, ETH_ALEN);
653 if (!src) 907 if (!bssid)
654 memset(filter->src, ~0, ETH_ALEN); 908 memset(filter->bssid, ~0, ETH_ALEN);
655 else 909 else
656 memcpy(filter->src, src, ETH_ALEN); 910 memcpy(filter->bssid, bssid, ETH_ALEN);
657 filter->antenna = antenna; 911
658 filter->magic3 = cpu_to_le32(magic3); 912 filter->rx_antenna = priv->rx_antenna;
659 filter->rx_addr = cpu_to_le32(priv->rx_end); 913
660 filter->max_rx = cpu_to_le16(0x0620); /* FIXME: for usb ver 1.. maybe */ 914 if (priv->fw_var < 0x500) {
661 filter->rxhw = priv->rxhw; 915 data_len = P54_TX_CONTROL_FILTER_V1_LEN;
662 filter->magic8 = cpu_to_le16(magic8); 916 filter->v1.basic_rate_mask = cpu_to_le32(0x15F);
663 filter->magic9 = cpu_to_le16(magic9); 917 filter->v1.rx_addr = cpu_to_le32(priv->rx_end);
664 918 filter->v1.max_rx = cpu_to_le16(priv->rx_mtu);
665 priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*filter), 1); 919 filter->v1.rxhw = cpu_to_le16(priv->rxhw);
920 filter->v1.wakeup_timer = cpu_to_le16(500);
921 } else {
922 data_len = P54_TX_CONTROL_FILTER_V2_LEN;
923 filter->v2.rx_addr = cpu_to_le32(priv->rx_end);
924 filter->v2.max_rx = cpu_to_le16(priv->rx_mtu);
925 filter->v2.rxhw = cpu_to_le16(priv->rxhw);
926 filter->v2.timer = cpu_to_le16(1000);
927 }
928
929 hdr->len = cpu_to_le16(data_len);
930 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + data_len);
931 priv->tx(dev, hdr, sizeof(*hdr) + data_len, 1);
666 return 0; 932 return 0;
667} 933}
668 934
@@ -672,12 +938,10 @@ static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq)
672 struct p54_control_hdr *hdr; 938 struct p54_control_hdr *hdr;
673 struct p54_tx_control_channel *chan; 939 struct p54_tx_control_channel *chan;
674 unsigned int i; 940 unsigned int i;
675 size_t payload_len = sizeof(*chan) + sizeof(u32)*2 + 941 size_t data_len;
676 sizeof(*chan->curve_data) *
677 priv->curve_data->points_per_channel;
678 void *entry; 942 void *entry;
679 943
680 hdr = kzalloc(sizeof(*hdr) + payload_len + 944 hdr = kzalloc(sizeof(*hdr) + sizeof(*chan) +
681 priv->tx_hdr_len, GFP_KERNEL); 945 priv->tx_hdr_len, GFP_KERNEL);
682 if (!hdr) 946 if (!hdr)
683 return -ENOMEM; 947 return -ENOMEM;
@@ -687,12 +951,11 @@ static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq)
687 chan = (struct p54_tx_control_channel *) hdr->data; 951 chan = (struct p54_tx_control_channel *) hdr->data;
688 952
689 hdr->magic1 = cpu_to_le16(0x8001); 953 hdr->magic1 = cpu_to_le16(0x8001);
690 hdr->len = cpu_to_le16(sizeof(*chan)); 954
691 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE); 955 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE);
692 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + payload_len);
693 956
694 chan->magic1 = cpu_to_le16(0x1); 957 chan->flags = cpu_to_le16(0x1);
695 chan->magic2 = cpu_to_le16(0x0); 958 chan->dwell = cpu_to_le16(0x0);
696 959
697 for (i = 0; i < priv->iq_autocal_len; i++) { 960 for (i = 0; i < priv->iq_autocal_len; i++) {
698 if (priv->iq_autocal[i].freq != freq) 961 if (priv->iq_autocal[i].freq != freq)
@@ -710,35 +973,51 @@ static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq)
710 continue; 973 continue;
711 974
712 chan->val_barker = 0x38; 975 chan->val_barker = 0x38;
713 chan->val_bpsk = priv->output_limit[i].val_bpsk; 976 chan->val_bpsk = chan->dup_bpsk =
714 chan->val_qpsk = priv->output_limit[i].val_qpsk; 977 priv->output_limit[i].val_bpsk;
715 chan->val_16qam = priv->output_limit[i].val_16qam; 978 chan->val_qpsk = chan->dup_qpsk =
716 chan->val_64qam = priv->output_limit[i].val_64qam; 979 priv->output_limit[i].val_qpsk;
980 chan->val_16qam = chan->dup_16qam =
981 priv->output_limit[i].val_16qam;
982 chan->val_64qam = chan->dup_64qam =
983 priv->output_limit[i].val_64qam;
717 break; 984 break;
718 } 985 }
719 if (i == priv->output_limit_len) 986 if (i == priv->output_limit_len)
720 goto err; 987 goto err;
721 988
722 chan->pa_points_per_curve = priv->curve_data->points_per_channel;
723
724 entry = priv->curve_data->data; 989 entry = priv->curve_data->data;
725 for (i = 0; i < priv->curve_data->channels; i++) { 990 for (i = 0; i < priv->curve_data->channels; i++) {
726 if (*((__le16 *)entry) != freq) { 991 if (*((__le16 *)entry) != freq) {
727 entry += sizeof(__le16); 992 entry += sizeof(__le16);
728 entry += sizeof(struct pda_pa_curve_data_sample_rev1) * 993 entry += sizeof(struct p54_pa_curve_data_sample) *
729 chan->pa_points_per_curve; 994 priv->curve_data->points_per_channel;
730 continue; 995 continue;
731 } 996 }
732 997
733 entry += sizeof(__le16); 998 entry += sizeof(__le16);
999 chan->pa_points_per_curve =
1000 min(priv->curve_data->points_per_channel, (u8) 8);
1001
734 memcpy(chan->curve_data, entry, sizeof(*chan->curve_data) * 1002 memcpy(chan->curve_data, entry, sizeof(*chan->curve_data) *
735 chan->pa_points_per_curve); 1003 chan->pa_points_per_curve);
736 break; 1004 break;
737 } 1005 }
738 1006
739 memcpy(hdr->data + payload_len - 4, &chan->val_bpsk, 4); 1007 if (priv->fw_var < 0x500) {
1008 data_len = P54_TX_CONTROL_CHANNEL_V1_LEN;
1009 chan->v1.rssical_mul = cpu_to_le16(130);
1010 chan->v1.rssical_add = cpu_to_le16(0xfe70);
1011 } else {
1012 data_len = P54_TX_CONTROL_CHANNEL_V2_LEN;
1013 chan->v2.rssical_mul = cpu_to_le16(130);
1014 chan->v2.rssical_add = cpu_to_le16(0xfe70);
1015 chan->v2.basic_rate_mask = cpu_to_le32(0x15f);
1016 }
740 1017
741 priv->tx(dev, hdr, sizeof(*hdr) + payload_len, 1); 1018 hdr->len = cpu_to_le16(data_len);
1019 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + data_len);
1020 priv->tx(dev, hdr, sizeof(*hdr) + data_len, 1);
742 return 0; 1021 return 0;
743 1022
744 err: 1023 err:
@@ -846,12 +1125,25 @@ static int p54_start(struct ieee80211_hw *dev)
846 return -ENOMEM; 1125 return -ENOMEM;
847 } 1126 }
848 1127
1128 if (!priv->cached_stats) {
1129 priv->cached_stats = kzalloc(sizeof(struct p54_statistics) +
1130 priv->tx_hdr_len + sizeof(struct p54_control_hdr),
1131 GFP_KERNEL);
1132
1133 if (!priv->cached_stats) {
1134 kfree(priv->cached_vdcf);
1135 priv->cached_vdcf = NULL;
1136 return -ENOMEM;
1137 }
1138 }
1139
849 err = priv->open(dev); 1140 err = priv->open(dev);
850 if (!err) 1141 if (!err)
851 priv->mode = IEEE80211_IF_TYPE_MNTR; 1142 priv->mode = NL80211_IFTYPE_MONITOR;
852 1143
853 p54_init_vdcf(dev); 1144 p54_init_vdcf(dev);
854 1145
1146 mod_timer(&priv->stats_timer, jiffies + HZ);
855 return err; 1147 return err;
856} 1148}
857 1149
@@ -859,10 +1151,13 @@ static void p54_stop(struct ieee80211_hw *dev)
859{ 1151{
860 struct p54_common *priv = dev->priv; 1152 struct p54_common *priv = dev->priv;
861 struct sk_buff *skb; 1153 struct sk_buff *skb;
1154
1155 del_timer(&priv->stats_timer);
862 while ((skb = skb_dequeue(&priv->tx_queue))) 1156 while ((skb = skb_dequeue(&priv->tx_queue)))
863 kfree_skb(skb); 1157 kfree_skb(skb);
864 priv->stop(dev); 1158 priv->stop(dev);
865 priv->mode = IEEE80211_IF_TYPE_INVALID; 1159 priv->tsf_high32 = priv->tsf_low32 = 0;
1160 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
866} 1161}
867 1162
868static int p54_add_interface(struct ieee80211_hw *dev, 1163static int p54_add_interface(struct ieee80211_hw *dev,
@@ -870,11 +1165,11 @@ static int p54_add_interface(struct ieee80211_hw *dev,
870{ 1165{
871 struct p54_common *priv = dev->priv; 1166 struct p54_common *priv = dev->priv;
872 1167
873 if (priv->mode != IEEE80211_IF_TYPE_MNTR) 1168 if (priv->mode != NL80211_IFTYPE_MONITOR)
874 return -EOPNOTSUPP; 1169 return -EOPNOTSUPP;
875 1170
876 switch (conf->type) { 1171 switch (conf->type) {
877 case IEEE80211_IF_TYPE_STA: 1172 case NL80211_IFTYPE_STATION:
878 priv->mode = conf->type; 1173 priv->mode = conf->type;
879 break; 1174 break;
880 default: 1175 default:
@@ -883,12 +1178,11 @@ static int p54_add_interface(struct ieee80211_hw *dev,
883 1178
884 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 1179 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
885 1180
886 p54_set_filter(dev, 0, priv->mac_addr, NULL, 0, 1, 0, 0xF642); 1181 p54_set_filter(dev, 0, NULL);
887 p54_set_filter(dev, 0, priv->mac_addr, NULL, 1, 0, 0, 0xF642);
888 1182
889 switch (conf->type) { 1183 switch (conf->type) {
890 case IEEE80211_IF_TYPE_STA: 1184 case NL80211_IFTYPE_STATION:
891 p54_set_filter(dev, 1, priv->mac_addr, NULL, 0, 0x15F, 0x1F4, 0); 1185 p54_set_filter(dev, 1, NULL);
892 break; 1186 break;
893 default: 1187 default:
894 BUG(); /* impossible */ 1188 BUG(); /* impossible */
@@ -904,9 +1198,9 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
904 struct ieee80211_if_init_conf *conf) 1198 struct ieee80211_if_init_conf *conf)
905{ 1199{
906 struct p54_common *priv = dev->priv; 1200 struct p54_common *priv = dev->priv;
907 priv->mode = IEEE80211_IF_TYPE_MNTR; 1201 priv->mode = NL80211_IFTYPE_MONITOR;
908 memset(priv->mac_addr, 0, ETH_ALEN); 1202 memset(priv->mac_addr, 0, ETH_ALEN);
909 p54_set_filter(dev, 0, priv->mac_addr, NULL, 2, 0, 0, 0); 1203 p54_set_filter(dev, 0, NULL);
910} 1204}
911 1205
912static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) 1206static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
@@ -915,6 +1209,9 @@ static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
915 struct p54_common *priv = dev->priv; 1209 struct p54_common *priv = dev->priv;
916 1210
917 mutex_lock(&priv->conf_mutex); 1211 mutex_lock(&priv->conf_mutex);
1212 priv->rx_antenna = (conf->antenna_sel_rx == 0) ?
1213 2 : conf->antenna_sel_tx - 1;
1214 priv->output_power = conf->power_level << 2;
918 ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq)); 1215 ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq));
919 p54_set_vdcf(dev); 1216 p54_set_vdcf(dev);
920 mutex_unlock(&priv->conf_mutex); 1217 mutex_unlock(&priv->conf_mutex);
@@ -928,8 +1225,7 @@ static int p54_config_interface(struct ieee80211_hw *dev,
928 struct p54_common *priv = dev->priv; 1225 struct p54_common *priv = dev->priv;
929 1226
930 mutex_lock(&priv->conf_mutex); 1227 mutex_lock(&priv->conf_mutex);
931 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 0, 1, 0, 0xF642); 1228 p54_set_filter(dev, 0, conf->bssid);
932 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 2, 0, 0, 0);
933 p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0); 1229 p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0);
934 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 1230 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
935 mutex_unlock(&priv->conf_mutex); 1231 mutex_unlock(&priv->conf_mutex);
@@ -943,15 +1239,26 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
943{ 1239{
944 struct p54_common *priv = dev->priv; 1240 struct p54_common *priv = dev->priv;
945 1241
946 *total_flags &= FIF_BCN_PRBRESP_PROMISC; 1242 *total_flags &= FIF_BCN_PRBRESP_PROMISC |
1243 FIF_PROMISC_IN_BSS |
1244 FIF_FCSFAIL;
1245
1246 priv->filter_flags = *total_flags;
947 1247
948 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 1248 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
949 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) 1249 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
950 p54_set_filter(dev, 0, priv->mac_addr, 1250 p54_set_filter(dev, priv->filter_type, NULL);
951 NULL, 2, 0, 0, 0); 1251 else
1252 p54_set_filter(dev, priv->filter_type, priv->bssid);
1253 }
1254
1255 if (changed_flags & FIF_PROMISC_IN_BSS) {
1256 if (*total_flags & FIF_PROMISC_IN_BSS)
1257 p54_set_filter(dev, priv->filter_type |
1258 cpu_to_le16(0x8), NULL);
952 else 1259 else
953 p54_set_filter(dev, 0, priv->mac_addr, 1260 p54_set_filter(dev, priv->filter_type &
954 priv->bssid, 2, 0, 0, 0); 1261 ~cpu_to_le16(0x8), priv->bssid);
955 } 1262 }
956} 1263}
957 1264
@@ -975,10 +1282,67 @@ static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue,
975 return 0; 1282 return 0;
976} 1283}
977 1284
1285static int p54_init_xbow_synth(struct ieee80211_hw *dev)
1286{
1287 struct p54_common *priv = dev->priv;
1288 struct p54_control_hdr *hdr;
1289 struct p54_tx_control_xbow_synth *xbow;
1290
1291 hdr = kzalloc(sizeof(*hdr) + sizeof(*xbow) +
1292 priv->tx_hdr_len, GFP_KERNEL);
1293 if (!hdr)
1294 return -ENOMEM;
1295
1296 hdr = (void *)hdr + priv->tx_hdr_len;
1297 hdr->magic1 = cpu_to_le16(0x8001);
1298 hdr->len = cpu_to_le16(sizeof(*xbow));
1299 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_XBOW_SYNTH_CFG);
1300 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*xbow));
1301
1302 xbow = (struct p54_tx_control_xbow_synth *) hdr->data;
1303 xbow->magic1 = cpu_to_le16(0x1);
1304 xbow->magic2 = cpu_to_le16(0x2);
1305 xbow->freq = cpu_to_le16(5390);
1306
1307 priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*xbow), 1);
1308
1309 return 0;
1310}
1311
1312static void p54_statistics_timer(unsigned long data)
1313{
1314 struct ieee80211_hw *dev = (struct ieee80211_hw *) data;
1315 struct p54_common *priv = dev->priv;
1316 struct p54_control_hdr *hdr;
1317 struct p54_statistics *stats;
1318
1319 BUG_ON(!priv->cached_stats);
1320
1321 hdr = (void *)priv->cached_stats + priv->tx_hdr_len;
1322 hdr->magic1 = cpu_to_le16(0x8000);
1323 hdr->len = cpu_to_le16(sizeof(*stats));
1324 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_STAT_READBACK);
1325 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*stats));
1326
1327 priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*stats), 0);
1328}
1329
978static int p54_get_stats(struct ieee80211_hw *dev, 1330static int p54_get_stats(struct ieee80211_hw *dev,
979 struct ieee80211_low_level_stats *stats) 1331 struct ieee80211_low_level_stats *stats)
980{ 1332{
981 /* TODO */ 1333 struct p54_common *priv = dev->priv;
1334
1335 del_timer(&priv->stats_timer);
1336 p54_statistics_timer((unsigned long)dev);
1337
1338 if (!wait_for_completion_interruptible_timeout(&priv->stats_comp, HZ)) {
1339 printk(KERN_ERR "%s: device does not respond!\n",
1340 wiphy_name(dev->wiphy));
1341 return -EBUSY;
1342 }
1343
1344 memcpy(stats, &priv->stats, sizeof(*stats));
1345
982 return 0; 1346 return 0;
983} 1347}
984 1348
@@ -987,7 +1351,7 @@ static int p54_get_tx_stats(struct ieee80211_hw *dev,
987{ 1351{
988 struct p54_common *priv = dev->priv; 1352 struct p54_common *priv = dev->priv;
989 1353
990 memcpy(stats, &priv->tx_stats, sizeof(stats[0]) * dev->queues); 1354 memcpy(stats, &priv->tx_stats[4], sizeof(stats[0]) * dev->queues);
991 1355
992 return 0; 1356 return 0;
993} 1357}
@@ -1016,22 +1380,32 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
1016 return NULL; 1380 return NULL;
1017 1381
1018 priv = dev->priv; 1382 priv = dev->priv;
1019 priv->mode = IEEE80211_IF_TYPE_INVALID; 1383 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1020 skb_queue_head_init(&priv->tx_queue); 1384 skb_queue_head_init(&priv->tx_queue);
1021 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
1022 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */ 1385 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */
1023 IEEE80211_HW_RX_INCLUDES_FCS | 1386 IEEE80211_HW_RX_INCLUDES_FCS |
1024 IEEE80211_HW_SIGNAL_UNSPEC; 1387 IEEE80211_HW_SIGNAL_DBM |
1388 IEEE80211_HW_NOISE_DBM;
1389
1390 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1391
1025 dev->channel_change_time = 1000; /* TODO: find actual value */ 1392 dev->channel_change_time = 1000; /* TODO: find actual value */
1026 dev->max_signal = 127;
1027 1393
1028 priv->tx_stats[0].limit = 5; 1394 priv->tx_stats[0].limit = 1;
1395 priv->tx_stats[1].limit = 1;
1396 priv->tx_stats[2].limit = 1;
1397 priv->tx_stats[3].limit = 1;
1398 priv->tx_stats[4].limit = 5;
1029 dev->queues = 1; 1399 dev->queues = 1;
1030 1400 priv->noise = -94;
1031 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + 1401 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 +
1032 sizeof(struct p54_tx_control_allocdata); 1402 sizeof(struct p54_tx_control_allocdata);
1033 1403
1034 mutex_init(&priv->conf_mutex); 1404 mutex_init(&priv->conf_mutex);
1405 init_completion(&priv->eeprom_comp);
1406 init_completion(&priv->stats_comp);
1407 setup_timer(&priv->stats_timer, p54_statistics_timer,
1408 (unsigned long)dev);
1035 1409
1036 return dev; 1410 return dev;
1037} 1411}
@@ -1040,6 +1414,7 @@ EXPORT_SYMBOL_GPL(p54_init_common);
1040void p54_free_common(struct ieee80211_hw *dev) 1414void p54_free_common(struct ieee80211_hw *dev)
1041{ 1415{
1042 struct p54_common *priv = dev->priv; 1416 struct p54_common *priv = dev->priv;
1417 kfree(priv->cached_stats);
1043 kfree(priv->iq_autocal); 1418 kfree(priv->iq_autocal);
1044 kfree(priv->output_limit); 1419 kfree(priv->output_limit);
1045 kfree(priv->curve_data); 1420 kfree(priv->curve_data);
diff --git a/drivers/net/wireless/p54/p54common.h b/drivers/net/wireless/p54/p54common.h
index 8db6c0e8e540..4da736c789ac 100644
--- a/drivers/net/wireless/p54/p54common.h
+++ b/drivers/net/wireless/p54/p54common.h
@@ -1,5 +1,5 @@
1#ifndef PRISM54COMMON_H 1#ifndef P54COMMON_H
2#define PRISM54COMMON_H 2#define P54COMMON_H
3 3
4/* 4/*
5 * Common code specific definitions for mac80211 Prism54 drivers 5 * Common code specific definitions for mac80211 Prism54 drivers
@@ -29,6 +29,17 @@ struct bootrec_exp_if {
29 __le16 top_compat; 29 __le16 top_compat;
30} __attribute__((packed)); 30} __attribute__((packed));
31 31
32struct bootrec_desc {
33 __le16 modes;
34 __le16 flags;
35 __le32 rx_start;
36 __le32 rx_end;
37 u8 headroom;
38 u8 tailroom;
39 u8 unimportant[6];
40 u8 rates[16];
41} __attribute__((packed));
42
32#define BR_CODE_MIN 0x80000000 43#define BR_CODE_MIN 0x80000000
33#define BR_CODE_COMPONENT_ID 0x80000001 44#define BR_CODE_COMPONENT_ID 0x80000001
34#define BR_CODE_COMPONENT_VERSION 0x80000002 45#define BR_CODE_COMPONENT_VERSION 0x80000002
@@ -39,11 +50,6 @@ struct bootrec_exp_if {
39#define BR_CODE_END_OF_BRA 0xFF0000FF 50#define BR_CODE_END_OF_BRA 0xFF0000FF
40#define LEGACY_BR_CODE_END_OF_BRA 0xFFFFFFFF 51#define LEGACY_BR_CODE_END_OF_BRA 0xFFFFFFFF
41 52
42#define FW_FMAC 0x464d4143
43#define FW_LM86 0x4c4d3836
44#define FW_LM87 0x4c4d3837
45#define FW_LM20 0x4c4d3230
46
47/* PDA defines are Copyright (C) 2005 Nokia Corporation (taken from islsm_pda.h) */ 53/* PDA defines are Copyright (C) 2005 Nokia Corporation (taken from islsm_pda.h) */
48 54
49struct pda_entry { 55struct pda_entry {
@@ -89,6 +95,16 @@ struct pda_pa_curve_data_sample_rev1 {
89 u8 data_qpsk; 95 u8 data_qpsk;
90 u8 data_16qam; 96 u8 data_16qam;
91 u8 data_64qam; 97 u8 data_64qam;
98} __attribute__ ((packed));
99
100struct p54_pa_curve_data_sample {
101 u8 rf_power;
102 u8 pa_detector;
103 u8 data_barker;
104 u8 data_bpsk;
105 u8 data_qpsk;
106 u8 data_16qam;
107 u8 data_64qam;
92 u8 padding; 108 u8 padding;
93} __attribute__ ((packed)); 109} __attribute__ ((packed));
94 110
@@ -169,8 +185,9 @@ struct p54_rx_hdr {
169 u8 rssi; 185 u8 rssi;
170 u8 quality; 186 u8 quality;
171 u16 unknown2; 187 u16 unknown2;
172 __le64 timestamp; 188 __le32 tsf32;
173 u8 data[0]; 189 __le32 unalloc0;
190 u8 align[0];
174} __attribute__ ((packed)); 191} __attribute__ ((packed));
175 192
176struct p54_frame_sent_hdr { 193struct p54_frame_sent_hdr {
@@ -198,22 +215,37 @@ struct p54_tx_control_allocdata {
198 215
199struct p54_tx_control_filter { 216struct p54_tx_control_filter {
200 __le16 filter_type; 217 __le16 filter_type;
201 u8 dst[ETH_ALEN]; 218 u8 mac_addr[ETH_ALEN];
202 u8 src[ETH_ALEN]; 219 u8 bssid[ETH_ALEN];
203 u8 antenna; 220 u8 rx_antenna;
204 u8 debug; 221 u8 rx_align;
205 __le32 magic3; 222 union {
206 u8 rates[8]; // FIXME: what's this for? 223 struct {
207 __le32 rx_addr; 224 __le32 basic_rate_mask;
208 __le16 max_rx; 225 u8 rts_rates[8];
209 __le16 rxhw; 226 __le32 rx_addr;
210 __le16 magic8; 227 __le16 max_rx;
211 __le16 magic9; 228 __le16 rxhw;
229 __le16 wakeup_timer;
230 __le16 unalloc0;
231 } v1 __attribute__ ((packed));
232 struct {
233 __le32 rx_addr;
234 __le16 max_rx;
235 __le16 rxhw;
236 __le16 timer;
237 __le16 unalloc0;
238 __le32 unalloc1;
239 } v2 __attribute__ ((packed));
240 } __attribute__ ((packed));
212} __attribute__ ((packed)); 241} __attribute__ ((packed));
213 242
243#define P54_TX_CONTROL_FILTER_V1_LEN (sizeof(struct p54_tx_control_filter))
244#define P54_TX_CONTROL_FILTER_V2_LEN (sizeof(struct p54_tx_control_filter)-8)
245
214struct p54_tx_control_channel { 246struct p54_tx_control_channel {
215 __le16 magic1; 247 __le16 flags;
216 __le16 magic2; 248 __le16 dwell;
217 u8 padding1[20]; 249 u8 padding1[20];
218 struct pda_iq_autocal_entry iq_autocal; 250 struct pda_iq_autocal_entry iq_autocal;
219 u8 pa_points_per_curve; 251 u8 pa_points_per_curve;
@@ -222,10 +254,29 @@ struct p54_tx_control_channel {
222 u8 val_qpsk; 254 u8 val_qpsk;
223 u8 val_16qam; 255 u8 val_16qam;
224 u8 val_64qam; 256 u8 val_64qam;
225 struct pda_pa_curve_data_sample_rev1 curve_data[0]; 257 struct p54_pa_curve_data_sample curve_data[8];
226 /* additional padding/data after curve_data */ 258 u8 dup_bpsk;
259 u8 dup_qpsk;
260 u8 dup_16qam;
261 u8 dup_64qam;
262 union {
263 struct {
264 __le16 rssical_mul;
265 __le16 rssical_add;
266 } v1 __attribute__ ((packed));
267
268 struct {
269 __le32 basic_rate_mask;
270 u8 rts_rates[8];
271 __le16 rssical_mul;
272 __le16 rssical_add;
273 } v2 __attribute__ ((packed));
274 } __attribute__ ((packed));
227} __attribute__ ((packed)); 275} __attribute__ ((packed));
228 276
277#define P54_TX_CONTROL_CHANNEL_V1_LEN (sizeof(struct p54_tx_control_channel)-12)
278#define P54_TX_CONTROL_CHANNEL_V2_LEN (sizeof(struct p54_tx_control_channel))
279
229struct p54_tx_control_led { 280struct p54_tx_control_led {
230 __le16 mode; 281 __le16 mode;
231 __le16 led_temporary; 282 __le16 led_temporary;
@@ -250,4 +301,24 @@ struct p54_tx_control_vdcf {
250 __le16 frameburst; 301 __le16 frameburst;
251} __attribute__ ((packed)); 302} __attribute__ ((packed));
252 303
253#endif /* PRISM54COMMON_H */ 304struct p54_statistics {
305 __le32 rx_success;
306 __le32 rx_bad_fcs;
307 __le32 rx_abort;
308 __le32 rx_abort_phy;
309 __le32 rts_success;
310 __le32 rts_fail;
311 __le32 tsf32;
312 __le32 airtime;
313 __le32 noise;
314 __le32 unkn[10]; /* CCE / CCA / RADAR */
315} __attribute__ ((packed));
316
317struct p54_tx_control_xbow_synth {
318 __le16 magic1;
319 __le16 magic2;
320 __le16 freq;
321 u32 padding[5];
322} __attribute__ ((packed));
323
324#endif /* P54COMMON_H */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 7dd4add4bf4e..1c2a02a741af 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -3,6 +3,7 @@
3 * Linux device driver for PCI based Prism54 3 * Linux device driver for PCI based Prism54
4 * 4 *
5 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> 5 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
6 * Copyright (c) 2008, Christian Lamparter <chunkeey@web.de>
6 * 7 *
7 * Based on the islsm (softmac prism54) driver, which is: 8 * Based on the islsm (softmac prism54) driver, which is:
8 * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al. 9 * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al.
@@ -71,16 +72,18 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
71 P54P_WRITE(ctrl_stat, reg); 72 P54P_WRITE(ctrl_stat, reg);
72 wmb(); 73 wmb();
73 74
74 mdelay(50);
75
76 err = request_firmware(&fw_entry, "isl3886", &priv->pdev->dev); 75 err = request_firmware(&fw_entry, "isl3886", &priv->pdev->dev);
77 if (err) { 76 if (err) {
78 printk(KERN_ERR "%s (prism54pci): cannot find firmware " 77 printk(KERN_ERR "%s (p54pci): cannot find firmware "
79 "(isl3886)\n", pci_name(priv->pdev)); 78 "(isl3886)\n", pci_name(priv->pdev));
80 return err; 79 return err;
81 } 80 }
82 81
83 p54_parse_firmware(dev, fw_entry); 82 err = p54_parse_firmware(dev, fw_entry);
83 if (err) {
84 release_firmware(fw_entry);
85 return err;
86 }
84 87
85 data = (__le32 *) fw_entry->data; 88 data = (__le32 *) fw_entry->data;
86 remains = fw_entry->size; 89 remains = fw_entry->size;
@@ -121,162 +124,147 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
121 wmb(); 124 wmb();
122 udelay(10); 125 udelay(10);
123 126
127 /* wait for the firmware to boot properly */
128 mdelay(100);
129
124 return 0; 130 return 0;
125} 131}
126 132
127static irqreturn_t p54p_simple_interrupt(int irq, void *dev_id) 133static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
134 int ring_index, struct p54p_desc *ring, u32 ring_limit,
135 struct sk_buff **rx_buf)
128{ 136{
129 struct p54p_priv *priv = (struct p54p_priv *) dev_id; 137 struct p54p_priv *priv = dev->priv;
130 __le32 reg; 138 struct p54p_ring_control *ring_control = priv->ring_control;
131 139 u32 limit, idx, i;
132 reg = P54P_READ(int_ident);
133 P54P_WRITE(int_ack, reg);
134 140
135 if (reg & P54P_READ(int_enable)) 141 idx = le32_to_cpu(ring_control->host_idx[ring_index]);
136 complete(&priv->boot_comp); 142 limit = idx;
143 limit -= le32_to_cpu(ring_control->device_idx[ring_index]);
144 limit = ring_limit - limit;
137 145
138 return IRQ_HANDLED; 146 i = idx % ring_limit;
139} 147 while (limit-- > 1) {
148 struct p54p_desc *desc = &ring[i];
140 149
141static int p54p_read_eeprom(struct ieee80211_hw *dev) 150 if (!desc->host_addr) {
142{ 151 struct sk_buff *skb;
143 struct p54p_priv *priv = dev->priv; 152 dma_addr_t mapping;
144 struct p54p_ring_control *ring_control = priv->ring_control; 153 skb = dev_alloc_skb(priv->common.rx_mtu + 32);
145 int err; 154 if (!skb)
146 struct p54_control_hdr *hdr; 155 break;
147 void *eeprom;
148 dma_addr_t rx_mapping, tx_mapping;
149 u16 alen;
150 156
151 init_completion(&priv->boot_comp); 157 mapping = pci_map_single(priv->pdev,
152 err = request_irq(priv->pdev->irq, &p54p_simple_interrupt, 158 skb_tail_pointer(skb),
153 IRQF_SHARED, "prism54pci", priv); 159 priv->common.rx_mtu + 32,
154 if (err) { 160 PCI_DMA_FROMDEVICE);
155 printk(KERN_ERR "%s (prism54pci): failed to register IRQ handler\n", 161 desc->host_addr = cpu_to_le32(mapping);
156 pci_name(priv->pdev)); 162 desc->device_addr = 0; // FIXME: necessary?
157 return err; 163 desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
158 } 164 desc->flags = 0;
165 rx_buf[i] = skb;
166 }
159 167
160 eeprom = kmalloc(0x2010 + EEPROM_READBACK_LEN, GFP_KERNEL); 168 i++;
161 if (!eeprom) { 169 idx++;
162 printk(KERN_ERR "%s (prism54pci): no memory for eeprom!\n", 170 i %= ring_limit;
163 pci_name(priv->pdev));
164 err = -ENOMEM;
165 goto out;
166 } 171 }
167 172
168 memset(ring_control, 0, sizeof(*ring_control)); 173 wmb();
169 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma)); 174 ring_control->host_idx[ring_index] = cpu_to_le32(idx);
170 P54P_READ(ring_control_base); 175}
171 udelay(10);
172
173 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
174 P54P_READ(int_enable);
175 udelay(10);
176 176
177 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); 177static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
178 int ring_index, struct p54p_desc *ring, u32 ring_limit,
179 struct sk_buff **rx_buf)
180{
181 struct p54p_priv *priv = dev->priv;
182 struct p54p_ring_control *ring_control = priv->ring_control;
183 struct p54p_desc *desc;
184 u32 idx, i;
185
186 i = (*index) % ring_limit;
187 (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
188 idx %= ring_limit;
189 while (i != idx) {
190 u16 len;
191 struct sk_buff *skb;
192 desc = &ring[i];
193 len = le16_to_cpu(desc->len);
194 skb = rx_buf[i];
195
196 if (!skb) {
197 i++;
198 i %= ring_limit;
199 continue;
200 }
201 skb_put(skb, len);
202
203 if (p54_rx(dev, skb)) {
204 pci_unmap_single(priv->pdev,
205 le32_to_cpu(desc->host_addr),
206 priv->common.rx_mtu + 32,
207 PCI_DMA_FROMDEVICE);
208 rx_buf[i] = NULL;
209 desc->host_addr = 0;
210 } else {
211 skb_trim(skb, 0);
212 desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
213 }
178 214
179 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { 215 i++;
180 printk(KERN_ERR "%s (prism54pci): Cannot boot firmware!\n", 216 i %= ring_limit;
181 pci_name(priv->pdev));
182 err = -EINVAL;
183 goto out;
184 } 217 }
185 218
186 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)); 219 p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
187 P54P_READ(int_enable); 220}
188 221
189 hdr = eeprom + 0x2010; 222/* caller must hold priv->lock */
190 p54_fill_eeprom_readback(hdr); 223static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
191 hdr->req_id = cpu_to_le32(priv->common.rx_start); 224 int ring_index, struct p54p_desc *ring, u32 ring_limit,
225 void **tx_buf)
226{
227 struct p54p_priv *priv = dev->priv;
228 struct p54p_ring_control *ring_control = priv->ring_control;
229 struct p54p_desc *desc;
230 u32 idx, i;
192 231
193 rx_mapping = pci_map_single(priv->pdev, eeprom, 232 i = (*index) % ring_limit;
194 0x2010, PCI_DMA_FROMDEVICE); 233 (*index) = idx = le32_to_cpu(ring_control->device_idx[1]);
195 tx_mapping = pci_map_single(priv->pdev, (void *)hdr, 234 idx %= ring_limit;
196 EEPROM_READBACK_LEN, PCI_DMA_TODEVICE);
197 235
198 ring_control->rx_mgmt[0].host_addr = cpu_to_le32(rx_mapping); 236 while (i != idx) {
199 ring_control->rx_mgmt[0].len = cpu_to_le16(0x2010); 237 desc = &ring[i];
200 ring_control->tx_data[0].host_addr = cpu_to_le32(tx_mapping); 238 kfree(tx_buf[i]);
201 ring_control->tx_data[0].device_addr = hdr->req_id; 239 tx_buf[i] = NULL;
202 ring_control->tx_data[0].len = cpu_to_le16(EEPROM_READBACK_LEN);
203 240
204 ring_control->host_idx[2] = cpu_to_le32(1); 241 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
205 ring_control->host_idx[1] = cpu_to_le32(1); 242 le16_to_cpu(desc->len), PCI_DMA_TODEVICE);
206 243
207 wmb(); 244 desc->host_addr = 0;
208 mdelay(100); 245 desc->device_addr = 0;
209 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); 246 desc->len = 0;
247 desc->flags = 0;
210 248
211 wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ); 249 i++;
212 wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ); 250 i %= ring_limit;
213
214 pci_unmap_single(priv->pdev, tx_mapping,
215 EEPROM_READBACK_LEN, PCI_DMA_TODEVICE);
216 pci_unmap_single(priv->pdev, rx_mapping,
217 0x2010, PCI_DMA_FROMDEVICE);
218
219 alen = le16_to_cpu(ring_control->rx_mgmt[0].len);
220 if (le32_to_cpu(ring_control->device_idx[2]) != 1 ||
221 alen < 0x10) {
222 printk(KERN_ERR "%s (prism54pci): Cannot read eeprom!\n",
223 pci_name(priv->pdev));
224 err = -EINVAL;
225 goto out;
226 } 251 }
227
228 p54_parse_eeprom(dev, (u8 *)eeprom + 0x10, alen - 0x10);
229
230 out:
231 kfree(eeprom);
232 P54P_WRITE(int_enable, cpu_to_le32(0));
233 P54P_READ(int_enable);
234 udelay(10);
235 free_irq(priv->pdev->irq, priv);
236 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
237 return err;
238} 252}
239 253
240static void p54p_refill_rx_ring(struct ieee80211_hw *dev) 254static void p54p_rx_tasklet(unsigned long dev_id)
241{ 255{
256 struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
242 struct p54p_priv *priv = dev->priv; 257 struct p54p_priv *priv = dev->priv;
243 struct p54p_ring_control *ring_control = priv->ring_control; 258 struct p54p_ring_control *ring_control = priv->ring_control;
244 u32 limit, host_idx, idx;
245 259
246 host_idx = le32_to_cpu(ring_control->host_idx[0]); 260 p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
247 limit = host_idx; 261 ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
248 limit -= le32_to_cpu(ring_control->device_idx[0]);
249 limit = ARRAY_SIZE(ring_control->rx_data) - limit;
250
251 idx = host_idx % ARRAY_SIZE(ring_control->rx_data);
252 while (limit-- > 1) {
253 struct p54p_desc *desc = &ring_control->rx_data[idx];
254
255 if (!desc->host_addr) {
256 struct sk_buff *skb;
257 dma_addr_t mapping;
258 skb = dev_alloc_skb(MAX_RX_SIZE);
259 if (!skb)
260 break;
261
262 mapping = pci_map_single(priv->pdev,
263 skb_tail_pointer(skb),
264 MAX_RX_SIZE,
265 PCI_DMA_FROMDEVICE);
266 desc->host_addr = cpu_to_le32(mapping);
267 desc->device_addr = 0; // FIXME: necessary?
268 desc->len = cpu_to_le16(MAX_RX_SIZE);
269 desc->flags = 0;
270 priv->rx_buf[idx] = skb;
271 }
272 262
273 idx++; 263 p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data,
274 host_idx++; 264 ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data);
275 idx %= ARRAY_SIZE(ring_control->rx_data);
276 }
277 265
278 wmb(); 266 wmb();
279 ring_control->host_idx[0] = cpu_to_le32(host_idx); 267 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
280} 268}
281 269
282static irqreturn_t p54p_interrupt(int irq, void *dev_id) 270static irqreturn_t p54p_interrupt(int irq, void *dev_id)
@@ -298,65 +286,18 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id)
298 reg &= P54P_READ(int_enable); 286 reg &= P54P_READ(int_enable);
299 287
300 if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) { 288 if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) {
301 struct p54p_desc *desc; 289 p54p_check_tx_ring(dev, &priv->tx_idx_mgmt,
302 u32 idx, i; 290 3, ring_control->tx_mgmt,
303 i = priv->tx_idx; 291 ARRAY_SIZE(ring_control->tx_mgmt),
304 i %= ARRAY_SIZE(ring_control->tx_data); 292 priv->tx_buf_mgmt);
305 priv->tx_idx = idx = le32_to_cpu(ring_control->device_idx[1]);
306 idx %= ARRAY_SIZE(ring_control->tx_data);
307
308 while (i != idx) {
309 desc = &ring_control->tx_data[i];
310 if (priv->tx_buf[i]) {
311 kfree(priv->tx_buf[i]);
312 priv->tx_buf[i] = NULL;
313 }
314
315 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
316 le16_to_cpu(desc->len), PCI_DMA_TODEVICE);
317
318 desc->host_addr = 0;
319 desc->device_addr = 0;
320 desc->len = 0;
321 desc->flags = 0;
322
323 i++;
324 i %= ARRAY_SIZE(ring_control->tx_data);
325 }
326
327 i = priv->rx_idx;
328 i %= ARRAY_SIZE(ring_control->rx_data);
329 priv->rx_idx = idx = le32_to_cpu(ring_control->device_idx[0]);
330 idx %= ARRAY_SIZE(ring_control->rx_data);
331 while (i != idx) {
332 u16 len;
333 struct sk_buff *skb;
334 desc = &ring_control->rx_data[i];
335 len = le16_to_cpu(desc->len);
336 skb = priv->rx_buf[i];
337 293
338 skb_put(skb, len); 294 p54p_check_tx_ring(dev, &priv->tx_idx_data,
295 1, ring_control->tx_data,
296 ARRAY_SIZE(ring_control->tx_data),
297 priv->tx_buf_data);
339 298
340 if (p54_rx(dev, skb)) { 299 tasklet_schedule(&priv->rx_tasklet);
341 pci_unmap_single(priv->pdev,
342 le32_to_cpu(desc->host_addr),
343 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
344 300
345 priv->rx_buf[i] = NULL;
346 desc->host_addr = 0;
347 } else {
348 skb_trim(skb, 0);
349 desc->len = cpu_to_le16(MAX_RX_SIZE);
350 }
351
352 i++;
353 i %= ARRAY_SIZE(ring_control->rx_data);
354 }
355
356 p54p_refill_rx_ring(dev);
357
358 wmb();
359 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
360 } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT)) 301 } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
361 complete(&priv->boot_comp); 302 complete(&priv->boot_comp);
362 303
@@ -392,7 +333,7 @@ static void p54p_tx(struct ieee80211_hw *dev, struct p54_control_hdr *data,
392 ring_control->host_idx[1] = cpu_to_le32(idx + 1); 333 ring_control->host_idx[1] = cpu_to_le32(idx + 1);
393 334
394 if (free_on_tx) 335 if (free_on_tx)
395 priv->tx_buf[i] = data; 336 priv->tx_buf_data[i] = data;
396 337
397 spin_unlock_irqrestore(&priv->lock, flags); 338 spin_unlock_irqrestore(&priv->lock, flags);
398 339
@@ -412,7 +353,7 @@ static int p54p_open(struct ieee80211_hw *dev)
412 353
413 init_completion(&priv->boot_comp); 354 init_completion(&priv->boot_comp);
414 err = request_irq(priv->pdev->irq, &p54p_interrupt, 355 err = request_irq(priv->pdev->irq, &p54p_interrupt,
415 IRQF_SHARED, "prism54pci", dev); 356 IRQF_SHARED, "p54pci", dev);
416 if (err) { 357 if (err) {
417 printk(KERN_ERR "%s: failed to register IRQ handler\n", 358 printk(KERN_ERR "%s: failed to register IRQ handler\n",
418 wiphy_name(dev->wiphy)); 359 wiphy_name(dev->wiphy));
@@ -420,10 +361,19 @@ static int p54p_open(struct ieee80211_hw *dev)
420 } 361 }
421 362
422 memset(priv->ring_control, 0, sizeof(*priv->ring_control)); 363 memset(priv->ring_control, 0, sizeof(*priv->ring_control));
423 priv->rx_idx = priv->tx_idx = 0; 364 err = p54p_upload_firmware(dev);
424 p54p_refill_rx_ring(dev); 365 if (err) {
366 free_irq(priv->pdev->irq, dev);
367 return err;
368 }
369 priv->rx_idx_data = priv->tx_idx_data = 0;
370 priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
371
372 p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
373 ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
425 374
426 p54p_upload_firmware(dev); 375 p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
376 ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
427 377
428 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma)); 378 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
429 P54P_READ(ring_control_base); 379 P54P_READ(ring_control_base);
@@ -465,6 +415,8 @@ static void p54p_stop(struct ieee80211_hw *dev)
465 unsigned int i; 415 unsigned int i;
466 struct p54p_desc *desc; 416 struct p54p_desc *desc;
467 417
418 tasklet_kill(&priv->rx_tasklet);
419
468 P54P_WRITE(int_enable, cpu_to_le32(0)); 420 P54P_WRITE(int_enable, cpu_to_le32(0));
469 P54P_READ(int_enable); 421 P54P_READ(int_enable);
470 udelay(10); 422 udelay(10);
@@ -473,26 +425,53 @@ static void p54p_stop(struct ieee80211_hw *dev)
473 425
474 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); 426 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
475 427
476 for (i = 0; i < ARRAY_SIZE(priv->rx_buf); i++) { 428 for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
477 desc = &ring_control->rx_data[i]; 429 desc = &ring_control->rx_data[i];
478 if (desc->host_addr) 430 if (desc->host_addr)
479 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), 431 pci_unmap_single(priv->pdev,
480 MAX_RX_SIZE, PCI_DMA_FROMDEVICE); 432 le32_to_cpu(desc->host_addr),
481 kfree_skb(priv->rx_buf[i]); 433 priv->common.rx_mtu + 32,
482 priv->rx_buf[i] = NULL; 434 PCI_DMA_FROMDEVICE);
435 kfree_skb(priv->rx_buf_data[i]);
436 priv->rx_buf_data[i] = NULL;
437 }
438
439 for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
440 desc = &ring_control->rx_mgmt[i];
441 if (desc->host_addr)
442 pci_unmap_single(priv->pdev,
443 le32_to_cpu(desc->host_addr),
444 priv->common.rx_mtu + 32,
445 PCI_DMA_FROMDEVICE);
446 kfree_skb(priv->rx_buf_mgmt[i]);
447 priv->rx_buf_mgmt[i] = NULL;
483 } 448 }
484 449
485 for (i = 0; i < ARRAY_SIZE(priv->tx_buf); i++) { 450 for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
486 desc = &ring_control->tx_data[i]; 451 desc = &ring_control->tx_data[i];
487 if (desc->host_addr) 452 if (desc->host_addr)
488 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), 453 pci_unmap_single(priv->pdev,
489 le16_to_cpu(desc->len), PCI_DMA_TODEVICE); 454 le32_to_cpu(desc->host_addr),
455 le16_to_cpu(desc->len),
456 PCI_DMA_TODEVICE);
490 457
491 kfree(priv->tx_buf[i]); 458 kfree(priv->tx_buf_data[i]);
492 priv->tx_buf[i] = NULL; 459 priv->tx_buf_data[i] = NULL;
493 } 460 }
494 461
495 memset(ring_control, 0, sizeof(ring_control)); 462 for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
463 desc = &ring_control->tx_mgmt[i];
464 if (desc->host_addr)
465 pci_unmap_single(priv->pdev,
466 le32_to_cpu(desc->host_addr),
467 le16_to_cpu(desc->len),
468 PCI_DMA_TODEVICE);
469
470 kfree(priv->tx_buf_mgmt[i]);
471 priv->tx_buf_mgmt[i] = NULL;
472 }
473
474 memset(ring_control, 0, sizeof(*ring_control));
496} 475}
497 476
498static int __devinit p54p_probe(struct pci_dev *pdev, 477static int __devinit p54p_probe(struct pci_dev *pdev,
@@ -506,7 +485,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
506 485
507 err = pci_enable_device(pdev); 486 err = pci_enable_device(pdev);
508 if (err) { 487 if (err) {
509 printk(KERN_ERR "%s (prism54pci): Cannot enable new PCI device\n", 488 printk(KERN_ERR "%s (p54pci): Cannot enable new PCI device\n",
510 pci_name(pdev)); 489 pci_name(pdev));
511 return err; 490 return err;
512 } 491 }
@@ -514,22 +493,22 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
514 mem_addr = pci_resource_start(pdev, 0); 493 mem_addr = pci_resource_start(pdev, 0);
515 mem_len = pci_resource_len(pdev, 0); 494 mem_len = pci_resource_len(pdev, 0);
516 if (mem_len < sizeof(struct p54p_csr)) { 495 if (mem_len < sizeof(struct p54p_csr)) {
517 printk(KERN_ERR "%s (prism54pci): Too short PCI resources\n", 496 printk(KERN_ERR "%s (p54pci): Too short PCI resources\n",
518 pci_name(pdev)); 497 pci_name(pdev));
519 pci_disable_device(pdev); 498 pci_disable_device(pdev);
520 return err; 499 return err;
521 } 500 }
522 501
523 err = pci_request_regions(pdev, "prism54pci"); 502 err = pci_request_regions(pdev, "p54pci");
524 if (err) { 503 if (err) {
525 printk(KERN_ERR "%s (prism54pci): Cannot obtain PCI resources\n", 504 printk(KERN_ERR "%s (p54pci): Cannot obtain PCI resources\n",
526 pci_name(pdev)); 505 pci_name(pdev));
527 return err; 506 return err;
528 } 507 }
529 508
530 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || 509 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
531 pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { 510 pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
532 printk(KERN_ERR "%s (prism54pci): No suitable DMA available\n", 511 printk(KERN_ERR "%s (p54pci): No suitable DMA available\n",
533 pci_name(pdev)); 512 pci_name(pdev));
534 goto err_free_reg; 513 goto err_free_reg;
535 } 514 }
@@ -542,7 +521,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
542 521
543 dev = p54_init_common(sizeof(*priv)); 522 dev = p54_init_common(sizeof(*priv));
544 if (!dev) { 523 if (!dev) {
545 printk(KERN_ERR "%s (prism54pci): ieee80211 alloc failed\n", 524 printk(KERN_ERR "%s (p54pci): ieee80211 alloc failed\n",
546 pci_name(pdev)); 525 pci_name(pdev));
547 err = -ENOMEM; 526 err = -ENOMEM;
548 goto err_free_reg; 527 goto err_free_reg;
@@ -556,7 +535,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
556 535
557 priv->map = ioremap(mem_addr, mem_len); 536 priv->map = ioremap(mem_addr, mem_len);
558 if (!priv->map) { 537 if (!priv->map) {
559 printk(KERN_ERR "%s (prism54pci): Cannot map device memory\n", 538 printk(KERN_ERR "%s (p54pci): Cannot map device memory\n",
560 pci_name(pdev)); 539 pci_name(pdev));
561 err = -EINVAL; // TODO: use a better error code? 540 err = -EINVAL; // TODO: use a better error code?
562 goto err_free_dev; 541 goto err_free_dev;
@@ -565,39 +544,31 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
565 priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control), 544 priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
566 &priv->ring_control_dma); 545 &priv->ring_control_dma);
567 if (!priv->ring_control) { 546 if (!priv->ring_control) {
568 printk(KERN_ERR "%s (prism54pci): Cannot allocate rings\n", 547 printk(KERN_ERR "%s (p54pci): Cannot allocate rings\n",
569 pci_name(pdev)); 548 pci_name(pdev));
570 err = -ENOMEM; 549 err = -ENOMEM;
571 goto err_iounmap; 550 goto err_iounmap;
572 } 551 }
573 memset(priv->ring_control, 0, sizeof(*priv->ring_control));
574
575 err = p54p_upload_firmware(dev);
576 if (err)
577 goto err_free_desc;
578
579 err = p54p_read_eeprom(dev);
580 if (err)
581 goto err_free_desc;
582
583 priv->common.open = p54p_open; 552 priv->common.open = p54p_open;
584 priv->common.stop = p54p_stop; 553 priv->common.stop = p54p_stop;
585 priv->common.tx = p54p_tx; 554 priv->common.tx = p54p_tx;
586 555
587 spin_lock_init(&priv->lock); 556 spin_lock_init(&priv->lock);
557 tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
558
559 p54p_open(dev);
560 err = p54_read_eeprom(dev);
561 p54p_stop(dev);
562 if (err)
563 goto err_free_desc;
588 564
589 err = ieee80211_register_hw(dev); 565 err = ieee80211_register_hw(dev);
590 if (err) { 566 if (err) {
591 printk(KERN_ERR "%s (prism54pci): Cannot register netdevice\n", 567 printk(KERN_ERR "%s (p54pci): Cannot register netdevice\n",
592 pci_name(pdev)); 568 pci_name(pdev));
593 goto err_free_common; 569 goto err_free_common;
594 } 570 }
595 571
596 printk(KERN_INFO "%s: hwaddr %s, isl38%02x\n",
597 wiphy_name(dev->wiphy),
598 print_mac(mac, dev->wiphy->perm_addr),
599 priv->common.version);
600
601 return 0; 572 return 0;
602 573
603 err_free_common: 574 err_free_common:
@@ -645,7 +616,7 @@ static int p54p_suspend(struct pci_dev *pdev, pm_message_t state)
645 struct ieee80211_hw *dev = pci_get_drvdata(pdev); 616 struct ieee80211_hw *dev = pci_get_drvdata(pdev);
646 struct p54p_priv *priv = dev->priv; 617 struct p54p_priv *priv = dev->priv;
647 618
648 if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) { 619 if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
649 ieee80211_stop_queues(dev); 620 ieee80211_stop_queues(dev);
650 p54p_stop(dev); 621 p54p_stop(dev);
651 } 622 }
@@ -663,7 +634,7 @@ static int p54p_resume(struct pci_dev *pdev)
663 pci_set_power_state(pdev, PCI_D0); 634 pci_set_power_state(pdev, PCI_D0);
664 pci_restore_state(pdev); 635 pci_restore_state(pdev);
665 636
666 if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) { 637 if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
667 p54p_open(dev); 638 p54p_open(dev);
668 ieee80211_wake_queues(dev); 639 ieee80211_wake_queues(dev);
669 } 640 }
@@ -673,7 +644,7 @@ static int p54p_resume(struct pci_dev *pdev)
673#endif /* CONFIG_PM */ 644#endif /* CONFIG_PM */
674 645
675static struct pci_driver p54p_driver = { 646static struct pci_driver p54p_driver = {
676 .name = "prism54pci", 647 .name = "p54pci",
677 .id_table = p54p_table, 648 .id_table = p54p_table,
678 .probe = p54p_probe, 649 .probe = p54p_probe,
679 .remove = __devexit_p(p54p_remove), 650 .remove = __devexit_p(p54p_remove),
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 5bedd7af385d..4a6778070afc 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -1,5 +1,5 @@
1#ifndef PRISM54PCI_H 1#ifndef P54PCI_H
2#define PRISM54PCI_H 2#define P54PCI_H
3 3
4/* 4/*
5 * Defines for PCI based mac80211 Prism54 driver 5 * Defines for PCI based mac80211 Prism54 driver
@@ -68,7 +68,7 @@ struct p54p_csr {
68} __attribute__ ((packed)); 68} __attribute__ ((packed));
69 69
70/* usb backend only needs the register defines above */ 70/* usb backend only needs the register defines above */
71#ifndef PRISM54USB_H 71#ifndef P54USB_H
72struct p54p_desc { 72struct p54p_desc {
73 __le32 host_addr; 73 __le32 host_addr;
74 __le32 device_addr; 74 __le32 device_addr;
@@ -92,15 +92,19 @@ struct p54p_priv {
92 struct p54_common common; 92 struct p54_common common;
93 struct pci_dev *pdev; 93 struct pci_dev *pdev;
94 struct p54p_csr __iomem *map; 94 struct p54p_csr __iomem *map;
95 struct tasklet_struct rx_tasklet;
95 96
96 spinlock_t lock; 97 spinlock_t lock;
97 struct p54p_ring_control *ring_control; 98 struct p54p_ring_control *ring_control;
98 dma_addr_t ring_control_dma; 99 dma_addr_t ring_control_dma;
99 u32 rx_idx, tx_idx; 100 u32 rx_idx_data, tx_idx_data;
100 struct sk_buff *rx_buf[8]; 101 u32 rx_idx_mgmt, tx_idx_mgmt;
101 void *tx_buf[32]; 102 struct sk_buff *rx_buf_data[8];
103 struct sk_buff *rx_buf_mgmt[4];
104 void *tx_buf_data[32];
105 void *tx_buf_mgmt[4];
102 struct completion boot_comp; 106 struct completion boot_comp;
103}; 107};
104 108
105#endif /* PRISM54USB_H */ 109#endif /* P54USB_H */
106#endif /* PRISM54PCI_H */ 110#endif /* P54PCI_H */
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index cbaca23a9453..7444f3729779 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -91,11 +91,16 @@ static void p54u_rx_cb(struct urb *urb)
91 91
92 skb_unlink(skb, &priv->rx_queue); 92 skb_unlink(skb, &priv->rx_queue);
93 skb_put(skb, urb->actual_length); 93 skb_put(skb, urb->actual_length);
94 if (!priv->hw_type) 94
95 skb_pull(skb, sizeof(struct net2280_tx_hdr)); 95 if (priv->hw_type == P54U_NET2280)
96 skb_pull(skb, priv->common.tx_hdr_len);
97 if (priv->common.fw_interface == FW_LM87) {
98 skb_pull(skb, 4);
99 skb_put(skb, 4);
100 }
96 101
97 if (p54_rx(dev, skb)) { 102 if (p54_rx(dev, skb)) {
98 skb = dev_alloc_skb(MAX_RX_SIZE); 103 skb = dev_alloc_skb(priv->common.rx_mtu + 32);
99 if (unlikely(!skb)) { 104 if (unlikely(!skb)) {
100 usb_free_urb(urb); 105 usb_free_urb(urb);
101 /* TODO check rx queue length and refill *somewhere* */ 106 /* TODO check rx queue length and refill *somewhere* */
@@ -109,9 +114,12 @@ static void p54u_rx_cb(struct urb *urb)
109 urb->context = skb; 114 urb->context = skb;
110 skb_queue_tail(&priv->rx_queue, skb); 115 skb_queue_tail(&priv->rx_queue, skb);
111 } else { 116 } else {
112 if (!priv->hw_type) 117 if (priv->hw_type == P54U_NET2280)
113 skb_push(skb, sizeof(struct net2280_tx_hdr)); 118 skb_push(skb, priv->common.tx_hdr_len);
114 119 if (priv->common.fw_interface == FW_LM87) {
120 skb_push(skb, 4);
121 skb_put(skb, 4);
122 }
115 skb_reset_tail_pointer(skb); 123 skb_reset_tail_pointer(skb);
116 skb_trim(skb, 0); 124 skb_trim(skb, 0);
117 if (urb->transfer_buffer != skb_tail_pointer(skb)) { 125 if (urb->transfer_buffer != skb_tail_pointer(skb)) {
@@ -145,7 +153,7 @@ static int p54u_init_urbs(struct ieee80211_hw *dev)
145 struct p54u_rx_info *info; 153 struct p54u_rx_info *info;
146 154
147 while (skb_queue_len(&priv->rx_queue) < 32) { 155 while (skb_queue_len(&priv->rx_queue) < 32) {
148 skb = __dev_alloc_skb(MAX_RX_SIZE, GFP_KERNEL); 156 skb = __dev_alloc_skb(priv->common.rx_mtu + 32, GFP_KERNEL);
149 if (!skb) 157 if (!skb)
150 break; 158 break;
151 entry = usb_alloc_urb(0, GFP_KERNEL); 159 entry = usb_alloc_urb(0, GFP_KERNEL);
@@ -153,7 +161,10 @@ static int p54u_init_urbs(struct ieee80211_hw *dev)
153 kfree_skb(skb); 161 kfree_skb(skb);
154 break; 162 break;
155 } 163 }
156 usb_fill_bulk_urb(entry, priv->udev, usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), skb_tail_pointer(skb), MAX_RX_SIZE, p54u_rx_cb, skb); 164 usb_fill_bulk_urb(entry, priv->udev,
165 usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA),
166 skb_tail_pointer(skb),
167 priv->common.rx_mtu + 32, p54u_rx_cb, skb);
157 info = (struct p54u_rx_info *) skb->cb; 168 info = (struct p54u_rx_info *) skb->cb;
158 info->urb = entry; 169 info->urb = entry;
159 info->dev = dev; 170 info->dev = dev;
@@ -207,6 +218,42 @@ static void p54u_tx_3887(struct ieee80211_hw *dev, struct p54_control_hdr *data,
207 usb_submit_urb(data_urb, GFP_ATOMIC); 218 usb_submit_urb(data_urb, GFP_ATOMIC);
208} 219}
209 220
221__le32 p54u_lm87_chksum(const u32 *data, size_t length)
222{
223 __le32 chk = 0;
224
225 length >>= 2;
226 while (length--) {
227 chk ^= cpu_to_le32(*data++);
228 chk = (chk >> 5) ^ (chk << 3);
229 }
230
231 return chk;
232}
233
234static void p54u_tx_lm87(struct ieee80211_hw *dev,
235 struct p54_control_hdr *data,
236 size_t len, int free_on_tx)
237{
238 struct p54u_priv *priv = dev->priv;
239 struct urb *data_urb;
240 struct lm87_tx_hdr *hdr = (void *)data - sizeof(*hdr);
241
242 data_urb = usb_alloc_urb(0, GFP_ATOMIC);
243 if (!data_urb)
244 return;
245
246 hdr->chksum = p54u_lm87_chksum((u32 *)data, len);
247 hdr->device_addr = data->req_id;
248
249 usb_fill_bulk_urb(data_urb, priv->udev,
250 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr,
251 len + sizeof(*hdr), free_on_tx ? p54u_tx_free_cb : p54u_tx_cb,
252 dev);
253
254 usb_submit_urb(data_urb, GFP_ATOMIC);
255}
256
210static void p54u_tx_net2280(struct ieee80211_hw *dev, struct p54_control_hdr *data, 257static void p54u_tx_net2280(struct ieee80211_hw *dev, struct p54_control_hdr *data,
211 size_t len, int free_on_tx) 258 size_t len, int free_on_tx)
212{ 259{
@@ -312,73 +359,6 @@ static int p54u_bulk_msg(struct p54u_priv *priv, unsigned int ep,
312 data, len, &alen, 2000); 359 data, len, &alen, 2000);
313} 360}
314 361
315static int p54u_read_eeprom(struct ieee80211_hw *dev)
316{
317 struct p54u_priv *priv = dev->priv;
318 void *buf;
319 struct p54_control_hdr *hdr;
320 int err, alen;
321 size_t offset = priv->hw_type ? 0x10 : 0x20;
322
323 buf = kmalloc(0x2020, GFP_KERNEL);
324 if (!buf) {
325 printk(KERN_ERR "prism54usb: cannot allocate memory for "
326 "eeprom readback!\n");
327 return -ENOMEM;
328 }
329
330 if (priv->hw_type) {
331 *((u32 *) buf) = priv->common.rx_start;
332 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32));
333 if (err) {
334 printk(KERN_ERR "prism54usb: addr send failed\n");
335 goto fail;
336 }
337 } else {
338 struct net2280_reg_write *reg = buf;
339 reg->port = cpu_to_le16(NET2280_DEV_U32);
340 reg->addr = cpu_to_le32(P54U_DEV_BASE);
341 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA);
342 err = p54u_bulk_msg(priv, P54U_PIPE_DEV, buf, sizeof(*reg));
343 if (err) {
344 printk(KERN_ERR "prism54usb: dev_int send failed\n");
345 goto fail;
346 }
347 }
348
349 hdr = buf + priv->common.tx_hdr_len;
350 p54_fill_eeprom_readback(hdr);
351 hdr->req_id = cpu_to_le32(priv->common.rx_start);
352 if (priv->common.tx_hdr_len) {
353 struct net2280_tx_hdr *tx_hdr = buf;
354 tx_hdr->device_addr = hdr->req_id;
355 tx_hdr->len = cpu_to_le16(EEPROM_READBACK_LEN);
356 }
357
358 /* we can just pretend to send 0x2000 bytes of nothing in the headers */
359 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf,
360 EEPROM_READBACK_LEN + priv->common.tx_hdr_len);
361 if (err) {
362 printk(KERN_ERR "prism54usb: eeprom req send failed\n");
363 goto fail;
364 }
365
366 err = usb_bulk_msg(priv->udev,
367 usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA),
368 buf, 0x2020, &alen, 1000);
369 if (!err && alen > offset) {
370 p54_parse_eeprom(dev, (u8 *)buf + offset, alen - offset);
371 } else {
372 printk(KERN_ERR "prism54usb: eeprom read failed!\n");
373 err = -EINVAL;
374 goto fail;
375 }
376
377 fail:
378 kfree(buf);
379 return err;
380}
381
382static int p54u_upload_firmware_3887(struct ieee80211_hw *dev) 362static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
383{ 363{
384 static char start_string[] = "~~~~<\r"; 364 static char start_string[] = "~~~~<\r";
@@ -412,7 +392,9 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
412 goto err_req_fw_failed; 392 goto err_req_fw_failed;
413 } 393 }
414 394
415 p54_parse_firmware(dev, fw_entry); 395 err = p54_parse_firmware(dev, fw_entry);
396 if (err)
397 goto err_upload_failed;
416 398
417 left = block_size = min((size_t)P54U_FW_BLOCK, fw_entry->size); 399 left = block_size = min((size_t)P54U_FW_BLOCK, fw_entry->size);
418 strcpy(buf, start_string); 400 strcpy(buf, start_string);
@@ -458,7 +440,7 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
458 440
459 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size); 441 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size);
460 if (err) { 442 if (err) {
461 printk(KERN_ERR "prism54usb: firmware upload failed!\n"); 443 printk(KERN_ERR "p54usb: firmware upload failed!\n");
462 goto err_upload_failed; 444 goto err_upload_failed;
463 } 445 }
464 446
@@ -469,7 +451,7 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
469 *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, fw_entry->data, fw_entry->size)); 451 *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, fw_entry->data, fw_entry->size));
470 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32)); 452 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32));
471 if (err) { 453 if (err) {
472 printk(KERN_ERR "prism54usb: firmware upload failed!\n"); 454 printk(KERN_ERR "p54usb: firmware upload failed!\n");
473 goto err_upload_failed; 455 goto err_upload_failed;
474 } 456 }
475 457
@@ -480,13 +462,13 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
480 break; 462 break;
481 463
482 if (alen > 5 && !memcmp(buf, "ERROR", 5)) { 464 if (alen > 5 && !memcmp(buf, "ERROR", 5)) {
483 printk(KERN_INFO "prism54usb: firmware upload failed!\n"); 465 printk(KERN_INFO "p54usb: firmware upload failed!\n");
484 err = -EINVAL; 466 err = -EINVAL;
485 break; 467 break;
486 } 468 }
487 469
488 if (time_after(jiffies, timeout)) { 470 if (time_after(jiffies, timeout)) {
489 printk(KERN_ERR "prism54usb: firmware boot timed out!\n"); 471 printk(KERN_ERR "p54usb: firmware boot timed out!\n");
490 err = -ETIMEDOUT; 472 err = -ETIMEDOUT;
491 break; 473 break;
492 } 474 }
@@ -498,7 +480,7 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
498 buf[1] = '\r'; 480 buf[1] = '\r';
499 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2); 481 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2);
500 if (err) { 482 if (err) {
501 printk(KERN_ERR "prism54usb: firmware boot failed!\n"); 483 printk(KERN_ERR "p54usb: firmware boot failed!\n");
502 goto err_upload_failed; 484 goto err_upload_failed;
503 } 485 }
504 486
@@ -549,7 +531,12 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
549 return err; 531 return err;
550 } 532 }
551 533
552 p54_parse_firmware(dev, fw_entry); 534 err = p54_parse_firmware(dev, fw_entry);
535 if (err) {
536 kfree(buf);
537 release_firmware(fw_entry);
538 return err;
539 }
553 540
554#define P54U_WRITE(type, addr, data) \ 541#define P54U_WRITE(type, addr, data) \
555 do {\ 542 do {\
@@ -660,7 +647,7 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
660 647
661 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len); 648 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len);
662 if (err) { 649 if (err) {
663 printk(KERN_ERR "prism54usb: firmware block upload " 650 printk(KERN_ERR "p54usb: firmware block upload "
664 "failed\n"); 651 "failed\n");
665 goto fail; 652 goto fail;
666 } 653 }
@@ -694,7 +681,7 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
694 0x002C | (unsigned long)&devreg->direct_mem_win); 681 0x002C | (unsigned long)&devreg->direct_mem_win);
695 if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) || 682 if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) ||
696 !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) { 683 !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) {
697 printk(KERN_ERR "prism54usb: firmware DMA transfer " 684 printk(KERN_ERR "p54usb: firmware DMA transfer "
698 "failed\n"); 685 "failed\n");
699 goto fail; 686 goto fail;
700 } 687 }
@@ -802,7 +789,7 @@ static int __devinit p54u_probe(struct usb_interface *intf,
802 789
803 dev = p54_init_common(sizeof(*priv)); 790 dev = p54_init_common(sizeof(*priv));
804 if (!dev) { 791 if (!dev) {
805 printk(KERN_ERR "prism54usb: ieee80211 alloc failed\n"); 792 printk(KERN_ERR "p54usb: ieee80211 alloc failed\n");
806 return -ENOMEM; 793 return -ENOMEM;
807 } 794 }
808 795
@@ -833,49 +820,40 @@ static int __devinit p54u_probe(struct usb_interface *intf,
833 } 820 }
834 } 821 }
835 priv->common.open = p54u_open; 822 priv->common.open = p54u_open;
836 823 priv->common.stop = p54u_stop;
837 if (recognized_pipes < P54U_PIPE_NUMBER) { 824 if (recognized_pipes < P54U_PIPE_NUMBER) {
838 priv->hw_type = P54U_3887; 825 priv->hw_type = P54U_3887;
839 priv->common.tx = p54u_tx_3887; 826 err = p54u_upload_firmware_3887(dev);
827 if (priv->common.fw_interface == FW_LM87) {
828 dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);
829 priv->common.tx_hdr_len = sizeof(struct lm87_tx_hdr);
830 priv->common.tx = p54u_tx_lm87;
831 } else
832 priv->common.tx = p54u_tx_3887;
840 } else { 833 } else {
834 priv->hw_type = P54U_NET2280;
841 dev->extra_tx_headroom += sizeof(struct net2280_tx_hdr); 835 dev->extra_tx_headroom += sizeof(struct net2280_tx_hdr);
842 priv->common.tx_hdr_len = sizeof(struct net2280_tx_hdr); 836 priv->common.tx_hdr_len = sizeof(struct net2280_tx_hdr);
843 priv->common.tx = p54u_tx_net2280; 837 priv->common.tx = p54u_tx_net2280;
844 }
845 priv->common.stop = p54u_stop;
846
847 if (priv->hw_type)
848 err = p54u_upload_firmware_3887(dev);
849 else
850 err = p54u_upload_firmware_net2280(dev); 838 err = p54u_upload_firmware_net2280(dev);
839 }
851 if (err) 840 if (err)
852 goto err_free_dev; 841 goto err_free_dev;
853 842
854 err = p54u_read_eeprom(dev); 843 skb_queue_head_init(&priv->rx_queue);
844
845 p54u_open(dev);
846 err = p54_read_eeprom(dev);
847 p54u_stop(dev);
855 if (err) 848 if (err)
856 goto err_free_dev; 849 goto err_free_dev;
857 850
858 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
859 u8 perm_addr[ETH_ALEN];
860
861 printk(KERN_WARNING "prism54usb: Invalid hwaddr! Using randomly generated MAC addr\n");
862 random_ether_addr(perm_addr);
863 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
864 }
865
866 skb_queue_head_init(&priv->rx_queue);
867
868 err = ieee80211_register_hw(dev); 851 err = ieee80211_register_hw(dev);
869 if (err) { 852 if (err) {
870 printk(KERN_ERR "prism54usb: Cannot register netdevice\n"); 853 printk(KERN_ERR "p54usb: Cannot register netdevice\n");
871 goto err_free_dev; 854 goto err_free_dev;
872 } 855 }
873 856
874 printk(KERN_INFO "%s: hwaddr %s, isl38%02x\n",
875 wiphy_name(dev->wiphy),
876 print_mac(mac, dev->wiphy->perm_addr),
877 priv->common.version);
878
879 return 0; 857 return 0;
880 858
881 err_free_dev: 859 err_free_dev:
@@ -902,7 +880,7 @@ static void __devexit p54u_disconnect(struct usb_interface *intf)
902} 880}
903 881
904static struct usb_driver p54u_driver = { 882static struct usb_driver p54u_driver = {
905 .name = "prism54usb", 883 .name = "p54usb",
906 .id_table = p54u_table, 884 .id_table = p54u_table,
907 .probe = p54u_probe, 885 .probe = p54u_probe,
908 .disconnect = p54u_disconnect, 886 .disconnect = p54u_disconnect,
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index d1896b396c1c..5b8fe91379c3 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -1,5 +1,5 @@
1#ifndef PRISM54USB_H 1#ifndef P54USB_H
2#define PRISM54USB_H 2#define P54USB_H
3 3
4/* 4/*
5 * Defines for USB based mac80211 Prism54 driver 5 * Defines for USB based mac80211 Prism54 driver
@@ -72,6 +72,11 @@ struct net2280_tx_hdr {
72 u8 padding[8]; 72 u8 padding[8];
73} __attribute__((packed)); 73} __attribute__((packed));
74 74
75struct lm87_tx_hdr {
76 __le32 device_addr;
77 __le32 chksum;
78} __attribute__((packed));
79
75/* Some flags for the isl hardware registers controlling DMA inside the 80/* Some flags for the isl hardware registers controlling DMA inside the
76 * chip */ 81 * chip */
77#define ISL38XX_DMA_STATUS_DONE 0x00000001 82#define ISL38XX_DMA_STATUS_DONE 0x00000001
@@ -130,4 +135,4 @@ struct p54u_priv {
130 struct sk_buff_head rx_queue; 135 struct sk_buff_head rx_queue;
131}; 136};
132 137
133#endif /* PRISM54USB_H */ 138#endif /* P54USB_H */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 3d75a7137d3c..16e68f4b654a 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -71,7 +71,7 @@ prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode)
71 if (iw_mode == IW_MODE_REPEAT || iw_mode == IW_MODE_SECOND) { 71 if (iw_mode == IW_MODE_REPEAT || iw_mode == IW_MODE_SECOND) {
72 printk(KERN_DEBUG 72 printk(KERN_DEBUG
73 "%s(): Sorry, Repeater mode and Secondary mode " 73 "%s(): Sorry, Repeater mode and Secondary mode "
74 "are not yet supported by this driver.\n", __FUNCTION__); 74 "are not yet supported by this driver.\n", __func__);
75 return -EINVAL; 75 return -EINVAL;
76 } 76 }
77 77
@@ -333,7 +333,7 @@ prism54_set_mode(struct net_device *ndev, struct iw_request_info *info,
333 if (*uwrq > IW_MODE_MONITOR || *uwrq < IW_MODE_AUTO) { 333 if (*uwrq > IW_MODE_MONITOR || *uwrq < IW_MODE_AUTO) {
334 printk(KERN_DEBUG 334 printk(KERN_DEBUG
335 "%s: %s() You passed a non-valid init_mode.\n", 335 "%s: %s() You passed a non-valid init_mode.\n",
336 priv->ndev->name, __FUNCTION__); 336 priv->ndev->name, __func__);
337 return -EINVAL; 337 return -EINVAL;
338 } 338 }
339 339
@@ -1234,7 +1234,7 @@ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
1234 /* don't know how to disable radio */ 1234 /* don't know how to disable radio */
1235 printk(KERN_DEBUG 1235 printk(KERN_DEBUG
1236 "%s: %s() disabling radio is not yet supported.\n", 1236 "%s: %s() disabling radio is not yet supported.\n",
1237 priv->ndev->name, __FUNCTION__); 1237 priv->ndev->name, __func__);
1238 return -ENOTSUPP; 1238 return -ENOTSUPP;
1239 } else if (vwrq->fixed) 1239 } else if (vwrq->fixed)
1240 /* currently only fixed value is supported */ 1240 /* currently only fixed value is supported */
@@ -1242,7 +1242,7 @@ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
1242 else { 1242 else {
1243 printk(KERN_DEBUG 1243 printk(KERN_DEBUG
1244 "%s: %s() auto power will be implemented later.\n", 1244 "%s: %s() auto power will be implemented later.\n",
1245 priv->ndev->name, __FUNCTION__); 1245 priv->ndev->name, __func__);
1246 return -ENOTSUPP; 1246 return -ENOTSUPP;
1247 } 1247 }
1248} 1248}
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index d485a86bba75..b686dc45483e 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -33,14 +33,22 @@ config RT2X00_LIB_FIRMWARE
33 depends on RT2X00_LIB 33 depends on RT2X00_LIB
34 select FW_LOADER 34 select FW_LOADER
35 35
36config RT2X00_LIB_CRYPTO
37 boolean
38 depends on RT2X00_LIB
39
36config RT2X00_LIB_RFKILL 40config RT2X00_LIB_RFKILL
37 boolean 41 boolean
38 depends on RT2X00_LIB 42 depends on RT2X00_LIB
39 select RFKILL 43 depends on RFKILL
44 default y
40 45
41config RT2X00_LIB_LEDS 46config RT2X00_LIB_LEDS
42 boolean 47 boolean
43 depends on RT2X00_LIB && NEW_LEDS 48 depends on RT2X00_LIB
49 depends on NEW_LEDS
50 depends on LEDS_CLASS
51 default y
44 52
45config RT2400PCI 53config RT2400PCI
46 tristate "Ralink rt2400 (PCI/PCMCIA) support" 54 tristate "Ralink rt2400 (PCI/PCMCIA) support"
@@ -53,23 +61,6 @@ config RT2400PCI
53 61
54 When compiled as a module, this driver will be called "rt2400pci.ko". 62 When compiled as a module, this driver will be called "rt2400pci.ko".
55 63
56config RT2400PCI_RFKILL
57 bool "Ralink rt2400 rfkill support"
58 depends on RT2400PCI
59 select RT2X00_LIB_RFKILL
60 ---help---
61 This adds support for integrated rt2400 hardware that features a
62 hardware button to control the radio state.
63 This feature depends on the RF switch subsystem rfkill.
64
65config RT2400PCI_LEDS
66 bool "Ralink rt2400 leds support"
67 depends on RT2400PCI && NEW_LEDS
68 select LEDS_CLASS
69 select RT2X00_LIB_LEDS
70 ---help---
71 This adds support for led triggers provided my mac80211.
72
73config RT2500PCI 64config RT2500PCI
74 tristate "Ralink rt2500 (PCI/PCMCIA) support" 65 tristate "Ralink rt2500 (PCI/PCMCIA) support"
75 depends on PCI 66 depends on PCI
@@ -81,28 +72,12 @@ config RT2500PCI
81 72
82 When compiled as a module, this driver will be called "rt2500pci.ko". 73 When compiled as a module, this driver will be called "rt2500pci.ko".
83 74
84config RT2500PCI_RFKILL
85 bool "Ralink rt2500 rfkill support"
86 depends on RT2500PCI
87 select RT2X00_LIB_RFKILL
88 ---help---
89 This adds support for integrated rt2500 hardware that features a
90 hardware button to control the radio state.
91 This feature depends on the RF switch subsystem rfkill.
92
93config RT2500PCI_LEDS
94 bool "Ralink rt2500 leds support"
95 depends on RT2500PCI && NEW_LEDS
96 select LEDS_CLASS
97 select RT2X00_LIB_LEDS
98 ---help---
99 This adds support for led triggers provided my mac80211.
100
101config RT61PCI 75config RT61PCI
102 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" 76 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
103 depends on PCI 77 depends on PCI
104 select RT2X00_LIB_PCI 78 select RT2X00_LIB_PCI
105 select RT2X00_LIB_FIRMWARE 79 select RT2X00_LIB_FIRMWARE
80 select RT2X00_LIB_CRYPTO
106 select CRC_ITU_T 81 select CRC_ITU_T
107 select EEPROM_93CX6 82 select EEPROM_93CX6
108 ---help--- 83 ---help---
@@ -111,23 +86,6 @@ config RT61PCI
111 86
112 When compiled as a module, this driver will be called "rt61pci.ko". 87 When compiled as a module, this driver will be called "rt61pci.ko".
113 88
114config RT61PCI_RFKILL
115 bool "Ralink rt2501/rt61 rfkill support"
116 depends on RT61PCI
117 select RT2X00_LIB_RFKILL
118 ---help---
119 This adds support for integrated rt61 hardware that features a
120 hardware button to control the radio state.
121 This feature depends on the RF switch subsystem rfkill.
122
123config RT61PCI_LEDS
124 bool "Ralink rt2501/rt61 leds support"
125 depends on RT61PCI && NEW_LEDS
126 select LEDS_CLASS
127 select RT2X00_LIB_LEDS
128 ---help---
129 This adds support for led triggers provided my mac80211.
130
131config RT2500USB 89config RT2500USB
132 tristate "Ralink rt2500 (USB) support" 90 tristate "Ralink rt2500 (USB) support"
133 depends on USB 91 depends on USB
@@ -138,19 +96,12 @@ config RT2500USB
138 96
139 When compiled as a module, this driver will be called "rt2500usb.ko". 97 When compiled as a module, this driver will be called "rt2500usb.ko".
140 98
141config RT2500USB_LEDS
142 bool "Ralink rt2500 leds support"
143 depends on RT2500USB && NEW_LEDS
144 select LEDS_CLASS
145 select RT2X00_LIB_LEDS
146 ---help---
147 This adds support for led triggers provided my mac80211.
148
149config RT73USB 99config RT73USB
150 tristate "Ralink rt2501/rt73 (USB) support" 100 tristate "Ralink rt2501/rt73 (USB) support"
151 depends on USB 101 depends on USB
152 select RT2X00_LIB_USB 102 select RT2X00_LIB_USB
153 select RT2X00_LIB_FIRMWARE 103 select RT2X00_LIB_FIRMWARE
104 select RT2X00_LIB_CRYPTO
154 select CRC_ITU_T 105 select CRC_ITU_T
155 ---help--- 106 ---help---
156 This adds support for rt2501 wireless chipset family. 107 This adds support for rt2501 wireless chipset family.
@@ -158,14 +109,6 @@ config RT73USB
158 109
159 When compiled as a module, this driver will be called "rt73usb.ko". 110 When compiled as a module, this driver will be called "rt73usb.ko".
160 111
161config RT73USB_LEDS
162 bool "Ralink rt2501/rt73 leds support"
163 depends on RT73USB && NEW_LEDS
164 select LEDS_CLASS
165 select RT2X00_LIB_LEDS
166 ---help---
167 This adds support for led triggers provided my mac80211.
168
169config RT2X00_LIB_DEBUGFS 112config RT2X00_LIB_DEBUGFS
170 bool "Ralink debugfs support" 113 bool "Ralink debugfs support"
171 depends on RT2X00_LIB && MAC80211_DEBUGFS 114 depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 1087dbcf1a04..917cb4f3b038 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -3,6 +3,7 @@ rt2x00lib-y += rt2x00mac.o
3rt2x00lib-y += rt2x00config.o 3rt2x00lib-y += rt2x00config.o
4rt2x00lib-y += rt2x00queue.o 4rt2x00lib-y += rt2x00queue.o
5rt2x00lib-$(CONFIG_RT2X00_LIB_DEBUGFS) += rt2x00debug.o 5rt2x00lib-$(CONFIG_RT2X00_LIB_DEBUGFS) += rt2x00debug.o
6rt2x00lib-$(CONFIG_RT2X00_LIB_CRYPTO) += rt2x00crypto.o
6rt2x00lib-$(CONFIG_RT2X00_LIB_RFKILL) += rt2x00rfkill.o 7rt2x00lib-$(CONFIG_RT2X00_LIB_RFKILL) += rt2x00rfkill.o
7rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o 8rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
8rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o 9rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 4c0538d6099b..08cb9eec16a6 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -231,7 +231,7 @@ static const struct rt2x00debug rt2400pci_rt2x00debug = {
231}; 231};
232#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 232#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
233 233
234#ifdef CONFIG_RT2400PCI_RFKILL 234#ifdef CONFIG_RT2X00_LIB_RFKILL
235static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) 235static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
236{ 236{
237 u32 reg; 237 u32 reg;
@@ -241,9 +241,9 @@ static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
241} 241}
242#else 242#else
243#define rt2400pci_rfkill_poll NULL 243#define rt2400pci_rfkill_poll NULL
244#endif /* CONFIG_RT2400PCI_RFKILL */ 244#endif /* CONFIG_RT2X00_LIB_RFKILL */
245 245
246#ifdef CONFIG_RT2400PCI_LEDS 246#ifdef CONFIG_RT2X00_LIB_LEDS
247static void rt2400pci_brightness_set(struct led_classdev *led_cdev, 247static void rt2400pci_brightness_set(struct led_classdev *led_cdev,
248 enum led_brightness brightness) 248 enum led_brightness brightness)
249{ 249{
@@ -288,7 +288,7 @@ static void rt2400pci_init_led(struct rt2x00_dev *rt2x00dev,
288 led->led_dev.blink_set = rt2400pci_blink_set; 288 led->led_dev.blink_set = rt2400pci_blink_set;
289 led->flags = LED_INITIALIZED; 289 led->flags = LED_INITIALIZED;
290} 290}
291#endif /* CONFIG_RT2400PCI_LEDS */ 291#endif /* CONFIG_RT2X00_LIB_LEDS */
292 292
293/* 293/*
294 * Configuration handlers. 294 * Configuration handlers.
@@ -1241,7 +1241,7 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1241 if (!reg) 1241 if (!reg)
1242 return IRQ_NONE; 1242 return IRQ_NONE;
1243 1243
1244 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 1244 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1245 return IRQ_HANDLED; 1245 return IRQ_HANDLED;
1246 1246
1247 /* 1247 /*
@@ -1374,22 +1374,22 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1374 /* 1374 /*
1375 * Store led mode, for correct led behaviour. 1375 * Store led mode, for correct led behaviour.
1376 */ 1376 */
1377#ifdef CONFIG_RT2400PCI_LEDS 1377#ifdef CONFIG_RT2X00_LIB_LEDS
1378 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1378 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1379 1379
1380 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1380 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
1381 if (value == LED_MODE_TXRX_ACTIVITY) 1381 if (value == LED_MODE_TXRX_ACTIVITY)
1382 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_qual, 1382 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_qual,
1383 LED_TYPE_ACTIVITY); 1383 LED_TYPE_ACTIVITY);
1384#endif /* CONFIG_RT2400PCI_LEDS */ 1384#endif /* CONFIG_RT2X00_LIB_LEDS */
1385 1385
1386 /* 1386 /*
1387 * Detect if this device has an hardware controlled radio. 1387 * Detect if this device has an hardware controlled radio.
1388 */ 1388 */
1389#ifdef CONFIG_RT2400PCI_RFKILL 1389#ifdef CONFIG_RT2X00_LIB_RFKILL
1390 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 1390 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
1391 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 1391 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
1392#endif /* CONFIG_RT2400PCI_RFKILL */ 1392#endif /* CONFIG_RT2X00_LIB_RFKILL */
1393 1393
1394 /* 1394 /*
1395 * Check if the BBP tuning should be enabled. 1395 * Check if the BBP tuning should be enabled.
@@ -1404,7 +1404,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1404 * RF value list for RF2420 & RF2421 1404 * RF value list for RF2420 & RF2421
1405 * Supports: 2.4 GHz 1405 * Supports: 2.4 GHz
1406 */ 1406 */
1407static const struct rf_channel rf_vals_bg[] = { 1407static const struct rf_channel rf_vals_b[] = {
1408 { 1, 0x00022058, 0x000c1fda, 0x00000101, 0 }, 1408 { 1, 0x00022058, 0x000c1fda, 0x00000101, 0 },
1409 { 2, 0x00022058, 0x000c1fee, 0x00000101, 0 }, 1409 { 2, 0x00022058, 0x000c1fee, 0x00000101, 0 },
1410 { 3, 0x00022058, 0x000c2002, 0x00000101, 0 }, 1410 { 3, 0x00022058, 0x000c2002, 0x00000101, 0 },
@@ -1421,10 +1421,11 @@ static const struct rf_channel rf_vals_bg[] = {
1421 { 14, 0x00022058, 0x000c20fa, 0x00000101, 0 }, 1421 { 14, 0x00022058, 0x000c20fa, 0x00000101, 0 },
1422}; 1422};
1423 1423
1424static void rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 1424static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1425{ 1425{
1426 struct hw_mode_spec *spec = &rt2x00dev->spec; 1426 struct hw_mode_spec *spec = &rt2x00dev->spec;
1427 u8 *txpower; 1427 struct channel_info *info;
1428 char *tx_power;
1428 unsigned int i; 1429 unsigned int i;
1429 1430
1430 /* 1431 /*
@@ -1440,23 +1441,28 @@ static void rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1440 EEPROM_MAC_ADDR_0)); 1441 EEPROM_MAC_ADDR_0));
1441 1442
1442 /* 1443 /*
1443 * Convert tx_power array in eeprom.
1444 */
1445 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1446 for (i = 0; i < 14; i++)
1447 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
1448
1449 /*
1450 * Initialize hw_mode information. 1444 * Initialize hw_mode information.
1451 */ 1445 */
1452 spec->supported_bands = SUPPORT_BAND_2GHZ; 1446 spec->supported_bands = SUPPORT_BAND_2GHZ;
1453 spec->supported_rates = SUPPORT_RATE_CCK; 1447 spec->supported_rates = SUPPORT_RATE_CCK;
1454 spec->tx_power_a = NULL;
1455 spec->tx_power_bg = txpower;
1456 spec->tx_power_default = DEFAULT_TXPOWER;
1457 1448
1458 spec->num_channels = ARRAY_SIZE(rf_vals_bg); 1449 spec->num_channels = ARRAY_SIZE(rf_vals_b);
1459 spec->channels = rf_vals_bg; 1450 spec->channels = rf_vals_b;
1451
1452 /*
1453 * Create channel information array
1454 */
1455 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
1456 if (!info)
1457 return -ENOMEM;
1458
1459 spec->channels_info = info;
1460
1461 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1462 for (i = 0; i < 14; i++)
1463 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1464
1465 return 0;
1460} 1466}
1461 1467
1462static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) 1468static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1477,7 +1483,9 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1477 /* 1483 /*
1478 * Initialize hw specifications. 1484 * Initialize hw specifications.
1479 */ 1485 */
1480 rt2400pci_probe_hw_mode(rt2x00dev); 1486 retval = rt2400pci_probe_hw_mode(rt2x00dev);
1487 if (retval)
1488 return retval;
1481 1489
1482 /* 1490 /*
1483 * This device requires the atim queue and DMA-mapped skbs. 1491 * This device requires the atim queue and DMA-mapped skbs.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index bc5564258228..bbff381ce396 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -938,19 +938,13 @@
938#define MAX_TXPOWER 62 938#define MAX_TXPOWER 62
939#define DEFAULT_TXPOWER 39 939#define DEFAULT_TXPOWER 39
940 940
941#define TXPOWER_FROM_DEV(__txpower) \ 941#define __CLAMP_TX(__txpower) \
942({ \ 942 clamp_t(char, (__txpower), MIN_TXPOWER, MAX_TXPOWER)
943 ((__txpower) > MAX_TXPOWER) ? DEFAULT_TXPOWER - MIN_TXPOWER : \ 943
944 ((__txpower) < MIN_TXPOWER) ? DEFAULT_TXPOWER - MIN_TXPOWER : \ 944#define TXPOWER_FROM_DEV(__txpower) \
945 (((__txpower) - MAX_TXPOWER) + MIN_TXPOWER); \ 945 ((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER)
946}) 946
947 947#define TXPOWER_TO_DEV(__txpower) \
948#define TXPOWER_TO_DEV(__txpower) \ 948 MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER)
949({ \
950 (__txpower) += MIN_TXPOWER; \
951 ((__txpower) <= MIN_TXPOWER) ? MAX_TXPOWER : \
952 (((__txpower) >= MAX_TXPOWER) ? MIN_TXPOWER : \
953 (MAX_TXPOWER - ((__txpower) - MIN_TXPOWER))); \
954})
955 949
956#endif /* RT2400PCI_H */ 950#endif /* RT2400PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 181a146b4768..ef42cc04a2d7 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -231,7 +231,7 @@ static const struct rt2x00debug rt2500pci_rt2x00debug = {
231}; 231};
232#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 232#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
233 233
234#ifdef CONFIG_RT2500PCI_RFKILL 234#ifdef CONFIG_RT2X00_LIB_RFKILL
235static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) 235static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
236{ 236{
237 u32 reg; 237 u32 reg;
@@ -241,9 +241,9 @@ static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
241} 241}
242#else 242#else
243#define rt2500pci_rfkill_poll NULL 243#define rt2500pci_rfkill_poll NULL
244#endif /* CONFIG_RT2500PCI_RFKILL */ 244#endif /* CONFIG_RT2X00_LIB_RFKILL */
245 245
246#ifdef CONFIG_RT2500PCI_LEDS 246#ifdef CONFIG_RT2X00_LIB_LEDS
247static void rt2500pci_brightness_set(struct led_classdev *led_cdev, 247static void rt2500pci_brightness_set(struct led_classdev *led_cdev,
248 enum led_brightness brightness) 248 enum led_brightness brightness)
249{ 249{
@@ -288,7 +288,7 @@ static void rt2500pci_init_led(struct rt2x00_dev *rt2x00dev,
288 led->led_dev.blink_set = rt2500pci_blink_set; 288 led->led_dev.blink_set = rt2500pci_blink_set;
289 led->flags = LED_INITIALIZED; 289 led->flags = LED_INITIALIZED;
290} 290}
291#endif /* CONFIG_RT2500PCI_LEDS */ 291#endif /* CONFIG_RT2X00_LIB_LEDS */
292 292
293/* 293/*
294 * Configuration handlers. 294 * Configuration handlers.
@@ -1316,6 +1316,8 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1316 1316
1317 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1317 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1318 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1318 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1319 else
1320 rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
1319 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1321 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1320 rxdesc->dev_flags |= RXDONE_MY_BSS; 1322 rxdesc->dev_flags |= RXDONE_MY_BSS;
1321} 1323}
@@ -1377,7 +1379,7 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1377 if (!reg) 1379 if (!reg)
1378 return IRQ_NONE; 1380 return IRQ_NONE;
1379 1381
1380 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 1382 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1381 return IRQ_HANDLED; 1383 return IRQ_HANDLED;
1382 1384
1383 /* 1385 /*
@@ -1531,22 +1533,22 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1531 /* 1533 /*
1532 * Store led mode, for correct led behaviour. 1534 * Store led mode, for correct led behaviour.
1533 */ 1535 */
1534#ifdef CONFIG_RT2500PCI_LEDS 1536#ifdef CONFIG_RT2X00_LIB_LEDS
1535 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1537 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1536 1538
1537 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1539 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
1538 if (value == LED_MODE_TXRX_ACTIVITY) 1540 if (value == LED_MODE_TXRX_ACTIVITY)
1539 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_qual, 1541 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_qual,
1540 LED_TYPE_ACTIVITY); 1542 LED_TYPE_ACTIVITY);
1541#endif /* CONFIG_RT2500PCI_LEDS */ 1543#endif /* CONFIG_RT2X00_LIB_LEDS */
1542 1544
1543 /* 1545 /*
1544 * Detect if this device has an hardware controlled radio. 1546 * Detect if this device has an hardware controlled radio.
1545 */ 1547 */
1546#ifdef CONFIG_RT2500PCI_RFKILL 1548#ifdef CONFIG_RT2X00_LIB_RFKILL
1547 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 1549 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
1548 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 1550 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
1549#endif /* CONFIG_RT2500PCI_RFKILL */ 1551#endif /* CONFIG_RT2X00_LIB_RFKILL */
1550 1552
1551 /* 1553 /*
1552 * Check if the BBP tuning should be enabled. 1554 * Check if the BBP tuning should be enabled.
@@ -1721,10 +1723,11 @@ static const struct rf_channel rf_vals_5222[] = {
1721 { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, 1723 { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 },
1722}; 1724};
1723 1725
1724static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 1726static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1725{ 1727{
1726 struct hw_mode_spec *spec = &rt2x00dev->spec; 1728 struct hw_mode_spec *spec = &rt2x00dev->spec;
1727 u8 *txpower; 1729 struct channel_info *info;
1730 char *tx_power;
1728 unsigned int i; 1731 unsigned int i;
1729 1732
1730 /* 1733 /*
@@ -1741,20 +1744,10 @@ static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1741 EEPROM_MAC_ADDR_0)); 1744 EEPROM_MAC_ADDR_0));
1742 1745
1743 /* 1746 /*
1744 * Convert tx_power array in eeprom.
1745 */
1746 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1747 for (i = 0; i < 14; i++)
1748 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
1749
1750 /*
1751 * Initialize hw_mode information. 1747 * Initialize hw_mode information.
1752 */ 1748 */
1753 spec->supported_bands = SUPPORT_BAND_2GHZ; 1749 spec->supported_bands = SUPPORT_BAND_2GHZ;
1754 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 1750 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1755 spec->tx_power_a = NULL;
1756 spec->tx_power_bg = txpower;
1757 spec->tx_power_default = DEFAULT_TXPOWER;
1758 1751
1759 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { 1752 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
1760 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); 1753 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
@@ -1776,6 +1769,26 @@ static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1776 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1769 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1777 spec->channels = rf_vals_5222; 1770 spec->channels = rf_vals_5222;
1778 } 1771 }
1772
1773 /*
1774 * Create channel information array
1775 */
1776 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
1777 if (!info)
1778 return -ENOMEM;
1779
1780 spec->channels_info = info;
1781
1782 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1783 for (i = 0; i < 14; i++)
1784 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1785
1786 if (spec->num_channels > 14) {
1787 for (i = 14; i < spec->num_channels; i++)
1788 info[i].tx_power1 = DEFAULT_TXPOWER;
1789 }
1790
1791 return 0;
1779} 1792}
1780 1793
1781static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) 1794static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1796,7 +1809,9 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1796 /* 1809 /*
1797 * Initialize hw specifications. 1810 * Initialize hw specifications.
1798 */ 1811 */
1799 rt2500pci_probe_hw_mode(rt2x00dev); 1812 retval = rt2500pci_probe_hw_mode(rt2x00dev);
1813 if (retval)
1814 return retval;
1800 1815
1801 /* 1816 /*
1802 * This device requires the atim queue and DMA-mapped skbs. 1817 * This device requires the atim queue and DMA-mapped skbs.
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 42f376929ea9..8c26bef6cf49 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -1223,17 +1223,10 @@
1223#define MAX_TXPOWER 31 1223#define MAX_TXPOWER 31
1224#define DEFAULT_TXPOWER 24 1224#define DEFAULT_TXPOWER 24
1225 1225
1226#define TXPOWER_FROM_DEV(__txpower) \ 1226#define TXPOWER_FROM_DEV(__txpower) \
1227({ \ 1227 (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
1228 ((__txpower) > MAX_TXPOWER) ? \ 1228
1229 DEFAULT_TXPOWER : (__txpower); \ 1229#define TXPOWER_TO_DEV(__txpower) \
1230}) 1230 clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
1231
1232#define TXPOWER_TO_DEV(__txpower) \
1233({ \
1234 ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \
1235 (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \
1236 (__txpower)); \
1237})
1238 1231
1239#endif /* RT2500PCI_H */ 1232#endif /* RT2500PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index cd5af656932d..d3bf7bba611a 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -288,7 +288,7 @@ static const struct rt2x00debug rt2500usb_rt2x00debug = {
288}; 288};
289#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 289#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
290 290
291#ifdef CONFIG_RT2500USB_LEDS 291#ifdef CONFIG_RT2X00_LIB_LEDS
292static void rt2500usb_brightness_set(struct led_classdev *led_cdev, 292static void rt2500usb_brightness_set(struct led_classdev *led_cdev,
293 enum led_brightness brightness) 293 enum led_brightness brightness)
294{ 294{
@@ -333,7 +333,7 @@ static void rt2500usb_init_led(struct rt2x00_dev *rt2x00dev,
333 led->led_dev.blink_set = rt2500usb_blink_set; 333 led->led_dev.blink_set = rt2500usb_blink_set;
334 led->flags = LED_INITIALIZED; 334 led->flags = LED_INITIALIZED;
335} 335}
336#endif /* CONFIG_RT2500USB_LEDS */ 336#endif /* CONFIG_RT2X00_LIB_LEDS */
337 337
338/* 338/*
339 * Configuration handlers. 339 * Configuration handlers.
@@ -384,7 +384,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
384 rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg); 384 rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg);
385 rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6); 385 rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6);
386 rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 386 rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW,
387 2 * (conf->type != IEEE80211_IF_TYPE_STA)); 387 2 * (conf->type != NL80211_IFTYPE_STATION));
388 rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg); 388 rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg);
389 389
390 /* 390 /*
@@ -1114,8 +1114,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1114 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, 1114 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
1115 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); 1115 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1116 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1116 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1117 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, 1117 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1118 skb->len - skbdesc->desc_len);
1119 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE); 1118 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE);
1120 rt2x00_desc_write(txd, 0, word); 1119 rt2x00_desc_write(txd, 0, word);
1121} 1120}
@@ -1134,7 +1133,6 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1134 int pipe = usb_sndbulkpipe(usb_dev, 1); 1133 int pipe = usb_sndbulkpipe(usb_dev, 1);
1135 int length; 1134 int length;
1136 u16 reg; 1135 u16 reg;
1137 u32 word, len;
1138 1136
1139 /* 1137 /*
1140 * Add the descriptor in front of the skb. 1138 * Add the descriptor in front of the skb.
@@ -1144,17 +1142,6 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1144 skbdesc->desc = entry->skb->data; 1142 skbdesc->desc = entry->skb->data;
1145 1143
1146 /* 1144 /*
1147 * Adjust the beacon databyte count. The current number is
1148 * calculated before this function gets called, but falsely
1149 * assumes that the descriptor was already present in the SKB.
1150 */
1151 rt2x00_desc_read(skbdesc->desc, 0, &word);
1152 len = rt2x00_get_field32(word, TXD_W0_DATABYTE_COUNT);
1153 len += skbdesc->desc_len;
1154 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, len);
1155 rt2x00_desc_write(skbdesc->desc, 0, word);
1156
1157 /*
1158 * Disable beaconing while we are reloading the beacon data, 1145 * Disable beaconing while we are reloading the beacon data,
1159 * otherwise we might be sending out invalid data. 1146 * otherwise we might be sending out invalid data.
1160 */ 1147 */
@@ -1280,6 +1267,8 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1280 1267
1281 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1268 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1282 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1269 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1270 else
1271 rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
1283 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1272 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1284 rxdesc->dev_flags |= RXDONE_MY_BSS; 1273 rxdesc->dev_flags |= RXDONE_MY_BSS;
1285 1274
@@ -1297,7 +1286,7 @@ static void rt2500usb_beacondone(struct urb *urb)
1297 struct queue_entry *entry = (struct queue_entry *)urb->context; 1286 struct queue_entry *entry = (struct queue_entry *)urb->context;
1298 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; 1287 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
1299 1288
1300 if (!test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) 1289 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags))
1301 return; 1290 return;
1302 1291
1303 /* 1292 /*
@@ -1484,14 +1473,14 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1484 /* 1473 /*
1485 * Store led mode, for correct led behaviour. 1474 * Store led mode, for correct led behaviour.
1486 */ 1475 */
1487#ifdef CONFIG_RT2500USB_LEDS 1476#ifdef CONFIG_RT2X00_LIB_LEDS
1488 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1477 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1489 1478
1490 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1479 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
1491 if (value == LED_MODE_TXRX_ACTIVITY) 1480 if (value == LED_MODE_TXRX_ACTIVITY)
1492 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual, 1481 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual,
1493 LED_TYPE_ACTIVITY); 1482 LED_TYPE_ACTIVITY);
1494#endif /* CONFIG_RT2500USB_LEDS */ 1483#endif /* CONFIG_RT2X00_LIB_LEDS */
1495 1484
1496 /* 1485 /*
1497 * Check if the BBP tuning should be disabled. 1486 * Check if the BBP tuning should be disabled.
@@ -1665,10 +1654,11 @@ static const struct rf_channel rf_vals_5222[] = {
1665 { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, 1654 { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 },
1666}; 1655};
1667 1656
1668static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 1657static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1669{ 1658{
1670 struct hw_mode_spec *spec = &rt2x00dev->spec; 1659 struct hw_mode_spec *spec = &rt2x00dev->spec;
1671 u8 *txpower; 1660 struct channel_info *info;
1661 char *tx_power;
1672 unsigned int i; 1662 unsigned int i;
1673 1663
1674 /* 1664 /*
@@ -1687,20 +1677,10 @@ static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1687 EEPROM_MAC_ADDR_0)); 1677 EEPROM_MAC_ADDR_0));
1688 1678
1689 /* 1679 /*
1690 * Convert tx_power array in eeprom.
1691 */
1692 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1693 for (i = 0; i < 14; i++)
1694 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
1695
1696 /*
1697 * Initialize hw_mode information. 1680 * Initialize hw_mode information.
1698 */ 1681 */
1699 spec->supported_bands = SUPPORT_BAND_2GHZ; 1682 spec->supported_bands = SUPPORT_BAND_2GHZ;
1700 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 1683 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1701 spec->tx_power_a = NULL;
1702 spec->tx_power_bg = txpower;
1703 spec->tx_power_default = DEFAULT_TXPOWER;
1704 1684
1705 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { 1685 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
1706 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); 1686 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
@@ -1722,6 +1702,26 @@ static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1722 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1702 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1723 spec->channels = rf_vals_5222; 1703 spec->channels = rf_vals_5222;
1724 } 1704 }
1705
1706 /*
1707 * Create channel information array
1708 */
1709 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
1710 if (!info)
1711 return -ENOMEM;
1712
1713 spec->channels_info = info;
1714
1715 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1716 for (i = 0; i < 14; i++)
1717 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1718
1719 if (spec->num_channels > 14) {
1720 for (i = 14; i < spec->num_channels; i++)
1721 info[i].tx_power1 = DEFAULT_TXPOWER;
1722 }
1723
1724 return 0;
1725} 1725}
1726 1726
1727static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) 1727static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1742,7 +1742,9 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1742 /* 1742 /*
1743 * Initialize hw specifications. 1743 * Initialize hw specifications.
1744 */ 1744 */
1745 rt2500usb_probe_hw_mode(rt2x00dev); 1745 retval = rt2500usb_probe_hw_mode(rt2x00dev);
1746 if (retval)
1747 return retval;
1746 1748
1747 /* 1749 /*
1748 * This device requires the atim queue 1750 * This device requires the atim queue
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 4769ffeb4cc6..89e5ed24e4f7 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -825,17 +825,10 @@
825#define MAX_TXPOWER 31 825#define MAX_TXPOWER 31
826#define DEFAULT_TXPOWER 24 826#define DEFAULT_TXPOWER 24
827 827
828#define TXPOWER_FROM_DEV(__txpower) \ 828#define TXPOWER_FROM_DEV(__txpower) \
829({ \ 829 (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
830 ((__txpower) > MAX_TXPOWER) ? \ 830
831 DEFAULT_TXPOWER : (__txpower); \ 831#define TXPOWER_TO_DEV(__txpower) \
832}) 832 clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
833
834#define TXPOWER_TO_DEV(__txpower) \
835({ \
836 ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \
837 (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \
838 (__txpower)); \
839})
840 833
841#endif /* RT2500USB_H */ 834#endif /* RT2500USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8b10ea41b204..1359a3768404 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -44,7 +44,7 @@
44/* 44/*
45 * Module information. 45 * Module information.
46 */ 46 */
47#define DRV_VERSION "2.1.8" 47#define DRV_VERSION "2.2.1"
48#define DRV_PROJECT "http://rt2x00.serialmonkey.com" 48#define DRV_PROJECT "http://rt2x00.serialmonkey.com"
49 49
50/* 50/*
@@ -53,11 +53,11 @@
53 */ 53 */
54#define DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, __args...) \ 54#define DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, __args...) \
55 printk(__kernlvl "%s -> %s: %s - " __msg, \ 55 printk(__kernlvl "%s -> %s: %s - " __msg, \
56 wiphy_name((__dev)->hw->wiphy), __FUNCTION__, __lvl, ##__args) 56 wiphy_name((__dev)->hw->wiphy), __func__, __lvl, ##__args)
57 57
58#define DEBUG_PRINTK_PROBE(__kernlvl, __lvl, __msg, __args...) \ 58#define DEBUG_PRINTK_PROBE(__kernlvl, __lvl, __msg, __args...) \
59 printk(__kernlvl "%s -> %s: %s - " __msg, \ 59 printk(__kernlvl "%s -> %s: %s - " __msg, \
60 KBUILD_MODNAME, __FUNCTION__, __lvl, ##__args) 60 KBUILD_MODNAME, __func__, __lvl, ##__args)
61 61
62#ifdef CONFIG_RT2X00_DEBUG 62#ifdef CONFIG_RT2X00_DEBUG
63#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ 63#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \
@@ -144,6 +144,17 @@ struct rf_channel {
144}; 144};
145 145
146/* 146/*
147 * Channel information structure
148 */
149struct channel_info {
150 unsigned int flags;
151#define GEOGRAPHY_ALLOWED 0x00000001
152
153 short tx_power1;
154 short tx_power2;
155};
156
157/*
147 * Antenna setup values. 158 * Antenna setup values.
148 */ 159 */
149struct antenna_setup { 160struct antenna_setup {
@@ -394,10 +405,7 @@ static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
394 * @num_channels: Number of supported channels. This is used as array size 405 * @num_channels: Number of supported channels. This is used as array size
395 * for @tx_power_a, @tx_power_bg and @channels. 406 * for @tx_power_a, @tx_power_bg and @channels.
396 * @channels: Device/chipset specific channel values (See &struct rf_channel). 407 * @channels: Device/chipset specific channel values (See &struct rf_channel).
397 * @tx_power_a: TX power values for all 5.2GHz channels (may be NULL). 408 * @channels_info: Additional information for channels (See &struct channel_info).
398 * @tx_power_bg: TX power values for all 2.4GHz channels (may be NULL).
399 * @tx_power_default: Default TX power value to use when either
400 * @tx_power_a or @tx_power_bg is missing.
401 */ 409 */
402struct hw_mode_spec { 410struct hw_mode_spec {
403 unsigned int supported_bands; 411 unsigned int supported_bands;
@@ -410,10 +418,7 @@ struct hw_mode_spec {
410 418
411 unsigned int num_channels; 419 unsigned int num_channels;
412 const struct rf_channel *channels; 420 const struct rf_channel *channels;
413 421 const struct channel_info *channels_info;
414 const u8 *tx_power_a;
415 const u8 *tx_power_bg;
416 u8 tx_power_default;
417}; 422};
418 423
419/* 424/*
@@ -425,7 +430,9 @@ struct hw_mode_spec {
425 */ 430 */
426struct rt2x00lib_conf { 431struct rt2x00lib_conf {
427 struct ieee80211_conf *conf; 432 struct ieee80211_conf *conf;
433
428 struct rf_channel rf; 434 struct rf_channel rf;
435 struct channel_info channel;
429 436
430 struct antenna_setup ant; 437 struct antenna_setup ant;
431 438
@@ -452,6 +459,23 @@ struct rt2x00lib_erp {
452}; 459};
453 460
454/* 461/*
462 * Configuration structure for hardware encryption.
463 */
464struct rt2x00lib_crypto {
465 enum cipher cipher;
466
467 enum set_key_cmd cmd;
468 const u8 *address;
469
470 u32 bssidx;
471 u32 aid;
472
473 u8 key[16];
474 u8 tx_mic[8];
475 u8 rx_mic[8];
476};
477
478/*
455 * Configuration structure wrapper around the 479 * Configuration structure wrapper around the
456 * rt2x00 interface configuration handler. 480 * rt2x00 interface configuration handler.
457 */ 481 */
@@ -459,7 +483,7 @@ struct rt2x00intf_conf {
459 /* 483 /*
460 * Interface type 484 * Interface type
461 */ 485 */
462 enum ieee80211_if_types type; 486 enum nl80211_iftype type;
463 487
464 /* 488 /*
465 * TSF sync value, this is dependant on the operation type. 489 * TSF sync value, this is dependant on the operation type.
@@ -547,6 +571,12 @@ struct rt2x00lib_ops {
547 /* 571 /*
548 * Configuration handlers. 572 * Configuration handlers.
549 */ 573 */
574 int (*config_shared_key) (struct rt2x00_dev *rt2x00dev,
575 struct rt2x00lib_crypto *crypto,
576 struct ieee80211_key_conf *key);
577 int (*config_pairwise_key) (struct rt2x00_dev *rt2x00dev,
578 struct rt2x00lib_crypto *crypto,
579 struct ieee80211_key_conf *key);
550 void (*config_filter) (struct rt2x00_dev *rt2x00dev, 580 void (*config_filter) (struct rt2x00_dev *rt2x00dev,
551 const unsigned int filter_flags); 581 const unsigned int filter_flags);
552 void (*config_intf) (struct rt2x00_dev *rt2x00dev, 582 void (*config_intf) (struct rt2x00_dev *rt2x00dev,
@@ -599,17 +629,16 @@ enum rt2x00_flags {
599 /* 629 /*
600 * Device state flags 630 * Device state flags
601 */ 631 */
602 DEVICE_PRESENT, 632 DEVICE_STATE_PRESENT,
603 DEVICE_REGISTERED_HW, 633 DEVICE_STATE_REGISTERED_HW,
604 DEVICE_INITIALIZED, 634 DEVICE_STATE_INITIALIZED,
605 DEVICE_STARTED, 635 DEVICE_STATE_STARTED,
606 DEVICE_STARTED_SUSPEND, 636 DEVICE_STATE_STARTED_SUSPEND,
607 DEVICE_ENABLED_RADIO, 637 DEVICE_STATE_ENABLED_RADIO,
608 DEVICE_DISABLED_RADIO_HW, 638 DEVICE_STATE_DISABLED_RADIO_HW,
609 DEVICE_DIRTY_CONFIG,
610 639
611 /* 640 /*
612 * Driver features 641 * Driver requirements
613 */ 642 */
614 DRIVER_REQUIRE_FIRMWARE, 643 DRIVER_REQUIRE_FIRMWARE,
615 DRIVER_REQUIRE_BEACON_GUARD, 644 DRIVER_REQUIRE_BEACON_GUARD,
@@ -618,9 +647,14 @@ enum rt2x00_flags {
618 DRIVER_REQUIRE_DMA, 647 DRIVER_REQUIRE_DMA,
619 648
620 /* 649 /*
621 * Driver configuration 650 * Driver features
622 */ 651 */
623 CONFIG_SUPPORT_HW_BUTTON, 652 CONFIG_SUPPORT_HW_BUTTON,
653 CONFIG_SUPPORT_HW_CRYPTO,
654
655 /*
656 * Driver configuration
657 */
624 CONFIG_FRAME_TYPE, 658 CONFIG_FRAME_TYPE,
625 CONFIG_RF_SEQUENCE, 659 CONFIG_RF_SEQUENCE,
626 CONFIG_EXTERNAL_LNA_A, 660 CONFIG_EXTERNAL_LNA_A,
@@ -769,6 +803,11 @@ struct rt2x00_dev {
769 u32 *rf; 803 u32 *rf;
770 804
771 /* 805 /*
806 * LNA gain
807 */
808 short lna_gain;
809
810 /*
772 * USB Max frame size (for rt2500usb & rt73usb). 811 * USB Max frame size (for rt2500usb & rt73usb).
773 */ 812 */
774 u16 usb_maxpacket; 813 u16 usb_maxpacket;
@@ -966,6 +1005,13 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
966 unsigned int changed_flags, 1005 unsigned int changed_flags,
967 unsigned int *total_flags, 1006 unsigned int *total_flags,
968 int mc_count, struct dev_addr_list *mc_list); 1007 int mc_count, struct dev_addr_list *mc_list);
1008#ifdef CONFIG_RT2X00_LIB_CRYPTO
1009int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1010 const u8 *local_address, const u8 *address,
1011 struct ieee80211_key_conf *key);
1012#else
1013#define rt2x00mac_set_key NULL
1014#endif /* CONFIG_RT2X00_LIB_CRYPTO */
969int rt2x00mac_get_stats(struct ieee80211_hw *hw, 1015int rt2x00mac_get_stats(struct ieee80211_hw *hw,
970 struct ieee80211_low_level_stats *stats); 1016 struct ieee80211_low_level_stats *stats);
971int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw, 1017int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index d134c3be539a..4d5e87b015a0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -31,7 +31,7 @@
31 31
32void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, 32void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
33 struct rt2x00_intf *intf, 33 struct rt2x00_intf *intf,
34 enum ieee80211_if_types type, 34 enum nl80211_iftype type,
35 u8 *mac, u8 *bssid) 35 u8 *mac, u8 *bssid)
36{ 36{
37 struct rt2x00intf_conf conf; 37 struct rt2x00intf_conf conf;
@@ -40,11 +40,11 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
40 conf.type = type; 40 conf.type = type;
41 41
42 switch (type) { 42 switch (type) {
43 case IEEE80211_IF_TYPE_IBSS: 43 case NL80211_IFTYPE_ADHOC:
44 case IEEE80211_IF_TYPE_AP: 44 case NL80211_IFTYPE_AP:
45 conf.sync = TSF_SYNC_BEACON; 45 conf.sync = TSF_SYNC_BEACON;
46 break; 46 break;
47 case IEEE80211_IF_TYPE_STA: 47 case NL80211_IFTYPE_STATION:
48 conf.sync = TSF_SYNC_INFRA; 48 conf.sync = TSF_SYNC_INFRA;
49 break; 49 break;
50 default: 50 default:
@@ -121,7 +121,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
121 * Antenna setup changes require the RX to be disabled, 121 * Antenna setup changes require the RX to be disabled,
122 * else the changes will be ignored by the device. 122 * else the changes will be ignored by the device.
123 */ 123 */
124 if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 124 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
125 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK); 125 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);
126 126
127 /* 127 /*
@@ -136,7 +136,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
136 rt2x00dev->link.ant.active.rx = libconf.ant.rx; 136 rt2x00dev->link.ant.active.rx = libconf.ant.rx;
137 rt2x00dev->link.ant.active.tx = libconf.ant.tx; 137 rt2x00dev->link.ant.active.tx = libconf.ant.tx;
138 138
139 if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 139 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
140 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); 140 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
141} 141}
142 142
@@ -245,6 +245,10 @@ config:
245 memcpy(&libconf.rf, 245 memcpy(&libconf.rf,
246 &rt2x00dev->spec.channels[conf->channel->hw_value], 246 &rt2x00dev->spec.channels[conf->channel->hw_value],
247 sizeof(libconf.rf)); 247 sizeof(libconf.rf));
248
249 memcpy(&libconf.channel,
250 &rt2x00dev->spec.channels_info[conf->channel->hw_value],
251 sizeof(libconf.channel));
248 } 252 }
249 253
250 if (flags & CONFIG_UPDATE_ANTENNA) { 254 if (flags & CONFIG_UPDATE_ANTENNA) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
new file mode 100644
index 000000000000..e1448cfa9444
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -0,0 +1,215 @@
1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00lib
23 Abstract: rt2x00 crypto specific routines.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28
29#include "rt2x00.h"
30#include "rt2x00lib.h"
31
32enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
33{
34 switch (key->alg) {
35 case ALG_WEP:
36 if (key->keylen == LEN_WEP40)
37 return CIPHER_WEP64;
38 else
39 return CIPHER_WEP128;
40 case ALG_TKIP:
41 return CIPHER_TKIP;
42 case ALG_CCMP:
43 return CIPHER_AES;
44 default:
45 return CIPHER_NONE;
46 }
47}
48
49unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info)
50{
51 struct ieee80211_key_conf *key = tx_info->control.hw_key;
52 unsigned int overhead = 0;
53
54 /*
55 * Extend frame length to include IV/EIV/ICV/MMIC,
56 * note that these lengths should only be added when
57 * mac80211 does not generate it.
58 */
59 overhead += tx_info->control.icv_len;
60
61 if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
62 overhead += tx_info->control.iv_len;
63
64 if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
65 if (key->alg == ALG_TKIP)
66 overhead += 8;
67 }
68
69 return overhead;
70}
71
72void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len)
73{
74 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
75 unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
76
77 if (unlikely(!iv_len))
78 return;
79
80 /* Copy IV/EIV data */
81 if (iv_len >= 4)
82 memcpy(&skbdesc->iv, skb->data + header_length, 4);
83 if (iv_len >= 8)
84 memcpy(&skbdesc->eiv, skb->data + header_length + 4, 4);
85
86 /* Move ieee80211 header */
87 memmove(skb->data + iv_len, skb->data, header_length);
88
89 /* Pull buffer to correct size */
90 skb_pull(skb, iv_len);
91
92 /* IV/EIV data has officially be stripped */
93 skbdesc->flags |= FRAME_DESC_IV_STRIPPED;
94}
95
96void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
97{
98 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
99 unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
100 const unsigned int iv_len =
101 ((!!(skbdesc->iv)) * 4) + ((!!(skbdesc->eiv)) * 4);
102
103 if (!(skbdesc->flags & FRAME_DESC_IV_STRIPPED))
104 return;
105
106 skb_push(skb, iv_len);
107
108 /* Move ieee80211 header */
109 memmove(skb->data, skb->data + iv_len, header_length);
110
111 /* Copy IV/EIV data */
112 if (iv_len >= 4)
113 memcpy(skb->data + header_length, &skbdesc->iv, 4);
114 if (iv_len >= 8)
115 memcpy(skb->data + header_length + 4, &skbdesc->eiv, 4);
116
117 /* IV/EIV data has returned into the frame */
118 skbdesc->flags &= ~FRAME_DESC_IV_STRIPPED;
119}
120
121void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
122 unsigned int header_length,
123 struct rxdone_entry_desc *rxdesc)
124{
125 unsigned int payload_len = rxdesc->size - header_length;
126 unsigned int iv_len;
127 unsigned int icv_len;
128 unsigned int transfer = 0;
129
130 /*
131 * WEP64/WEP128: Provides IV & ICV
132 * TKIP: Provides IV/EIV & ICV
133 * AES: Provides IV/EIV & ICV
134 */
135 switch (rxdesc->cipher) {
136 case CIPHER_WEP64:
137 case CIPHER_WEP128:
138 iv_len = 4;
139 icv_len = 4;
140 break;
141 case CIPHER_TKIP:
142 iv_len = 8;
143 icv_len = 4;
144 break;
145 case CIPHER_AES:
146 iv_len = 8;
147 icv_len = 8;
148 break;
149 default:
150 /* Unsupported type */
151 return;
152 }
153
154 /*
155 * Make room for new data, note that we increase both
156 * headsize and tailsize when required. The tailsize is
157 * only needed when ICV data needs to be inserted and
158 * the padding is smaller than the ICV data.
159 * When alignment requirements are greater than the
160 * ICV data we must trim the skb to the correct size
161 * because we need to remove the extra bytes.
162 */
163 skb_push(skb, iv_len + align);
164 if (align < icv_len)
165 skb_put(skb, icv_len - align);
166 else if (align > icv_len)
167 skb_trim(skb, rxdesc->size + iv_len + icv_len);
168
169 /* Move ieee80211 header */
170 memmove(skb->data + transfer,
171 skb->data + transfer + iv_len + align,
172 header_length);
173 transfer += header_length;
174
175 /* Copy IV data */
176 if (iv_len >= 4) {
177 memcpy(skb->data + transfer, &rxdesc->iv, 4);
178 transfer += 4;
179 }
180
181 /* Copy EIV data */
182 if (iv_len >= 8) {
183 memcpy(skb->data + transfer, &rxdesc->eiv, 4);
184 transfer += 4;
185 }
186
187 /* Move payload */
188 if (align) {
189 memmove(skb->data + transfer,
190 skb->data + transfer + align,
191 payload_len);
192 }
193
194 /*
195 * NOTE: Always count the payload as transferred,
196 * even when alignment was set to zero. This is required
197 * for determining the correct offset for the ICV data.
198 */
199 transfer += payload_len;
200
201 /* Copy ICV data */
202 if (icv_len >= 4) {
203 memcpy(skb->data + transfer, &rxdesc->icv, 4);
204 /*
205 * AES appends 8 bytes, we can't fill the upper
206 * 4 bytes, but mac80211 doesn't care about what
207 * we provide here anyway and strips it immediately.
208 */
209 transfer += icv_len;
210 }
211
212 /* IV/EIV/ICV has been inserted into frame */
213 rxdesc->size = transfer;
214 rxdesc->flags &= ~RX_FLAG_IV_STRIPPED;
215}
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 6bee1d611bbf..5cf4c859e39d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -35,6 +35,13 @@
35 35
36#define MAX_LINE_LENGTH 64 36#define MAX_LINE_LENGTH 64
37 37
38struct rt2x00debug_crypto {
39 unsigned long success;
40 unsigned long icv_error;
41 unsigned long mic_error;
42 unsigned long key_error;
43};
44
38struct rt2x00debug_intf { 45struct rt2x00debug_intf {
39 /* 46 /*
40 * Pointer to driver structure where 47 * Pointer to driver structure where
@@ -63,6 +70,7 @@ struct rt2x00debug_intf {
63 * - queue folder 70 * - queue folder
64 * - frame dump file 71 * - frame dump file
65 * - queue stats file 72 * - queue stats file
73 * - crypto stats file
66 */ 74 */
67 struct dentry *driver_folder; 75 struct dentry *driver_folder;
68 struct dentry *driver_entry; 76 struct dentry *driver_entry;
@@ -80,6 +88,7 @@ struct rt2x00debug_intf {
80 struct dentry *queue_folder; 88 struct dentry *queue_folder;
81 struct dentry *queue_frame_dump_entry; 89 struct dentry *queue_frame_dump_entry;
82 struct dentry *queue_stats_entry; 90 struct dentry *queue_stats_entry;
91 struct dentry *crypto_stats_entry;
83 92
84 /* 93 /*
85 * The frame dump file only allows a single reader, 94 * The frame dump file only allows a single reader,
@@ -98,6 +107,12 @@ struct rt2x00debug_intf {
98 wait_queue_head_t frame_dump_waitqueue; 107 wait_queue_head_t frame_dump_waitqueue;
99 108
100 /* 109 /*
110 * HW crypto statistics.
111 * All statistics are stored separately per cipher type.
112 */
113 struct rt2x00debug_crypto crypto_stats[CIPHER_MAX];
114
115 /*
101 * Driver and chipset files will use a data buffer 116 * Driver and chipset files will use a data buffer
102 * that has been created in advance. This will simplify 117 * that has been created in advance. This will simplify
103 * the code since we can use the debugfs functions. 118 * the code since we can use the debugfs functions.
@@ -114,6 +129,25 @@ struct rt2x00debug_intf {
114 unsigned int offset_rf; 129 unsigned int offset_rf;
115}; 130};
116 131
132void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
133 enum cipher cipher, enum rx_crypto status)
134{
135 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
136
137 if (cipher == CIPHER_TKIP_NO_MIC)
138 cipher = CIPHER_TKIP;
139 if (cipher == CIPHER_NONE || cipher > CIPHER_MAX)
140 return;
141
142 /* Remove CIPHER_NONE index */
143 cipher--;
144
145 intf->crypto_stats[cipher].success += (status == RX_CRYPTO_SUCCESS);
146 intf->crypto_stats[cipher].icv_error += (status == RX_CRYPTO_FAIL_ICV);
147 intf->crypto_stats[cipher].mic_error += (status == RX_CRYPTO_FAIL_MIC);
148 intf->crypto_stats[cipher].key_error += (status == RX_CRYPTO_FAIL_KEY);
149}
150
117void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, 151void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
118 enum rt2x00_dump_type type, struct sk_buff *skb) 152 enum rt2x00_dump_type type, struct sk_buff *skb)
119{ 153{
@@ -327,6 +361,59 @@ static const struct file_operations rt2x00debug_fop_queue_stats = {
327 .release = rt2x00debug_file_release, 361 .release = rt2x00debug_file_release,
328}; 362};
329 363
364#ifdef CONFIG_RT2X00_LIB_CRYPTO
365static ssize_t rt2x00debug_read_crypto_stats(struct file *file,
366 char __user *buf,
367 size_t length,
368 loff_t *offset)
369{
370 struct rt2x00debug_intf *intf = file->private_data;
371 char *name[] = { "WEP64", "WEP128", "TKIP", "AES" };
372 char *data;
373 char *temp;
374 size_t size;
375 unsigned int i;
376
377 if (*offset)
378 return 0;
379
380 data = kzalloc((1 + CIPHER_MAX)* MAX_LINE_LENGTH, GFP_KERNEL);
381 if (!data)
382 return -ENOMEM;
383
384 temp = data;
385 temp += sprintf(data, "cipher\tsuccess\ticv err\tmic err\tkey err\n");
386
387 for (i = 0; i < CIPHER_MAX; i++) {
388 temp += sprintf(temp, "%s\t%lu\t%lu\t%lu\t%lu\n", name[i],
389 intf->crypto_stats[i].success,
390 intf->crypto_stats[i].icv_error,
391 intf->crypto_stats[i].mic_error,
392 intf->crypto_stats[i].key_error);
393 }
394
395 size = strlen(data);
396 size = min(size, length);
397
398 if (copy_to_user(buf, data, size)) {
399 kfree(data);
400 return -EFAULT;
401 }
402
403 kfree(data);
404
405 *offset += size;
406 return size;
407}
408
409static const struct file_operations rt2x00debug_fop_crypto_stats = {
410 .owner = THIS_MODULE,
411 .read = rt2x00debug_read_crypto_stats,
412 .open = rt2x00debug_file_open,
413 .release = rt2x00debug_file_release,
414};
415#endif
416
330#define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \ 417#define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \
331static ssize_t rt2x00debug_read_##__name(struct file *file, \ 418static ssize_t rt2x00debug_read_##__name(struct file *file, \
332 char __user *buf, \ 419 char __user *buf, \
@@ -569,6 +656,13 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
569 debugfs_create_file("queue", S_IRUSR, intf->queue_folder, 656 debugfs_create_file("queue", S_IRUSR, intf->queue_folder,
570 intf, &rt2x00debug_fop_queue_stats); 657 intf, &rt2x00debug_fop_queue_stats);
571 658
659#ifdef CONFIG_RT2X00_LIB_CRYPTO
660 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
661 intf->crypto_stats_entry =
662 debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
663 intf, &rt2x00debug_fop_crypto_stats);
664#endif
665
572 return; 666 return;
573 667
574exit: 668exit:
@@ -587,6 +681,9 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
587 681
588 skb_queue_purge(&intf->frame_dump_skbqueue); 682 skb_queue_purge(&intf->frame_dump_skbqueue);
589 683
684#ifdef CONFIG_RT2X00_LIB_CRYPTO
685 debugfs_remove(intf->crypto_stats_entry);
686#endif
590 debugfs_remove(intf->queue_stats_entry); 687 debugfs_remove(intf->queue_stats_entry);
591 debugfs_remove(intf->queue_frame_dump_entry); 688 debugfs_remove(intf->queue_frame_dump_entry);
592 debugfs_remove(intf->queue_folder); 689 debugfs_remove(intf->queue_folder);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index f42283ad7b02..86840e3585e8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -34,7 +34,7 @@
34 */ 34 */
35void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev) 35void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev)
36{ 36{
37 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 37 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
38 return; 38 return;
39 39
40 /* 40 /*
@@ -94,8 +94,8 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
94 * Don't enable the radio twice. 94 * Don't enable the radio twice.
95 * And check if the hardware button has been disabled. 95 * And check if the hardware button has been disabled.
96 */ 96 */
97 if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 97 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
98 test_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags)) 98 test_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags))
99 return 0; 99 return 0;
100 100
101 /* 101 /*
@@ -117,7 +117,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
117 rt2x00leds_led_radio(rt2x00dev, true); 117 rt2x00leds_led_radio(rt2x00dev, true);
118 rt2x00led_led_activity(rt2x00dev, true); 118 rt2x00led_led_activity(rt2x00dev, true);
119 119
120 __set_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags); 120 set_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags);
121 121
122 /* 122 /*
123 * Enable RX. 123 * Enable RX.
@@ -134,7 +134,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
134 134
135void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) 135void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
136{ 136{
137 if (!__test_and_clear_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 137 if (!test_and_clear_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
138 return; 138 return;
139 139
140 /* 140 /*
@@ -354,7 +354,7 @@ static void rt2x00lib_link_tuner(struct work_struct *work)
354 * When the radio is shutting down we should 354 * When the radio is shutting down we should
355 * immediately cease all link tuning. 355 * immediately cease all link tuning.
356 */ 356 */
357 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 357 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
358 return; 358 return;
359 359
360 /* 360 /*
@@ -431,7 +431,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
431 * note that in the spinlock protected area above the delayed_flags 431 * note that in the spinlock protected area above the delayed_flags
432 * have been cleared correctly. 432 * have been cleared correctly.
433 */ 433 */
434 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 434 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
435 return; 435 return;
436 436
437 if (delayed_flags & DELAYED_UPDATE_BEACON) 437 if (delayed_flags & DELAYED_UPDATE_BEACON)
@@ -467,8 +467,8 @@ static void rt2x00lib_beacondone_iter(void *data, u8 *mac,
467 struct rt2x00_dev *rt2x00dev = data; 467 struct rt2x00_dev *rt2x00dev = data;
468 struct rt2x00_intf *intf = vif_to_intf(vif); 468 struct rt2x00_intf *intf = vif_to_intf(vif);
469 469
470 if (vif->type != IEEE80211_IF_TYPE_AP && 470 if (vif->type != NL80211_IFTYPE_AP &&
471 vif->type != IEEE80211_IF_TYPE_IBSS) 471 vif->type != NL80211_IFTYPE_ADHOC)
472 return; 472 return;
473 473
474 /* 474 /*
@@ -484,7 +484,7 @@ static void rt2x00lib_beacondone_iter(void *data, u8 *mac,
484 484
485void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) 485void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
486{ 486{
487 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 487 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
488 return; 488 return;
489 489
490 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, 490 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
@@ -508,6 +508,15 @@ void rt2x00lib_txdone(struct queue_entry *entry,
508 rt2x00queue_unmap_skb(rt2x00dev, entry->skb); 508 rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
509 509
510 /* 510 /*
511 * If the IV/EIV data was stripped from the frame before it was
512 * passed to the hardware, we should now reinsert it again because
513 * mac80211 will expect the same data to be present in the
514 * frame as it was passed to us.
515 */
516 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
517 rt2x00crypto_tx_insert_iv(entry->skb);
518
519 /*
511 * Send frame to debugfs immediately, after this call is completed 520 * Send frame to debugfs immediately, after this call is completed
512 * we are going to overwrite the skb->cb array. 521 * we are going to overwrite the skb->cb array.
513 */ 522 */
@@ -563,7 +572,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
563 572
564 rt2x00dev->ops->lib->init_txentry(rt2x00dev, entry); 573 rt2x00dev->ops->lib->init_txentry(rt2x00dev, entry);
565 574
566 __clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 575 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
567 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 576 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
568 577
569 /* 578 /*
@@ -585,7 +594,7 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
585 struct ieee80211_supported_band *sband; 594 struct ieee80211_supported_band *sband;
586 struct ieee80211_hdr *hdr; 595 struct ieee80211_hdr *hdr;
587 const struct rt2x00_rate *rate; 596 const struct rt2x00_rate *rate;
588 unsigned int header_size; 597 unsigned int header_length;
589 unsigned int align; 598 unsigned int align;
590 unsigned int i; 599 unsigned int i;
591 int idx = -1; 600 int idx = -1;
@@ -613,10 +622,19 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
613 * The data behind the ieee80211 header must be 622 * The data behind the ieee80211 header must be
614 * aligned on a 4 byte boundary. 623 * aligned on a 4 byte boundary.
615 */ 624 */
616 header_size = ieee80211_get_hdrlen_from_skb(entry->skb); 625 header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
617 align = ((unsigned long)(entry->skb->data + header_size)) & 3; 626 align = ((unsigned long)(entry->skb->data + header_length)) & 3;
618 627
619 if (align) { 628 /*
629 * Hardware might have stripped the IV/EIV/ICV data,
630 * in that case it is possible that the data was
631 * provided separately (through hardware descriptor)
632 * in which case we should reinsert the data into the frame.
633 */
634 if ((rxdesc.flags & RX_FLAG_IV_STRIPPED)) {
635 rt2x00crypto_rx_insert_iv(entry->skb, align,
636 header_length, &rxdesc);
637 } else if (align) {
620 skb_push(entry->skb, align); 638 skb_push(entry->skb, align);
621 /* Move entire frame in 1 command */ 639 /* Move entire frame in 1 command */
622 memmove(entry->skb->data, entry->skb->data + align, 640 memmove(entry->skb->data, entry->skb->data + align,
@@ -635,7 +653,7 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
635 653
636 if (((rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) && 654 if (((rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) &&
637 (rate->plcp == rxdesc.signal)) || 655 (rate->plcp == rxdesc.signal)) ||
638 (!(rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) && 656 ((rxdesc.dev_flags & RXDONE_SIGNAL_BITRATE) &&
639 (rate->bitrate == rxdesc.signal))) { 657 (rate->bitrate == rxdesc.signal))) {
640 idx = i; 658 idx = i;
641 break; 659 break;
@@ -657,6 +675,10 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
657 (rxdesc.dev_flags & RXDONE_MY_BSS)) 675 (rxdesc.dev_flags & RXDONE_MY_BSS))
658 rt2x00lib_update_link_stats(&rt2x00dev->link, rxdesc.rssi); 676 rt2x00lib_update_link_stats(&rt2x00dev->link, rxdesc.rssi);
659 677
678 rt2x00debug_update_crypto(rt2x00dev,
679 rxdesc.cipher,
680 rxdesc.cipher_status);
681
660 rt2x00dev->link.qual.rx_success++; 682 rt2x00dev->link.qual.rx_success++;
661 683
662 rx_status->mactime = rxdesc.timestamp; 684 rx_status->mactime = rxdesc.timestamp;
@@ -796,7 +818,6 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
796 struct ieee80211_rate *rates; 818 struct ieee80211_rate *rates;
797 unsigned int num_rates; 819 unsigned int num_rates;
798 unsigned int i; 820 unsigned int i;
799 unsigned char tx_power;
800 821
801 num_rates = 0; 822 num_rates = 0;
802 if (spec->supported_rates & SUPPORT_RATE_CCK) 823 if (spec->supported_rates & SUPPORT_RATE_CCK)
@@ -822,20 +843,9 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
822 * Initialize Channel list. 843 * Initialize Channel list.
823 */ 844 */
824 for (i = 0; i < spec->num_channels; i++) { 845 for (i = 0; i < spec->num_channels; i++) {
825 if (spec->channels[i].channel <= 14) {
826 if (spec->tx_power_bg)
827 tx_power = spec->tx_power_bg[i];
828 else
829 tx_power = spec->tx_power_default;
830 } else {
831 if (spec->tx_power_a)
832 tx_power = spec->tx_power_a[i];
833 else
834 tx_power = spec->tx_power_default;
835 }
836
837 rt2x00lib_channel(&channels[i], 846 rt2x00lib_channel(&channels[i],
838 spec->channels[i].channel, tx_power, i); 847 spec->channels[i].channel,
848 spec->channels_info[i].tx_power1, i);
839 } 849 }
840 850
841 /* 851 /*
@@ -878,7 +888,7 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
878 888
879static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev) 889static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
880{ 890{
881 if (test_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags)) 891 if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
882 ieee80211_unregister_hw(rt2x00dev->hw); 892 ieee80211_unregister_hw(rt2x00dev->hw);
883 893
884 if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) { 894 if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) {
@@ -887,6 +897,8 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
887 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; 897 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
888 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; 898 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
889 } 899 }
900
901 kfree(rt2x00dev->spec.channels_info);
890} 902}
891 903
892static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) 904static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -894,6 +906,9 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
894 struct hw_mode_spec *spec = &rt2x00dev->spec; 906 struct hw_mode_spec *spec = &rt2x00dev->spec;
895 int status; 907 int status;
896 908
909 if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
910 return 0;
911
897 /* 912 /*
898 * Initialize HW modes. 913 * Initialize HW modes.
899 */ 914 */
@@ -915,7 +930,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
915 return status; 930 return status;
916 } 931 }
917 932
918 __set_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags); 933 set_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags);
919 934
920 return 0; 935 return 0;
921} 936}
@@ -925,7 +940,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
925 */ 940 */
926static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev) 941static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
927{ 942{
928 if (!__test_and_clear_bit(DEVICE_INITIALIZED, &rt2x00dev->flags)) 943 if (!test_and_clear_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags))
929 return; 944 return;
930 945
931 /* 946 /*
@@ -948,7 +963,7 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
948{ 963{
949 int status; 964 int status;
950 965
951 if (test_bit(DEVICE_INITIALIZED, &rt2x00dev->flags)) 966 if (test_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags))
952 return 0; 967 return 0;
953 968
954 /* 969 /*
@@ -967,7 +982,7 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
967 return status; 982 return status;
968 } 983 }
969 984
970 __set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags); 985 set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
971 986
972 /* 987 /*
973 * Register the extra components. 988 * Register the extra components.
@@ -981,7 +996,7 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
981{ 996{
982 int retval; 997 int retval;
983 998
984 if (test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 999 if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
985 return 0; 1000 return 0;
986 1001
987 /* 1002 /*
@@ -999,28 +1014,18 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
999 if (retval) 1014 if (retval)
1000 return retval; 1015 return retval;
1001 1016
1002 /*
1003 * Enable radio.
1004 */
1005 retval = rt2x00lib_enable_radio(rt2x00dev);
1006 if (retval) {
1007 rt2x00lib_uninitialize(rt2x00dev);
1008 return retval;
1009 }
1010
1011 rt2x00dev->intf_ap_count = 0; 1017 rt2x00dev->intf_ap_count = 0;
1012 rt2x00dev->intf_sta_count = 0; 1018 rt2x00dev->intf_sta_count = 0;
1013 rt2x00dev->intf_associated = 0; 1019 rt2x00dev->intf_associated = 0;
1014 1020
1015 __set_bit(DEVICE_STARTED, &rt2x00dev->flags); 1021 set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
1016 __set_bit(DEVICE_DIRTY_CONFIG, &rt2x00dev->flags);
1017 1022
1018 return 0; 1023 return 0;
1019} 1024}
1020 1025
1021void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) 1026void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
1022{ 1027{
1023 if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 1028 if (!test_and_clear_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
1024 return; 1029 return;
1025 1030
1026 /* 1031 /*
@@ -1032,8 +1037,6 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
1032 rt2x00dev->intf_ap_count = 0; 1037 rt2x00dev->intf_ap_count = 0;
1033 rt2x00dev->intf_sta_count = 0; 1038 rt2x00dev->intf_sta_count = 0;
1034 rt2x00dev->intf_associated = 0; 1039 rt2x00dev->intf_associated = 0;
1035
1036 __clear_bit(DEVICE_STARTED, &rt2x00dev->flags);
1037} 1040}
1038 1041
1039/* 1042/*
@@ -1049,6 +1052,11 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1049 */ 1052 */
1050 rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf); 1053 rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);
1051 1054
1055 rt2x00dev->hw->wiphy->interface_modes =
1056 BIT(NL80211_IFTYPE_AP) |
1057 BIT(NL80211_IFTYPE_STATION) |
1058 BIT(NL80211_IFTYPE_ADHOC);
1059
1052 /* 1060 /*
1053 * Let the driver probe the device to detect the capabilities. 1061 * Let the driver probe the device to detect the capabilities.
1054 */ 1062 */
@@ -1088,7 +1096,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1088 rt2x00rfkill_allocate(rt2x00dev); 1096 rt2x00rfkill_allocate(rt2x00dev);
1089 rt2x00debug_register(rt2x00dev); 1097 rt2x00debug_register(rt2x00dev);
1090 1098
1091 __set_bit(DEVICE_PRESENT, &rt2x00dev->flags); 1099 set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1092 1100
1093 return 0; 1101 return 0;
1094 1102
@@ -1101,7 +1109,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_probe_dev);
1101 1109
1102void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) 1110void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1103{ 1111{
1104 __clear_bit(DEVICE_PRESENT, &rt2x00dev->flags); 1112 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1105 1113
1106 /* 1114 /*
1107 * Disable radio. 1115 * Disable radio.
@@ -1146,14 +1154,15 @@ int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
1146 int retval; 1154 int retval;
1147 1155
1148 NOTICE(rt2x00dev, "Going to sleep.\n"); 1156 NOTICE(rt2x00dev, "Going to sleep.\n");
1149 __clear_bit(DEVICE_PRESENT, &rt2x00dev->flags);
1150 1157
1151 /* 1158 /*
1152 * Only continue if mac80211 has open interfaces. 1159 * Only continue if mac80211 has open interfaces.
1153 */ 1160 */
1154 if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 1161 if (!test_and_clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
1162 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
1155 goto exit; 1163 goto exit;
1156 __set_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags); 1164
1165 set_bit(DEVICE_STATE_STARTED_SUSPEND, &rt2x00dev->flags);
1157 1166
1158 /* 1167 /*
1159 * Disable radio. 1168 * Disable radio.
@@ -1203,8 +1212,8 @@ static void rt2x00lib_resume_intf(void *data, u8 *mac,
1203 /* 1212 /*
1204 * Master or Ad-hoc mode require a new beacon update. 1213 * Master or Ad-hoc mode require a new beacon update.
1205 */ 1214 */
1206 if (vif->type == IEEE80211_IF_TYPE_AP || 1215 if (vif->type == NL80211_IFTYPE_AP ||
1207 vif->type == IEEE80211_IF_TYPE_IBSS) 1216 vif->type == NL80211_IFTYPE_ADHOC)
1208 intf->delayed_flags |= DELAYED_UPDATE_BEACON; 1217 intf->delayed_flags |= DELAYED_UPDATE_BEACON;
1209 1218
1210 spin_unlock(&intf->lock); 1219 spin_unlock(&intf->lock);
@@ -1225,7 +1234,7 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1225 /* 1234 /*
1226 * Only continue if mac80211 had open interfaces. 1235 * Only continue if mac80211 had open interfaces.
1227 */ 1236 */
1228 if (!__test_and_clear_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags)) 1237 if (!test_and_clear_bit(DEVICE_STATE_STARTED_SUSPEND, &rt2x00dev->flags))
1229 return 0; 1238 return 0;
1230 1239
1231 /* 1240 /*
@@ -1252,7 +1261,7 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1252 /* 1261 /*
1253 * We are ready again to receive requests from mac80211. 1262 * We are ready again to receive requests from mac80211.
1254 */ 1263 */
1255 __set_bit(DEVICE_PRESENT, &rt2x00dev->flags); 1264 set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1256 1265
1257 /* 1266 /*
1258 * It is possible that during that mac80211 has attempted 1267 * It is possible that during that mac80211 has attempted
@@ -1272,7 +1281,7 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1272 return 0; 1281 return 0;
1273 1282
1274exit: 1283exit:
1275 rt2x00lib_disable_radio(rt2x00dev); 1284 rt2x00lib_stop(rt2x00dev);
1276 rt2x00lib_uninitialize(rt2x00dev); 1285 rt2x00lib_uninitialize(rt2x00dev);
1277 rt2x00debug_deregister(rt2x00dev); 1286 rt2x00debug_deregister(rt2x00dev);
1278 1287
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index c5fb3a72cf37..797eb619aa0a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -88,7 +88,7 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev);
88 */ 88 */
89void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, 89void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
90 struct rt2x00_intf *intf, 90 struct rt2x00_intf *intf,
91 enum ieee80211_if_types type, 91 enum nl80211_iftype type,
92 u8 *mac, u8 *bssid); 92 u8 *mac, u8 *bssid);
93void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev, 93void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
94 struct rt2x00_intf *intf, 94 struct rt2x00_intf *intf,
@@ -181,6 +181,8 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev);
181void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev); 181void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev);
182void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, 182void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
183 enum rt2x00_dump_type type, struct sk_buff *skb); 183 enum rt2x00_dump_type type, struct sk_buff *skb);
184void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
185 enum cipher cipher, enum rx_crypto status);
184#else 186#else
185static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) 187static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
186{ 188{
@@ -195,9 +197,54 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
195 struct sk_buff *skb) 197 struct sk_buff *skb)
196{ 198{
197} 199}
200
201static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
202 enum cipher cipher,
203 enum rx_crypto status)
204{
205}
198#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 206#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
199 207
200/* 208/*
209 * Crypto handlers.
210 */
211#ifdef CONFIG_RT2X00_LIB_CRYPTO
212enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key);
213unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info);
214void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len);
215void rt2x00crypto_tx_insert_iv(struct sk_buff *skb);
216void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
217 unsigned int header_length,
218 struct rxdone_entry_desc *rxdesc);
219#else
220static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
221{
222 return CIPHER_NONE;
223}
224
225static inline unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info)
226{
227 return 0;
228}
229
230static inline void rt2x00crypto_tx_remove_iv(struct sk_buff *skb,
231 unsigned int iv_len)
232{
233}
234
235static inline void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
236{
237}
238
239static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
240 unsigned int align,
241 unsigned int header_length,
242 struct rxdone_entry_desc *rxdesc)
243{
244}
245#endif
246
247/*
201 * RFkill handlers. 248 * RFkill handlers.
202 */ 249 */
203#ifdef CONFIG_RT2X00_LIB_RFKILL 250#ifdef CONFIG_RT2X00_LIB_RFKILL
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index d06507388635..485c40de5cc0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -36,21 +36,22 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb); 36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
37 struct ieee80211_tx_info *rts_info; 37 struct ieee80211_tx_info *rts_info;
38 struct sk_buff *skb; 38 struct sk_buff *skb;
39 int size; 39 unsigned int data_length;
40 int retval = 0;
40 41
41 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) 42 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
42 size = sizeof(struct ieee80211_cts); 43 data_length = sizeof(struct ieee80211_cts);
43 else 44 else
44 size = sizeof(struct ieee80211_rts); 45 data_length = sizeof(struct ieee80211_rts);
45 46
46 skb = dev_alloc_skb(size + rt2x00dev->hw->extra_tx_headroom); 47 skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
47 if (!skb) { 48 if (unlikely(!skb)) {
48 WARNING(rt2x00dev, "Failed to create RTS/CTS frame.\n"); 49 WARNING(rt2x00dev, "Failed to create RTS/CTS frame.\n");
49 return NETDEV_TX_BUSY; 50 return -ENOMEM;
50 } 51 }
51 52
52 skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom); 53 skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
53 skb_put(skb, size); 54 skb_put(skb, data_length);
54 55
55 /* 56 /*
56 * Copy TX information over from original frame to 57 * Copy TX information over from original frame to
@@ -63,7 +64,6 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
63 */ 64 */
64 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb)); 65 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
65 rts_info = IEEE80211_SKB_CB(skb); 66 rts_info = IEEE80211_SKB_CB(skb);
66 rts_info->control.hw_key = NULL;
67 rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS; 67 rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS;
68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT; 68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT;
69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS; 69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;
@@ -73,22 +73,33 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
73 else 73 else
74 rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK; 74 rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
75 75
76 skb->do_not_encrypt = 1;
77
78 /*
79 * RTS/CTS frame should use the length of the frame plus any
80 * encryption overhead that will be added by the hardware.
81 */
82#ifdef CONFIG_RT2X00_LIB_CRYPTO
83 if (!frag_skb->do_not_encrypt)
84 data_length += rt2x00crypto_tx_overhead(tx_info);
85#endif /* CONFIG_RT2X00_LIB_CRYPTO */
86
76 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) 87 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
77 ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif, 88 ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
78 frag_skb->data, size, tx_info, 89 frag_skb->data, data_length, tx_info,
79 (struct ieee80211_cts *)(skb->data)); 90 (struct ieee80211_cts *)(skb->data));
80 else 91 else
81 ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif, 92 ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
82 frag_skb->data, size, tx_info, 93 frag_skb->data, data_length, tx_info,
83 (struct ieee80211_rts *)(skb->data)); 94 (struct ieee80211_rts *)(skb->data));
84 95
85 if (rt2x00queue_write_tx_frame(queue, skb)) { 96 retval = rt2x00queue_write_tx_frame(queue, skb);
97 if (retval) {
86 dev_kfree_skb_any(skb); 98 dev_kfree_skb_any(skb);
87 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); 99 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
88 return NETDEV_TX_BUSY;
89 } 100 }
90 101
91 return NETDEV_TX_OK; 102 return retval;
92} 103}
93 104
94int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 105int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -106,11 +117,8 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
106 * Note that we can only stop the TX queues inside the TX path 117 * Note that we can only stop the TX queues inside the TX path
107 * due to possible race conditions in mac80211. 118 * due to possible race conditions in mac80211.
108 */ 119 */
109 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) { 120 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
110 ieee80211_stop_queues(hw); 121 goto exit_fail;
111 dev_kfree_skb_any(skb);
112 return NETDEV_TX_OK;
113 }
114 122
115 /* 123 /*
116 * Determine which queue to put packet on. 124 * Determine which queue to put packet on.
@@ -141,26 +149,25 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
141 if ((tx_info->flags & (IEEE80211_TX_CTL_USE_RTS_CTS | 149 if ((tx_info->flags & (IEEE80211_TX_CTL_USE_RTS_CTS |
142 IEEE80211_TX_CTL_USE_CTS_PROTECT)) && 150 IEEE80211_TX_CTL_USE_CTS_PROTECT)) &&
143 !rt2x00dev->ops->hw->set_rts_threshold) { 151 !rt2x00dev->ops->hw->set_rts_threshold) {
144 if (rt2x00queue_available(queue) <= 1) { 152 if (rt2x00queue_available(queue) <= 1)
145 ieee80211_stop_queue(rt2x00dev->hw, qid); 153 goto exit_fail;
146 return NETDEV_TX_BUSY;
147 }
148
149 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) {
150 ieee80211_stop_queue(rt2x00dev->hw, qid);
151 return NETDEV_TX_BUSY;
152 }
153 }
154 154
155 if (rt2x00queue_write_tx_frame(queue, skb)) { 155 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
156 ieee80211_stop_queue(rt2x00dev->hw, qid); 156 goto exit_fail;
157 return NETDEV_TX_BUSY;
158 } 157 }
159 158
159 if (rt2x00queue_write_tx_frame(queue, skb))
160 goto exit_fail;
161
160 if (rt2x00queue_threshold(queue)) 162 if (rt2x00queue_threshold(queue))
161 ieee80211_stop_queue(rt2x00dev->hw, qid); 163 ieee80211_stop_queue(rt2x00dev->hw, qid);
162 164
163 return NETDEV_TX_OK; 165 return NETDEV_TX_OK;
166
167 exit_fail:
168 ieee80211_stop_queue(rt2x00dev->hw, qid);
169 dev_kfree_skb_any(skb);
170 return NETDEV_TX_OK;
164} 171}
165EXPORT_SYMBOL_GPL(rt2x00mac_tx); 172EXPORT_SYMBOL_GPL(rt2x00mac_tx);
166 173
@@ -168,7 +175,7 @@ int rt2x00mac_start(struct ieee80211_hw *hw)
168{ 175{
169 struct rt2x00_dev *rt2x00dev = hw->priv; 176 struct rt2x00_dev *rt2x00dev = hw->priv;
170 177
171 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) 178 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
172 return 0; 179 return 0;
173 180
174 return rt2x00lib_start(rt2x00dev); 181 return rt2x00lib_start(rt2x00dev);
@@ -179,7 +186,7 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
179{ 186{
180 struct rt2x00_dev *rt2x00dev = hw->priv; 187 struct rt2x00_dev *rt2x00dev = hw->priv;
181 188
182 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) 189 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
183 return; 190 return;
184 191
185 rt2x00lib_stop(rt2x00dev); 192 rt2x00lib_stop(rt2x00dev);
@@ -199,12 +206,12 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
199 * Don't allow interfaces to be added 206 * Don't allow interfaces to be added
200 * the device has disappeared. 207 * the device has disappeared.
201 */ 208 */
202 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || 209 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
203 !test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 210 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
204 return -ENODEV; 211 return -ENODEV;
205 212
206 switch (conf->type) { 213 switch (conf->type) {
207 case IEEE80211_IF_TYPE_AP: 214 case NL80211_IFTYPE_AP:
208 /* 215 /*
209 * We don't support mixed combinations of 216 * We don't support mixed combinations of
210 * sta and ap interfaces. 217 * sta and ap interfaces.
@@ -220,8 +227,8 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
220 return -ENOBUFS; 227 return -ENOBUFS;
221 228
222 break; 229 break;
223 case IEEE80211_IF_TYPE_STA: 230 case NL80211_IFTYPE_STATION:
224 case IEEE80211_IF_TYPE_IBSS: 231 case NL80211_IFTYPE_ADHOC:
225 /* 232 /*
226 * We don't support mixed combinations of 233 * We don't support mixed combinations of
227 * sta and ap interfaces. 234 * sta and ap interfaces.
@@ -249,7 +256,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
249 */ 256 */
250 for (i = 0; i < queue->limit; i++) { 257 for (i = 0; i < queue->limit; i++) {
251 entry = &queue->entries[i]; 258 entry = &queue->entries[i];
252 if (!__test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags)) 259 if (!test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags))
253 break; 260 break;
254 } 261 }
255 262
@@ -261,7 +268,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
261 * increase interface count and start initialization. 268 * increase interface count and start initialization.
262 */ 269 */
263 270
264 if (conf->type == IEEE80211_IF_TYPE_AP) 271 if (conf->type == NL80211_IFTYPE_AP)
265 rt2x00dev->intf_ap_count++; 272 rt2x00dev->intf_ap_count++;
266 else 273 else
267 rt2x00dev->intf_sta_count++; 274 rt2x00dev->intf_sta_count++;
@@ -270,7 +277,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
270 spin_lock_init(&intf->seqlock); 277 spin_lock_init(&intf->seqlock);
271 intf->beacon = entry; 278 intf->beacon = entry;
272 279
273 if (conf->type == IEEE80211_IF_TYPE_AP) 280 if (conf->type == NL80211_IFTYPE_AP)
274 memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN); 281 memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN);
275 memcpy(&intf->mac, conf->mac_addr, ETH_ALEN); 282 memcpy(&intf->mac, conf->mac_addr, ETH_ALEN);
276 283
@@ -303,12 +310,12 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
303 * either the device has disappeared or when 310 * either the device has disappeared or when
304 * no interface is present. 311 * no interface is present.
305 */ 312 */
306 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || 313 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
307 (conf->type == IEEE80211_IF_TYPE_AP && !rt2x00dev->intf_ap_count) || 314 (conf->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
308 (conf->type != IEEE80211_IF_TYPE_AP && !rt2x00dev->intf_sta_count)) 315 (conf->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
309 return; 316 return;
310 317
311 if (conf->type == IEEE80211_IF_TYPE_AP) 318 if (conf->type == NL80211_IFTYPE_AP)
312 rt2x00dev->intf_ap_count--; 319 rt2x00dev->intf_ap_count--;
313 else 320 else
314 rt2x00dev->intf_sta_count--; 321 rt2x00dev->intf_sta_count--;
@@ -317,59 +324,59 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
317 * Release beacon entry so it is available for 324 * Release beacon entry so it is available for
318 * new interfaces again. 325 * new interfaces again.
319 */ 326 */
320 __clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags); 327 clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags);
321 328
322 /* 329 /*
323 * Make sure the bssid and mac address registers 330 * Make sure the bssid and mac address registers
324 * are cleared to prevent false ACKing of frames. 331 * are cleared to prevent false ACKing of frames.
325 */ 332 */
326 rt2x00lib_config_intf(rt2x00dev, intf, 333 rt2x00lib_config_intf(rt2x00dev, intf,
327 IEEE80211_IF_TYPE_INVALID, NULL, NULL); 334 NL80211_IFTYPE_UNSPECIFIED, NULL, NULL);
328} 335}
329EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface); 336EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface);
330 337
331int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 338int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
332{ 339{
333 struct rt2x00_dev *rt2x00dev = hw->priv; 340 struct rt2x00_dev *rt2x00dev = hw->priv;
334 int force_reconfig; 341 int radio_on;
342 int status;
335 343
336 /* 344 /*
337 * Mac80211 might be calling this function while we are trying 345 * Mac80211 might be calling this function while we are trying
338 * to remove the device or perhaps suspending it. 346 * to remove the device or perhaps suspending it.
339 */ 347 */
340 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) 348 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
341 return 0; 349 return 0;
342 350
343 /* 351 /*
344 * Check if we need to disable the radio, 352 * Only change device state when the radio is enabled. It does not
345 * if this is not the case, at least the RX must be disabled. 353 * matter what parameters we have configured when the radio is disabled
354 * because we won't be able to send or receive anyway. Also note that
355 * some configuration parameters (e.g. channel and antenna values) can
356 * only be set when the radio is enabled.
346 */ 357 */
347 if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) { 358 radio_on = test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags);
348 if (!conf->radio_enabled) 359 if (conf->radio_enabled) {
349 rt2x00lib_disable_radio(rt2x00dev); 360 /* For programming the values, we have to turn RX off */
350 else 361 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
351 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
352 }
353 362
354 /* 363 /* Enable the radio */
355 * When the DEVICE_DIRTY_CONFIG flag is set, the device has recently 364 status = rt2x00lib_enable_radio(rt2x00dev);
356 * been started and the configuration must be forced upon the hardware. 365 if (unlikely(status))
357 * Otherwise registers will not be intialized correctly and could 366 return status;
358 * result in non-working hardware because essential registers aren't
359 * initialized.
360 */
361 force_reconfig =
362 __test_and_clear_bit(DEVICE_DIRTY_CONFIG, &rt2x00dev->flags);
363 367
364 rt2x00lib_config(rt2x00dev, conf, force_reconfig); 368 /*
369 * When we've just turned on the radio, we want to reprogram
370 * everything to ensure a consistent state
371 */
372 rt2x00lib_config(rt2x00dev, conf, !radio_on);
365 373
366 /* 374 /* Turn RX back on */
367 * Reenable RX only if the radio should be on.
368 */
369 if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
370 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); 375 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
371 else if (conf->radio_enabled) 376 } else {
372 return rt2x00lib_enable_radio(rt2x00dev); 377 /* Disable the radio */
378 rt2x00lib_disable_radio(rt2x00dev);
379 }
373 380
374 return 0; 381 return 0;
375} 382}
@@ -388,7 +395,7 @@ int rt2x00mac_config_interface(struct ieee80211_hw *hw,
388 * Mac80211 might be calling this function while we are trying 395 * Mac80211 might be calling this function while we are trying
389 * to remove the device or perhaps suspending it. 396 * to remove the device or perhaps suspending it.
390 */ 397 */
391 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) 398 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
392 return 0; 399 return 0;
393 400
394 spin_lock(&intf->lock); 401 spin_lock(&intf->lock);
@@ -467,6 +474,90 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
467} 474}
468EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter); 475EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
469 476
477#ifdef CONFIG_RT2X00_LIB_CRYPTO
478int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
479 const u8 *local_address, const u8 *address,
480 struct ieee80211_key_conf *key)
481{
482 struct rt2x00_dev *rt2x00dev = hw->priv;
483 int (*set_key) (struct rt2x00_dev *rt2x00dev,
484 struct rt2x00lib_crypto *crypto,
485 struct ieee80211_key_conf *key);
486 struct rt2x00lib_crypto crypto;
487
488 if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
489 return -EOPNOTSUPP;
490 else if (key->keylen > 32)
491 return -ENOSPC;
492
493 memset(&crypto, 0, sizeof(crypto));
494
495 /*
496 * When in STA mode, bssidx is always 0 otherwise local_address[5]
497 * contains the bss number, see BSS_ID_MASK comments for details.
498 */
499 if (rt2x00dev->intf_sta_count)
500 crypto.bssidx = 0;
501 else
502 crypto.bssidx =
503 local_address[5] & (rt2x00dev->ops->max_ap_intf - 1);
504
505 crypto.cipher = rt2x00crypto_key_to_cipher(key);
506 if (crypto.cipher == CIPHER_NONE)
507 return -EOPNOTSUPP;
508
509 crypto.cmd = cmd;
510 crypto.address = address;
511
512 if (crypto.cipher == CIPHER_TKIP) {
513 if (key->keylen > NL80211_TKIP_DATA_OFFSET_ENCR_KEY)
514 memcpy(&crypto.key,
515 &key->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY],
516 sizeof(crypto.key));
517
518 if (key->keylen > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
519 memcpy(&crypto.tx_mic,
520 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
521 sizeof(crypto.tx_mic));
522
523 if (key->keylen > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY)
524 memcpy(&crypto.rx_mic,
525 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
526 sizeof(crypto.rx_mic));
527 } else
528 memcpy(&crypto.key, &key->key[0], key->keylen);
529
530 /*
531 * Each BSS has a maximum of 4 shared keys.
532 * Shared key index values:
533 * 0) BSS0 key0
534 * 1) BSS0 key1
535 * ...
536 * 4) BSS1 key0
537 * ...
538 * 8) BSS2 key0
539 * ...
540 * Both pairwise as shared key indeces are determined by
541 * driver. This is required because the hardware requires
542 * keys to be assigned in correct order (When key 1 is
543 * provided but key 0 is not, then the key is not found
544 * by the hardware during RX).
545 */
546 key->hw_key_idx = 0;
547
548 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
549 set_key = rt2x00dev->ops->lib->config_pairwise_key;
550 else
551 set_key = rt2x00dev->ops->lib->config_shared_key;
552
553 if (!set_key)
554 return -EOPNOTSUPP;
555
556 return set_key(rt2x00dev, &crypto, key);
557}
558EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
559#endif /* CONFIG_RT2X00_LIB_CRYPTO */
560
470int rt2x00mac_get_stats(struct ieee80211_hw *hw, 561int rt2x00mac_get_stats(struct ieee80211_hw *hw,
471 struct ieee80211_low_level_stats *stats) 562 struct ieee80211_low_level_stats *stats)
472{ 563{
@@ -575,10 +666,11 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
575 queue->cw_max = 10; /* cw_min: 2^10 = 1024. */ 666 queue->cw_max = 10; /* cw_min: 2^10 = 1024. */
576 667
577 queue->aifs = params->aifs; 668 queue->aifs = params->aifs;
669 queue->txop = params->txop;
578 670
579 INFO(rt2x00dev, 671 INFO(rt2x00dev,
580 "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d.\n", 672 "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n",
581 queue_idx, queue->cw_min, queue->cw_max, queue->aifs); 673 queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop);
582 674
583 return 0; 675 return 0;
584} 676}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 898cdd7f57d9..b7f4fe8fba6e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -33,10 +33,11 @@
33struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev, 33struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
34 struct queue_entry *entry) 34 struct queue_entry *entry)
35{ 35{
36 unsigned int frame_size;
37 unsigned int reserved_size;
38 struct sk_buff *skb; 36 struct sk_buff *skb;
39 struct skb_frame_desc *skbdesc; 37 struct skb_frame_desc *skbdesc;
38 unsigned int frame_size;
39 unsigned int head_size = 0;
40 unsigned int tail_size = 0;
40 41
41 /* 42 /*
42 * The frame size includes descriptor size, because the 43 * The frame size includes descriptor size, because the
@@ -49,16 +50,32 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
49 * this means we need at least 3 bytes for moving the frame 50 * this means we need at least 3 bytes for moving the frame
50 * into the correct offset. 51 * into the correct offset.
51 */ 52 */
52 reserved_size = 4; 53 head_size = 4;
54
55 /*
56 * For IV/EIV/ICV assembly we must make sure there is
57 * at least 8 bytes bytes available in headroom for IV/EIV
58 * and 4 bytes for ICV data as tailroon.
59 */
60#ifdef CONFIG_RT2X00_LIB_CRYPTO
61 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
62 head_size += 8;
63 tail_size += 4;
64 }
65#endif /* CONFIG_RT2X00_LIB_CRYPTO */
53 66
54 /* 67 /*
55 * Allocate skbuffer. 68 * Allocate skbuffer.
56 */ 69 */
57 skb = dev_alloc_skb(frame_size + reserved_size); 70 skb = dev_alloc_skb(frame_size + head_size + tail_size);
58 if (!skb) 71 if (!skb)
59 return NULL; 72 return NULL;
60 73
61 skb_reserve(skb, reserved_size); 74 /*
75 * Make sure we not have a frame with the requested bytes
76 * available in the head and tail.
77 */
78 skb_reserve(skb, head_size);
62 skb_put(skb, frame_size); 79 skb_put(skb, frame_size);
63 80
64 /* 81 /*
@@ -83,8 +100,21 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
83{ 100{
84 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 101 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
85 102
86 skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len, 103 /*
87 DMA_TO_DEVICE); 104 * If device has requested headroom, we should make sure that
105 * is also mapped to the DMA so it can be used for transfering
106 * additional descriptor information to the hardware.
107 */
108 skb_push(skb, rt2x00dev->hw->extra_tx_headroom);
109
110 skbdesc->skb_dma =
111 dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
112
113 /*
114 * Restore data pointer to original location again.
115 */
116 skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);
117
88 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; 118 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
89} 119}
90EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); 120EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
@@ -100,7 +130,12 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
100 } 130 }
101 131
102 if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { 132 if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
103 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len, 133 /*
134 * Add headroom to the skb length, it has been removed
135 * by the driver, but it was actually mapped to DMA.
136 */
137 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
138 skb->len + rt2x00dev->hw->extra_tx_headroom,
104 DMA_TO_DEVICE); 139 DMA_TO_DEVICE);
105 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; 140 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
106 } 141 }
@@ -120,7 +155,6 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
120{ 155{
121 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 156 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
122 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 157 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
123 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
124 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 158 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
125 struct ieee80211_rate *rate = 159 struct ieee80211_rate *rate =
126 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 160 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
@@ -140,7 +174,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
140 txdesc->cw_max = entry->queue->cw_max; 174 txdesc->cw_max = entry->queue->cw_max;
141 txdesc->aifs = entry->queue->aifs; 175 txdesc->aifs = entry->queue->aifs;
142 176
143 /* Data length should be extended with 4 bytes for CRC */ 177 /* Data length + CRC + IV/EIV/ICV/MMIC (when using encryption) */
144 data_length = entry->skb->len + 4; 178 data_length = entry->skb->len + 4;
145 179
146 /* 180 /*
@@ -149,6 +183,35 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
149 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) 183 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
150 __set_bit(ENTRY_TXD_ACK, &txdesc->flags); 184 __set_bit(ENTRY_TXD_ACK, &txdesc->flags);
151 185
186#ifdef CONFIG_RT2X00_LIB_CRYPTO
187 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) &&
188 !entry->skb->do_not_encrypt) {
189 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
190
191 __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
192
193 txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);
194
195 if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
196 __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);
197
198 txdesc->key_idx = hw_key->hw_key_idx;
199 txdesc->iv_offset = ieee80211_get_hdrlen_from_skb(entry->skb);
200
201 /*
202 * Extend frame length to include all encryption overhead
203 * that will be added by the hardware.
204 */
205 data_length += rt2x00crypto_tx_overhead(tx_info);
206
207 if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
208 __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);
209
210 if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
211 __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
212 }
213#endif /* CONFIG_RT2X00_LIB_CRYPTO */
214
152 /* 215 /*
153 * Check if this is a RTS/CTS frame 216 * Check if this is a RTS/CTS frame
154 */ 217 */
@@ -214,16 +277,22 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
214 * sequence counter given by mac80211. 277 * sequence counter given by mac80211.
215 */ 278 */
216 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 279 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
217 spin_lock_irqsave(&intf->seqlock, irqflags); 280 if (likely(tx_info->control.vif)) {
281 struct rt2x00_intf *intf;
282
283 intf = vif_to_intf(tx_info->control.vif);
284
285 spin_lock_irqsave(&intf->seqlock, irqflags);
218 286
219 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) 287 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
220 intf->seqno += 0x10; 288 intf->seqno += 0x10;
221 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 289 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
222 hdr->seq_ctrl |= cpu_to_le16(intf->seqno); 290 hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
223 291
224 spin_unlock_irqrestore(&intf->seqlock, irqflags); 292 spin_unlock_irqrestore(&intf->seqlock, irqflags);
225 293
226 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 294 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
295 }
227 } 296 }
228 297
229 /* 298 /*
@@ -305,11 +374,12 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
305 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); 374 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
306 struct txentry_desc txdesc; 375 struct txentry_desc txdesc;
307 struct skb_frame_desc *skbdesc; 376 struct skb_frame_desc *skbdesc;
377 unsigned int iv_len = IEEE80211_SKB_CB(skb)->control.iv_len;
308 378
309 if (unlikely(rt2x00queue_full(queue))) 379 if (unlikely(rt2x00queue_full(queue)))
310 return -EINVAL; 380 return -EINVAL;
311 381
312 if (__test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) { 382 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
313 ERROR(queue->rt2x00dev, 383 ERROR(queue->rt2x00dev,
314 "Arrived at non-free entry in the non-full queue %d.\n" 384 "Arrived at non-free entry in the non-full queue %d.\n"
315 "Please file bug report to %s.\n", 385 "Please file bug report to %s.\n",
@@ -326,21 +396,39 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
326 rt2x00queue_create_tx_descriptor(entry, &txdesc); 396 rt2x00queue_create_tx_descriptor(entry, &txdesc);
327 397
328 /* 398 /*
329 * skb->cb array is now ours and we are free to use it. 399 * All information is retreived from the skb->cb array,
400 * now we should claim ownership of the driver part of that
401 * array.
330 */ 402 */
331 skbdesc = get_skb_frame_desc(entry->skb); 403 skbdesc = get_skb_frame_desc(entry->skb);
332 memset(skbdesc, 0, sizeof(*skbdesc)); 404 memset(skbdesc, 0, sizeof(*skbdesc));
333 skbdesc->entry = entry; 405 skbdesc->entry = entry;
334 406
407 /*
408 * When hardware encryption is supported, and this frame
409 * is to be encrypted, we should strip the IV/EIV data from
410 * the frame so we can provide it to the driver seperately.
411 */
412 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
413 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags))
414 rt2x00crypto_tx_remove_iv(skb, iv_len);
415
416 /*
417 * It could be possible that the queue was corrupted and this
418 * call failed. Just drop the frame, we cannot rollback and pass
419 * the frame to mac80211 because the skb->cb has now been tainted.
420 */
335 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) { 421 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
336 __clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 422 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
337 return -EIO; 423 dev_kfree_skb_any(entry->skb);
424 entry->skb = NULL;
425 return 0;
338 } 426 }
339 427
340 if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags)) 428 if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
341 rt2x00queue_map_txskb(queue->rt2x00dev, skb); 429 rt2x00queue_map_txskb(queue->rt2x00dev, skb);
342 430
343 __set_bit(ENTRY_DATA_PENDING, &entry->flags); 431 set_bit(ENTRY_DATA_PENDING, &entry->flags);
344 432
345 rt2x00queue_index_inc(queue, Q_INDEX); 433 rt2x00queue_index_inc(queue, Q_INDEX);
346 rt2x00queue_write_tx_descriptor(entry, &txdesc); 434 rt2x00queue_write_tx_descriptor(entry, &txdesc);
@@ -653,6 +741,7 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
653 741
654 queue->rt2x00dev = rt2x00dev; 742 queue->rt2x00dev = rt2x00dev;
655 queue->qid = qid; 743 queue->qid = qid;
744 queue->txop = 0;
656 queue->aifs = 2; 745 queue->aifs = 2;
657 queue->cw_min = 5; 746 queue->cw_min = 5;
658 queue->cw_max = 10; 747 queue->cw_max = 10;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index ff78e52ce43c..9dbf04f0f04c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -87,10 +87,13 @@ enum data_queue_qid {
87 * 87 *
88 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX 88 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
89 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX 89 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
90 * @FRAME_DESC_IV_STRIPPED: Frame contained a IV/EIV provided by
91 * mac80211 but was stripped for processing by the driver.
90 */ 92 */
91enum skb_frame_desc_flags { 93enum skb_frame_desc_flags {
92 SKBDESC_DMA_MAPPED_RX = (1 << 0), 94 SKBDESC_DMA_MAPPED_RX = 1 << 0,
93 SKBDESC_DMA_MAPPED_TX = (1 << 1), 95 SKBDESC_DMA_MAPPED_TX = 1 << 1,
96 FRAME_DESC_IV_STRIPPED = 1 << 2,
94}; 97};
95 98
96/** 99/**
@@ -104,6 +107,8 @@ enum skb_frame_desc_flags {
104 * @desc: Pointer to descriptor part of the frame. 107 * @desc: Pointer to descriptor part of the frame.
105 * Note that this pointer could point to something outside 108 * Note that this pointer could point to something outside
106 * of the scope of the skb->data pointer. 109 * of the scope of the skb->data pointer.
110 * @iv: IV data used during encryption/decryption.
111 * @eiv: EIV data used during encryption/decryption.
107 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer. 112 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
108 * @entry: The entry to which this sk buffer belongs. 113 * @entry: The entry to which this sk buffer belongs.
109 */ 114 */
@@ -113,6 +118,9 @@ struct skb_frame_desc {
113 unsigned int desc_len; 118 unsigned int desc_len;
114 void *desc; 119 void *desc;
115 120
121 __le32 iv;
122 __le32 eiv;
123
116 dma_addr_t skb_dma; 124 dma_addr_t skb_dma;
117 125
118 struct queue_entry *entry; 126 struct queue_entry *entry;
@@ -132,13 +140,14 @@ static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
132/** 140/**
133 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc 141 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
134 * 142 *
135 * @RXDONE_SIGNAL_PLCP: Does the signal field contain the plcp value, 143 * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
136 * or does it contain the bitrate itself. 144 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
137 * @RXDONE_MY_BSS: Does this frame originate from device's BSS. 145 * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
138 */ 146 */
139enum rxdone_entry_desc_flags { 147enum rxdone_entry_desc_flags {
140 RXDONE_SIGNAL_PLCP = 1 << 0, 148 RXDONE_SIGNAL_PLCP = 1 << 0,
141 RXDONE_MY_BSS = 1 << 1, 149 RXDONE_SIGNAL_BITRATE = 1 << 1,
150 RXDONE_MY_BSS = 1 << 2,
142}; 151};
143 152
144/** 153/**
@@ -152,7 +161,11 @@ enum rxdone_entry_desc_flags {
152 * @size: Data size of the received frame. 161 * @size: Data size of the received frame.
153 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags). 162 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
154 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags). 163 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
155 164 * @cipher: Cipher type used during decryption.
165 * @cipher_status: Decryption status.
166 * @iv: IV data used during decryption.
167 * @eiv: EIV data used during decryption.
168 * @icv: ICV data used during decryption.
156 */ 169 */
157struct rxdone_entry_desc { 170struct rxdone_entry_desc {
158 u64 timestamp; 171 u64 timestamp;
@@ -161,6 +174,12 @@ struct rxdone_entry_desc {
161 int size; 174 int size;
162 int flags; 175 int flags;
163 int dev_flags; 176 int dev_flags;
177 u8 cipher;
178 u8 cipher_status;
179
180 __le32 iv;
181 __le32 eiv;
182 __le32 icv;
164}; 183};
165 184
166/** 185/**
@@ -206,6 +225,10 @@ struct txdone_entry_desc {
206 * @ENTRY_TXD_BURST: This frame belongs to the same burst event. 225 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
207 * @ENTRY_TXD_ACK: An ACK is required for this frame. 226 * @ENTRY_TXD_ACK: An ACK is required for this frame.
208 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used. 227 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
228 * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
229 * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
230 * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
231 * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
209 */ 232 */
210enum txentry_desc_flags { 233enum txentry_desc_flags {
211 ENTRY_TXD_RTS_FRAME, 234 ENTRY_TXD_RTS_FRAME,
@@ -218,6 +241,10 @@ enum txentry_desc_flags {
218 ENTRY_TXD_BURST, 241 ENTRY_TXD_BURST,
219 ENTRY_TXD_ACK, 242 ENTRY_TXD_ACK,
220 ENTRY_TXD_RETRY_MODE, 243 ENTRY_TXD_RETRY_MODE,
244 ENTRY_TXD_ENCRYPT,
245 ENTRY_TXD_ENCRYPT_PAIRWISE,
246 ENTRY_TXD_ENCRYPT_IV,
247 ENTRY_TXD_ENCRYPT_MMIC,
221}; 248};
222 249
223/** 250/**
@@ -236,6 +263,9 @@ enum txentry_desc_flags {
236 * @ifs: IFS value. 263 * @ifs: IFS value.
237 * @cw_min: cwmin value. 264 * @cw_min: cwmin value.
238 * @cw_max: cwmax value. 265 * @cw_max: cwmax value.
266 * @cipher: Cipher type used for encryption.
267 * @key_idx: Key index used for encryption.
268 * @iv_offset: Position where IV should be inserted by hardware.
239 */ 269 */
240struct txentry_desc { 270struct txentry_desc {
241 unsigned long flags; 271 unsigned long flags;
@@ -252,6 +282,10 @@ struct txentry_desc {
252 short ifs; 282 short ifs;
253 short cw_min; 283 short cw_min;
254 short cw_max; 284 short cw_max;
285
286 enum cipher cipher;
287 u16 key_idx;
288 u16 iv_offset;
255}; 289};
256 290
257/** 291/**
@@ -335,6 +369,7 @@ enum queue_index {
335 * @length: Number of frames in queue. 369 * @length: Number of frames in queue.
336 * @index: Index pointers to entry positions in the queue, 370 * @index: Index pointers to entry positions in the queue,
337 * use &enum queue_index to get a specific index field. 371 * use &enum queue_index to get a specific index field.
372 * @txop: maximum burst time.
338 * @aifs: The aifs value for outgoing frames (field ignored in RX queue). 373 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
339 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue). 374 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
340 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue). 375 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
@@ -354,6 +389,7 @@ struct data_queue {
354 unsigned short length; 389 unsigned short length;
355 unsigned short index[Q_INDEX_MAX]; 390 unsigned short index[Q_INDEX_MAX];
356 391
392 unsigned short txop;
357 unsigned short aifs; 393 unsigned short aifs;
358 unsigned short cw_min; 394 unsigned short cw_min;
359 unsigned short cw_max; 395 unsigned short cw_max;
@@ -484,25 +520,51 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
484} 520}
485 521
486/** 522/**
487 * rt2x00_desc_read - Read a word from the hardware descriptor. 523 * _rt2x00_desc_read - Read a word from the hardware descriptor.
524 * @desc: Base descriptor address
525 * @word: Word index from where the descriptor should be read.
526 * @value: Address where the descriptor value should be written into.
527 */
528static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
529{
530 *value = desc[word];
531}
532
533/**
534 * rt2x00_desc_read - Read a word from the hardware descriptor, this
535 * function will take care of the byte ordering.
488 * @desc: Base descriptor address 536 * @desc: Base descriptor address
489 * @word: Word index from where the descriptor should be read. 537 * @word: Word index from where the descriptor should be read.
490 * @value: Address where the descriptor value should be written into. 538 * @value: Address where the descriptor value should be written into.
491 */ 539 */
492static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value) 540static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
493{ 541{
494 *value = le32_to_cpu(desc[word]); 542 __le32 tmp;
543 _rt2x00_desc_read(desc, word, &tmp);
544 *value = le32_to_cpu(tmp);
545}
546
547/**
548 * rt2x00_desc_write - write a word to the hardware descriptor, this
549 * function will take care of the byte ordering.
550 * @desc: Base descriptor address
551 * @word: Word index from where the descriptor should be written.
552 * @value: Value that should be written into the descriptor.
553 */
554static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
555{
556 desc[word] = value;
495} 557}
496 558
497/** 559/**
498 * rt2x00_desc_write - wrote a word to the hardware descriptor. 560 * rt2x00_desc_write - write a word to the hardware descriptor.
499 * @desc: Base descriptor address 561 * @desc: Base descriptor address
500 * @word: Word index from where the descriptor should be written. 562 * @word: Word index from where the descriptor should be written.
501 * @value: Value that should be written into the descriptor. 563 * @value: Value that should be written into the descriptor.
502 */ 564 */
503static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value) 565static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
504{ 566{
505 desc[word] = cpu_to_le32(value); 567 _rt2x00_desc_write(desc, word, cpu_to_le32(value));
506} 568}
507 569
508#endif /* RT2X00QUEUE_H */ 570#endif /* RT2X00QUEUE_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 7e88ce5651b9..c2fba7c9f05c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -27,6 +27,16 @@
27#define RT2X00REG_H 27#define RT2X00REG_H
28 28
29/* 29/*
30 * RX crypto status
31 */
32enum rx_crypto {
33 RX_CRYPTO_SUCCESS = 0,
34 RX_CRYPTO_FAIL_ICV = 1,
35 RX_CRYPTO_FAIL_MIC = 2,
36 RX_CRYPTO_FAIL_KEY = 3,
37};
38
39/*
30 * Antenna values 40 * Antenna values
31 */ 41 */
32enum antenna { 42enum antenna {
@@ -104,7 +114,14 @@ enum cipher {
104 */ 114 */
105 CIPHER_CKIP64 = 5, 115 CIPHER_CKIP64 = 5,
106 CIPHER_CKIP128 = 6, 116 CIPHER_CKIP128 = 6,
107 CIPHER_TKIP_NO_MIC = 7, 117 CIPHER_TKIP_NO_MIC = 7, /* Don't send to device */
118
119/*
120 * Max cipher type.
121 * Note that CIPHER_NONE isn't counted, and CKIP64 and CKIP128
122 * are excluded due to limitations in mac80211.
123 */
124 CIPHER_MAX = 4,
108}; 125};
109 126
110/* 127/*
@@ -136,7 +153,7 @@ struct rt2x00_field32 {
136 */ 153 */
137#define is_power_of_two(x) ( !((x) & ((x)-1)) ) 154#define is_power_of_two(x) ( !((x) & ((x)-1)) )
138#define low_bit_mask(x) ( ((x)-1) & ~(x) ) 155#define low_bit_mask(x) ( ((x)-1) & ~(x) )
139#define is_valid_mask(x) is_power_of_two(1 + (x) + low_bit_mask(x)) 156#define is_valid_mask(x) is_power_of_two(1LU + (x) + low_bit_mask(x))
140 157
141/* 158/*
142 * Macro's to find first set bit in a variable. 159 * Macro's to find first set bit in a variable.
@@ -173,8 +190,7 @@ struct rt2x00_field32 {
173 * does not exceed the given typelimit. 190 * does not exceed the given typelimit.
174 */ 191 */
175#define FIELD_CHECK(__mask, __type) \ 192#define FIELD_CHECK(__mask, __type) \
176 BUILD_BUG_ON(!__builtin_constant_p(__mask) || \ 193 BUILD_BUG_ON(!(__mask) || \
177 !(__mask) || \
178 !is_valid_mask(__mask) || \ 194 !is_valid_mask(__mask) || \
179 (__mask) != (__type)(__mask)) \ 195 (__mask) != (__type)(__mask)) \
180 196
diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
index 04b29716d356..55eff58f1889 100644
--- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c
+++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
@@ -41,20 +41,19 @@ static int rt2x00rfkill_toggle_radio(void *data, enum rfkill_state state)
41 /* 41 /*
42 * Only continue if there are enabled interfaces. 42 * Only continue if there are enabled interfaces.
43 */ 43 */
44 if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 44 if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
45 return 0; 45 return 0;
46 46
47 if (state == RFKILL_STATE_UNBLOCKED) { 47 if (state == RFKILL_STATE_UNBLOCKED) {
48 INFO(rt2x00dev, "Hardware button pressed, enabling radio.\n"); 48 INFO(rt2x00dev, "RFKILL event: enabling radio.\n");
49 __clear_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags); 49 clear_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags);
50 retval = rt2x00lib_enable_radio(rt2x00dev); 50 retval = rt2x00lib_enable_radio(rt2x00dev);
51 } else if (state == RFKILL_STATE_SOFT_BLOCKED) { 51 } else if (state == RFKILL_STATE_SOFT_BLOCKED) {
52 INFO(rt2x00dev, "Hardware button pressed, disabling radio.\n"); 52 INFO(rt2x00dev, "RFKILL event: disabling radio.\n");
53 __set_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags); 53 set_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags);
54 rt2x00lib_disable_radio(rt2x00dev); 54 rt2x00lib_disable_radio(rt2x00dev);
55 } else { 55 } else {
56 WARNING(rt2x00dev, "Received unexpected rfkill state %d.\n", 56 WARNING(rt2x00dev, "RFKILL event: unknown state %d.\n", state);
57 state);
58 } 57 }
59 58
60 return retval; 59 return retval;
@@ -64,7 +63,12 @@ static int rt2x00rfkill_get_state(void *data, enum rfkill_state *state)
64{ 63{
65 struct rt2x00_dev *rt2x00dev = data; 64 struct rt2x00_dev *rt2x00dev = data;
66 65
67 *state = rt2x00dev->rfkill->state; 66 /*
67 * rfkill_poll reports 1 when the key has been pressed and the
68 * radio should be blocked.
69 */
70 *state = rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
71 RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
68 72
69 return 0; 73 return 0;
70} 74}
@@ -73,19 +77,18 @@ static void rt2x00rfkill_poll(struct work_struct *work)
73{ 77{
74 struct rt2x00_dev *rt2x00dev = 78 struct rt2x00_dev *rt2x00dev =
75 container_of(work, struct rt2x00_dev, rfkill_work.work); 79 container_of(work, struct rt2x00_dev, rfkill_work.work);
76 int state; 80 enum rfkill_state state;
77 81
78 if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state)) 82 if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state) ||
83 !test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
79 return; 84 return;
80 85
81 /* 86 /*
82 * rfkill_poll reports 1 when the key has been pressed and the 87 * Poll latest state and report it to rfkill who should sort
83 * radio should be blocked. 88 * out if the state should be toggled or not.
84 */ 89 */
85 state = !rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ? 90 if (!rt2x00rfkill_get_state(rt2x00dev, &state))
86 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED; 91 rfkill_force_state(rt2x00dev->rfkill, state);
87
88 rfkill_force_state(rt2x00dev->rfkill, state);
89 92
90 queue_delayed_work(rt2x00dev->hw->workqueue, 93 queue_delayed_work(rt2x00dev->hw->workqueue,
91 &rt2x00dev->rfkill_work, RFKILL_POLL_INTERVAL); 94 &rt2x00dev->rfkill_work, RFKILL_POLL_INTERVAL);
@@ -93,8 +96,8 @@ static void rt2x00rfkill_poll(struct work_struct *work)
93 96
94void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) 97void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
95{ 98{
96 if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) || 99 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
97 !test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state)) 100 test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
98 return; 101 return;
99 102
100 if (rfkill_register(rt2x00dev->rfkill)) { 103 if (rfkill_register(rt2x00dev->rfkill)) {
@@ -114,7 +117,7 @@ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
114 117
115void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) 118void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
116{ 119{
117 if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) || 120 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
118 !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state)) 121 !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
119 return; 122 return;
120 123
@@ -127,21 +130,25 @@ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
127 130
128void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev) 131void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
129{ 132{
130 if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) 133 struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy);
134
135 if (test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
131 return; 136 return;
132 137
133 rt2x00dev->rfkill = 138 rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN);
134 rfkill_allocate(wiphy_dev(rt2x00dev->hw->wiphy), RFKILL_TYPE_WLAN);
135 if (!rt2x00dev->rfkill) { 139 if (!rt2x00dev->rfkill) {
136 ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n"); 140 ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n");
137 return; 141 return;
138 } 142 }
139 143
144 __set_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state);
145
140 rt2x00dev->rfkill->name = rt2x00dev->ops->name; 146 rt2x00dev->rfkill->name = rt2x00dev->ops->name;
141 rt2x00dev->rfkill->data = rt2x00dev; 147 rt2x00dev->rfkill->data = rt2x00dev;
142 rt2x00dev->rfkill->state = -1; 148 rt2x00dev->rfkill->state = -1;
143 rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio; 149 rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio;
144 rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state; 150 if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
151 rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state;
145 152
146 INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll); 153 INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll);
147 154
@@ -150,8 +157,7 @@ void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
150 157
151void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) 158void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
152{ 159{
153 if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) || 160 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->flags))
154 !test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
155 return; 161 return;
156 162
157 cancel_delayed_work_sync(&rt2x00dev->rfkill_work); 163 cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 2050227ea530..b73a7e0aeed4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -163,16 +163,11 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
163 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 163 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
164 struct txdone_entry_desc txdesc; 164 struct txdone_entry_desc txdesc;
165 165
166 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 166 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
167 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 167 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
168 return; 168 return;
169 169
170 /* 170 /*
171 * Remove the descriptor data from the buffer.
172 */
173 skb_pull(entry->skb, entry->queue->desc_size);
174
175 /*
176 * Obtain the status about this packet. 171 * Obtain the status about this packet.
177 * Note that when the status is 0 it does not mean the 172 * Note that when the status is 0 it does not mean the
178 * frame was send out correctly. It only means the frame 173 * frame was send out correctly. It only means the frame
@@ -224,6 +219,12 @@ int rt2x00usb_write_tx_data(struct queue_entry *entry)
224 entry->skb->data, length, 219 entry->skb->data, length,
225 rt2x00usb_interrupt_txdone, entry); 220 rt2x00usb_interrupt_txdone, entry);
226 221
222 /*
223 * Make sure the skb->data pointer points to the frame, not the
224 * descriptor.
225 */
226 skb_pull(entry->skb, entry->queue->desc_size);
227
227 return 0; 228 return 0;
228} 229}
229EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data); 230EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);
@@ -232,7 +233,7 @@ static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
232{ 233{
233 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 234 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
234 235
235 if (__test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) 236 if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
236 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 237 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
237} 238}
238 239
@@ -283,7 +284,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
283 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 284 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
284 u8 rxd[32]; 285 u8 rxd[32];
285 286
286 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 287 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
287 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 288 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
288 return; 289 return;
289 290
@@ -293,7 +294,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
293 * a problem. 294 * a problem.
294 */ 295 */
295 if (urb->actual_length < entry->queue->desc_size || urb->status) { 296 if (urb->actual_length < entry->queue->desc_size || urb->status) {
296 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 297 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
297 usb_submit_urb(urb, GFP_ATOMIC); 298 usb_submit_urb(urb, GFP_ATOMIC);
298 return; 299 return;
299 } 300 }
@@ -361,7 +362,7 @@ void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
361 entry->skb->data, entry->skb->len, 362 entry->skb->data, entry->skb->len,
362 rt2x00usb_interrupt_rxdone, entry); 363 rt2x00usb_interrupt_rxdone, entry);
363 364
364 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 365 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
365 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 366 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
366} 367}
367EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry); 368EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 087e90b328cd..2c36b91ff4c7 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -38,6 +38,13 @@
38#include "rt61pci.h" 38#include "rt61pci.h"
39 39
40/* 40/*
41 * Allow hardware encryption to be disabled.
42 */
43static int modparam_nohwcrypt = 0;
44module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
45MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
46
47/*
41 * Register access. 48 * Register access.
42 * BBP and RF register require indirect register access, 49 * BBP and RF register require indirect register access,
43 * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this. 50 * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this.
@@ -156,7 +163,7 @@ rf_write:
156 rt2x00_rf_write(rt2x00dev, word, value); 163 rt2x00_rf_write(rt2x00dev, word, value);
157} 164}
158 165
159#ifdef CONFIG_RT61PCI_LEDS 166#ifdef CONFIG_RT2X00_LIB_LEDS
160/* 167/*
161 * This function is only called from rt61pci_led_brightness() 168 * This function is only called from rt61pci_led_brightness()
162 * make gcc happy by placing this function inside the 169 * make gcc happy by placing this function inside the
@@ -188,7 +195,7 @@ static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev,
188 rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1); 195 rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1);
189 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg); 196 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg);
190} 197}
191#endif /* CONFIG_RT61PCI_LEDS */ 198#endif /* CONFIG_RT2X00_LIB_LEDS */
192 199
193static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 200static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
194{ 201{
@@ -264,7 +271,7 @@ static const struct rt2x00debug rt61pci_rt2x00debug = {
264}; 271};
265#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 272#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
266 273
267#ifdef CONFIG_RT61PCI_RFKILL 274#ifdef CONFIG_RT2X00_LIB_RFKILL
268static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) 275static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
269{ 276{
270 u32 reg; 277 u32 reg;
@@ -274,9 +281,9 @@ static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
274} 281}
275#else 282#else
276#define rt61pci_rfkill_poll NULL 283#define rt61pci_rfkill_poll NULL
277#endif /* CONFIG_RT61PCI_RFKILL */ 284#endif /* CONFIG_RT2X00_LIB_RFKILL */
278 285
279#ifdef CONFIG_RT61PCI_LEDS 286#ifdef CONFIG_RT2X00_LIB_LEDS
280static void rt61pci_brightness_set(struct led_classdev *led_cdev, 287static void rt61pci_brightness_set(struct led_classdev *led_cdev,
281 enum led_brightness brightness) 288 enum led_brightness brightness)
282{ 289{
@@ -341,11 +348,209 @@ static void rt61pci_init_led(struct rt2x00_dev *rt2x00dev,
341 led->led_dev.blink_set = rt61pci_blink_set; 348 led->led_dev.blink_set = rt61pci_blink_set;
342 led->flags = LED_INITIALIZED; 349 led->flags = LED_INITIALIZED;
343} 350}
344#endif /* CONFIG_RT61PCI_LEDS */ 351#endif /* CONFIG_RT2X00_LIB_LEDS */
345 352
346/* 353/*
347 * Configuration handlers. 354 * Configuration handlers.
348 */ 355 */
356static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
357 struct rt2x00lib_crypto *crypto,
358 struct ieee80211_key_conf *key)
359{
360 struct hw_key_entry key_entry;
361 struct rt2x00_field32 field;
362 u32 mask;
363 u32 reg;
364
365 if (crypto->cmd == SET_KEY) {
366 /*
367 * rt2x00lib can't determine the correct free
368 * key_idx for shared keys. We have 1 register
369 * with key valid bits. The goal is simple, read
370 * the register, if that is full we have no slots
371 * left.
372 * Note that each BSS is allowed to have up to 4
373 * shared keys, so put a mask over the allowed
374 * entries.
375 */
376 mask = (0xf << crypto->bssidx);
377
378 rt2x00pci_register_read(rt2x00dev, SEC_CSR0, &reg);
379 reg &= mask;
380
381 if (reg && reg == mask)
382 return -ENOSPC;
383
384 key->hw_key_idx += reg ? (ffz(reg) - 1) : 0;
385
386 /*
387 * Upload key to hardware
388 */
389 memcpy(key_entry.key, crypto->key,
390 sizeof(key_entry.key));
391 memcpy(key_entry.tx_mic, crypto->tx_mic,
392 sizeof(key_entry.tx_mic));
393 memcpy(key_entry.rx_mic, crypto->rx_mic,
394 sizeof(key_entry.rx_mic));
395
396 reg = SHARED_KEY_ENTRY(key->hw_key_idx);
397 rt2x00pci_register_multiwrite(rt2x00dev, reg,
398 &key_entry, sizeof(key_entry));
399
400 /*
401 * The cipher types are stored over 2 registers.
402 * bssidx 0 and 1 keys are stored in SEC_CSR1 and
403 * bssidx 1 and 2 keys are stored in SEC_CSR5.
404 * Using the correct defines correctly will cause overhead,
405 * so just calculate the correct offset.
406 */
407 if (key->hw_key_idx < 8) {
408 field.bit_offset = (3 * key->hw_key_idx);
409 field.bit_mask = 0x7 << field.bit_offset;
410
411 rt2x00pci_register_read(rt2x00dev, SEC_CSR1, &reg);
412 rt2x00_set_field32(&reg, field, crypto->cipher);
413 rt2x00pci_register_write(rt2x00dev, SEC_CSR1, reg);
414 } else {
415 field.bit_offset = (3 * (key->hw_key_idx - 8));
416 field.bit_mask = 0x7 << field.bit_offset;
417
418 rt2x00pci_register_read(rt2x00dev, SEC_CSR5, &reg);
419 rt2x00_set_field32(&reg, field, crypto->cipher);
420 rt2x00pci_register_write(rt2x00dev, SEC_CSR5, reg);
421 }
422
423 /*
424 * The driver does not support the IV/EIV generation
425 * in hardware. However it doesn't support the IV/EIV
426 * inside the ieee80211 frame either, but requires it
427 * to be provided seperately for the descriptor.
428 * rt2x00lib will cut the IV/EIV data out of all frames
429 * given to us by mac80211, but we must tell mac80211
430 * to generate the IV/EIV data.
431 */
432 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
433 }
434
435 /*
436 * SEC_CSR0 contains only single-bit fields to indicate
437 * a particular key is valid. Because using the FIELD32()
438 * defines directly will cause a lot of overhead we use
439 * a calculation to determine the correct bit directly.
440 */
441 mask = 1 << key->hw_key_idx;
442
443 rt2x00pci_register_read(rt2x00dev, SEC_CSR0, &reg);
444 if (crypto->cmd == SET_KEY)
445 reg |= mask;
446 else if (crypto->cmd == DISABLE_KEY)
447 reg &= ~mask;
448 rt2x00pci_register_write(rt2x00dev, SEC_CSR0, reg);
449
450 return 0;
451}
452
453static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
454 struct rt2x00lib_crypto *crypto,
455 struct ieee80211_key_conf *key)
456{
457 struct hw_pairwise_ta_entry addr_entry;
458 struct hw_key_entry key_entry;
459 u32 mask;
460 u32 reg;
461
462 if (crypto->cmd == SET_KEY) {
463 /*
464 * rt2x00lib can't determine the correct free
465 * key_idx for pairwise keys. We have 2 registers
466 * with key valid bits. The goal is simple, read
467 * the first register, if that is full move to
468 * the next register.
469 * When both registers are full, we drop the key,
470 * otherwise we use the first invalid entry.
471 */
472 rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg);
473 if (reg && reg == ~0) {
474 key->hw_key_idx = 32;
475 rt2x00pci_register_read(rt2x00dev, SEC_CSR3, &reg);
476 if (reg && reg == ~0)
477 return -ENOSPC;
478 }
479
480 key->hw_key_idx += reg ? (ffz(reg) - 1) : 0;
481
482 /*
483 * Upload key to hardware
484 */
485 memcpy(key_entry.key, crypto->key,
486 sizeof(key_entry.key));
487 memcpy(key_entry.tx_mic, crypto->tx_mic,
488 sizeof(key_entry.tx_mic));
489 memcpy(key_entry.rx_mic, crypto->rx_mic,
490 sizeof(key_entry.rx_mic));
491
492 memset(&addr_entry, 0, sizeof(addr_entry));
493 memcpy(&addr_entry, crypto->address, ETH_ALEN);
494 addr_entry.cipher = crypto->cipher;
495
496 reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
497 rt2x00pci_register_multiwrite(rt2x00dev, reg,
498 &key_entry, sizeof(key_entry));
499
500 reg = PAIRWISE_TA_ENTRY(key->hw_key_idx);
501 rt2x00pci_register_multiwrite(rt2x00dev, reg,
502 &addr_entry, sizeof(addr_entry));
503
504 /*
505 * Enable pairwise lookup table for given BSS idx,
506 * without this received frames will not be decrypted
507 * by the hardware.
508 */
509 rt2x00pci_register_read(rt2x00dev, SEC_CSR4, &reg);
510 reg |= (1 << crypto->bssidx);
511 rt2x00pci_register_write(rt2x00dev, SEC_CSR4, reg);
512
513 /*
514 * The driver does not support the IV/EIV generation
515 * in hardware. However it doesn't support the IV/EIV
516 * inside the ieee80211 frame either, but requires it
517 * to be provided seperately for the descriptor.
518 * rt2x00lib will cut the IV/EIV data out of all frames
519 * given to us by mac80211, but we must tell mac80211
520 * to generate the IV/EIV data.
521 */
522 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
523 }
524
525 /*
526 * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate
527 * a particular key is valid. Because using the FIELD32()
528 * defines directly will cause a lot of overhead we use
529 * a calculation to determine the correct bit directly.
530 */
531 if (key->hw_key_idx < 32) {
532 mask = 1 << key->hw_key_idx;
533
534 rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg);
535 if (crypto->cmd == SET_KEY)
536 reg |= mask;
537 else if (crypto->cmd == DISABLE_KEY)
538 reg &= ~mask;
539 rt2x00pci_register_write(rt2x00dev, SEC_CSR2, reg);
540 } else {
541 mask = 1 << (key->hw_key_idx - 32);
542
543 rt2x00pci_register_read(rt2x00dev, SEC_CSR3, &reg);
544 if (crypto->cmd == SET_KEY)
545 reg |= mask;
546 else if (crypto->cmd == DISABLE_KEY)
547 reg &= ~mask;
548 rt2x00pci_register_write(rt2x00dev, SEC_CSR3, reg);
549 }
550
551 return 0;
552}
553
349static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev, 554static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
350 const unsigned int filter_flags) 555 const unsigned int filter_flags)
351{ 556{
@@ -440,6 +645,30 @@ static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
440 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); 645 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
441} 646}
442 647
648
649static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
650 struct rt2x00lib_conf *libconf)
651{
652 u16 eeprom;
653 short lna_gain = 0;
654
655 if (libconf->band == IEEE80211_BAND_2GHZ) {
656 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
657 lna_gain += 14;
658
659 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
660 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
661 } else {
662 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
663 lna_gain += 14;
664
665 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
666 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
667 }
668
669 rt2x00dev->lna_gain = lna_gain;
670}
671
443static void rt61pci_config_phymode(struct rt2x00_dev *rt2x00dev, 672static void rt61pci_config_phymode(struct rt2x00_dev *rt2x00dev,
444 const int basic_rate_mask) 673 const int basic_rate_mask)
445{ 674{
@@ -758,6 +987,9 @@ static void rt61pci_config(struct rt2x00_dev *rt2x00dev,
758 struct rt2x00lib_conf *libconf, 987 struct rt2x00lib_conf *libconf,
759 const unsigned int flags) 988 const unsigned int flags)
760{ 989{
990 /* Always recalculate LNA gain before changing configuration */
991 rt61pci_config_lna_gain(rt2x00dev, libconf);
992
761 if (flags & CONFIG_UPDATE_PHYMODE) 993 if (flags & CONFIG_UPDATE_PHYMODE)
762 rt61pci_config_phymode(rt2x00dev, libconf->basic_rates); 994 rt61pci_config_phymode(rt2x00dev, libconf->basic_rates);
763 if (flags & CONFIG_UPDATE_CHANNEL) 995 if (flags & CONFIG_UPDATE_CHANNEL)
@@ -1246,16 +1478,6 @@ static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev)
1246 1478
1247 rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); 1479 rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff);
1248 1480
1249 rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
1250 rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC0_TX_OP, 0);
1251 rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC1_TX_OP, 0);
1252 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
1253
1254 rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
1255 rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC2_TX_OP, 192);
1256 rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC3_TX_OP, 48);
1257 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
1258
1259 /* 1481 /*
1260 * Clear all beacons 1482 * Clear all beacons
1261 * For the Beacon base registers we only need to clear 1483 * For the Beacon base registers we only need to clear
@@ -1533,8 +1755,8 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1533 * TX descriptor initialization 1755 * TX descriptor initialization
1534 */ 1756 */
1535static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1757static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1536 struct sk_buff *skb, 1758 struct sk_buff *skb,
1537 struct txentry_desc *txdesc) 1759 struct txentry_desc *txdesc)
1538{ 1760{
1539 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1761 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1540 __le32 *txd = skbdesc->desc; 1762 __le32 *txd = skbdesc->desc;
@@ -1548,7 +1770,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1548 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1770 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
1549 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1771 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1550 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1772 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1551 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1773 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1552 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1774 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1553 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); 1775 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
1554 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1); 1776 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
@@ -1561,6 +1783,11 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1561 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1783 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
1562 rt2x00_desc_write(txd, 2, word); 1784 rt2x00_desc_write(txd, 2, word);
1563 1785
1786 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
1787 _rt2x00_desc_write(txd, 3, skbdesc->iv);
1788 _rt2x00_desc_write(txd, 4, skbdesc->eiv);
1789 }
1790
1564 rt2x00_desc_read(txd, 5, &word); 1791 rt2x00_desc_read(txd, 5, &word);
1565 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid); 1792 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid);
1566 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, 1793 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
@@ -1595,11 +1822,15 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1595 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1822 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1596 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1823 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1597 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1824 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1598 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); 1825 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
1826 test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
1827 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
1828 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
1829 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
1599 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); 1830 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1600 rt2x00_set_field32(&word, TXD_W0_BURST, 1831 rt2x00_set_field32(&word, TXD_W0_BURST,
1601 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 1832 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1602 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1833 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
1603 rt2x00_desc_write(txd, 0, word); 1834 rt2x00_desc_write(txd, 0, word);
1604} 1835}
1605 1836
@@ -1676,40 +1907,27 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1676 */ 1907 */
1677static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) 1908static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1678{ 1909{
1679 u16 eeprom; 1910 u8 offset = rt2x00dev->lna_gain;
1680 u8 offset;
1681 u8 lna; 1911 u8 lna;
1682 1912
1683 lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); 1913 lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA);
1684 switch (lna) { 1914 switch (lna) {
1685 case 3: 1915 case 3:
1686 offset = 90; 1916 offset += 90;
1687 break; 1917 break;
1688 case 2: 1918 case 2:
1689 offset = 74; 1919 offset += 74;
1690 break; 1920 break;
1691 case 1: 1921 case 1:
1692 offset = 64; 1922 offset += 64;
1693 break; 1923 break;
1694 default: 1924 default:
1695 return 0; 1925 return 0;
1696 } 1926 }
1697 1927
1698 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { 1928 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) {
1699 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
1700 offset += 14;
1701
1702 if (lna == 3 || lna == 2) 1929 if (lna == 3 || lna == 2)
1703 offset += 10; 1930 offset += 10;
1704
1705 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
1706 offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
1707 } else {
1708 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
1709 offset += 14;
1710
1711 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
1712 offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
1713 } 1931 }
1714 1932
1715 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; 1933 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
@@ -1718,6 +1936,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1718static void rt61pci_fill_rxdone(struct queue_entry *entry, 1936static void rt61pci_fill_rxdone(struct queue_entry *entry,
1719 struct rxdone_entry_desc *rxdesc) 1937 struct rxdone_entry_desc *rxdesc)
1720{ 1938{
1939 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1721 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1940 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1722 u32 word0; 1941 u32 word0;
1723 u32 word1; 1942 u32 word1;
@@ -1728,6 +1947,38 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
1728 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1947 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1729 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1948 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1730 1949
1950 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
1951 rxdesc->cipher =
1952 rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
1953 rxdesc->cipher_status =
1954 rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
1955 }
1956
1957 if (rxdesc->cipher != CIPHER_NONE) {
1958 _rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv);
1959 _rt2x00_desc_read(entry_priv->desc, 3, &rxdesc->eiv);
1960 _rt2x00_desc_read(entry_priv->desc, 4, &rxdesc->icv);
1961
1962 /*
1963 * Hardware has stripped IV/EIV data from 802.11 frame during
1964 * decryption. It has provided the data seperately but rt2x00lib
1965 * should decide if it should be reinserted.
1966 */
1967 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
1968
1969 /*
1970 * FIXME: Legacy driver indicates that the frame does
1971 * contain the Michael Mic. Unfortunately, in rt2x00
1972 * the MIC seems to be missing completely...
1973 */
1974 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
1975
1976 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
1977 rxdesc->flags |= RX_FLAG_DECRYPTED;
1978 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
1979 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
1980 }
1981
1731 /* 1982 /*
1732 * Obtain the status about this packet. 1983 * Obtain the status about this packet.
1733 * When frame was received with an OFDM bitrate, 1984 * When frame was received with an OFDM bitrate,
@@ -1735,11 +1986,13 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
1735 * a CCK bitrate the signal is the rate in 100kbit/s. 1986 * a CCK bitrate the signal is the rate in 100kbit/s.
1736 */ 1987 */
1737 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); 1988 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
1738 rxdesc->rssi = rt61pci_agc_to_rssi(entry->queue->rt2x00dev, word1); 1989 rxdesc->rssi = rt61pci_agc_to_rssi(rt2x00dev, word1);
1739 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1990 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1740 1991
1741 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1992 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1742 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1993 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1994 else
1995 rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
1743 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1996 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1744 rxdesc->dev_flags |= RXDONE_MY_BSS; 1997 rxdesc->dev_flags |= RXDONE_MY_BSS;
1745} 1998}
@@ -1860,7 +2113,7 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
1860 if (!reg && !reg_mcu) 2113 if (!reg && !reg_mcu)
1861 return IRQ_NONE; 2114 return IRQ_NONE;
1862 2115
1863 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 2116 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1864 return IRQ_HANDLED; 2117 return IRQ_HANDLED;
1865 2118
1866 /* 2119 /*
@@ -2060,10 +2313,10 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2060 /* 2313 /*
2061 * Detect if this device has an hardware controlled radio. 2314 * Detect if this device has an hardware controlled radio.
2062 */ 2315 */
2063#ifdef CONFIG_RT61PCI_RFKILL 2316#ifdef CONFIG_RT2X00_LIB_RFKILL
2064 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 2317 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
2065 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 2318 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
2066#endif /* CONFIG_RT61PCI_RFKILL */ 2319#endif /* CONFIG_RT2X00_LIB_RFKILL */
2067 2320
2068 /* 2321 /*
2069 * Read frequency offset and RF programming sequence. 2322 * Read frequency offset and RF programming sequence.
@@ -2121,7 +2374,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2121 * If the eeprom value is invalid, 2374 * If the eeprom value is invalid,
2122 * switch to default led mode. 2375 * switch to default led mode.
2123 */ 2376 */
2124#ifdef CONFIG_RT61PCI_LEDS 2377#ifdef CONFIG_RT2X00_LIB_LEDS
2125 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); 2378 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
2126 value = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE); 2379 value = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE);
2127 2380
@@ -2155,7 +2408,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2155 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A, 2408 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A,
2156 rt2x00_get_field16(eeprom, 2409 rt2x00_get_field16(eeprom,
2157 EEPROM_LED_POLARITY_RDY_A)); 2410 EEPROM_LED_POLARITY_RDY_A));
2158#endif /* CONFIG_RT61PCI_LEDS */ 2411#endif /* CONFIG_RT2X00_LIB_LEDS */
2159 2412
2160 return 0; 2413 return 0;
2161} 2414}
@@ -2274,10 +2527,11 @@ static const struct rf_channel rf_vals_seq[] = {
2274 { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000c0a23 }, 2527 { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000c0a23 },
2275}; 2528};
2276 2529
2277static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 2530static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2278{ 2531{
2279 struct hw_mode_spec *spec = &rt2x00dev->spec; 2532 struct hw_mode_spec *spec = &rt2x00dev->spec;
2280 u8 *txpower; 2533 struct channel_info *info;
2534 char *tx_power;
2281 unsigned int i; 2535 unsigned int i;
2282 2536
2283 /* 2537 /*
@@ -2294,20 +2548,10 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2294 EEPROM_MAC_ADDR_0)); 2548 EEPROM_MAC_ADDR_0));
2295 2549
2296 /* 2550 /*
2297 * Convert tx_power array in eeprom.
2298 */
2299 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
2300 for (i = 0; i < 14; i++)
2301 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
2302
2303 /*
2304 * Initialize hw_mode information. 2551 * Initialize hw_mode information.
2305 */ 2552 */
2306 spec->supported_bands = SUPPORT_BAND_2GHZ; 2553 spec->supported_bands = SUPPORT_BAND_2GHZ;
2307 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2554 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2308 spec->tx_power_a = NULL;
2309 spec->tx_power_bg = txpower;
2310 spec->tx_power_default = DEFAULT_TXPOWER;
2311 2555
2312 if (!test_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags)) { 2556 if (!test_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags)) {
2313 spec->num_channels = 14; 2557 spec->num_channels = 14;
@@ -2321,13 +2565,28 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2321 rt2x00_rf(&rt2x00dev->chip, RF5325)) { 2565 rt2x00_rf(&rt2x00dev->chip, RF5325)) {
2322 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2566 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2323 spec->num_channels = ARRAY_SIZE(rf_vals_seq); 2567 spec->num_channels = ARRAY_SIZE(rf_vals_seq);
2568 }
2324 2569
2325 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2570 /*
2326 for (i = 0; i < 14; i++) 2571 * Create channel information array
2327 txpower[i] = TXPOWER_FROM_DEV(txpower[i]); 2572 */
2573 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
2574 if (!info)
2575 return -ENOMEM;
2328 2576
2329 spec->tx_power_a = txpower; 2577 spec->channels_info = info;
2578
2579 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
2580 for (i = 0; i < 14; i++)
2581 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2582
2583 if (spec->num_channels > 14) {
2584 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2585 for (i = 14; i < spec->num_channels; i++)
2586 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2330 } 2587 }
2588
2589 return 0;
2331} 2590}
2332 2591
2333static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) 2592static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -2348,13 +2607,17 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2348 /* 2607 /*
2349 * Initialize hw specifications. 2608 * Initialize hw specifications.
2350 */ 2609 */
2351 rt61pci_probe_hw_mode(rt2x00dev); 2610 retval = rt61pci_probe_hw_mode(rt2x00dev);
2611 if (retval)
2612 return retval;
2352 2613
2353 /* 2614 /*
2354 * This device requires firmware and DMA mapped skbs. 2615 * This device requires firmware and DMA mapped skbs.
2355 */ 2616 */
2356 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 2617 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
2357 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 2618 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
2619 if (!modparam_nohwcrypt)
2620 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
2358 2621
2359 /* 2622 /*
2360 * Set the rssi offset. 2623 * Set the rssi offset.
@@ -2381,6 +2644,63 @@ static int rt61pci_set_retry_limit(struct ieee80211_hw *hw,
2381 return 0; 2644 return 0;
2382} 2645}
2383 2646
2647static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2648 const struct ieee80211_tx_queue_params *params)
2649{
2650 struct rt2x00_dev *rt2x00dev = hw->priv;
2651 struct data_queue *queue;
2652 struct rt2x00_field32 field;
2653 int retval;
2654 u32 reg;
2655
2656 /*
2657 * First pass the configuration through rt2x00lib, that will
2658 * update the queue settings and validate the input. After that
2659 * we are free to update the registers based on the value
2660 * in the queue parameter.
2661 */
2662 retval = rt2x00mac_conf_tx(hw, queue_idx, params);
2663 if (retval)
2664 return retval;
2665
2666 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
2667
2668 /* Update WMM TXOP register */
2669 if (queue_idx < 2) {
2670 field.bit_offset = queue_idx * 16;
2671 field.bit_mask = 0xffff << field.bit_offset;
2672
2673 rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
2674 rt2x00_set_field32(&reg, field, queue->txop);
2675 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
2676 } else if (queue_idx < 4) {
2677 field.bit_offset = (queue_idx - 2) * 16;
2678 field.bit_mask = 0xffff << field.bit_offset;
2679
2680 rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
2681 rt2x00_set_field32(&reg, field, queue->txop);
2682 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
2683 }
2684
2685 /* Update WMM registers */
2686 field.bit_offset = queue_idx * 4;
2687 field.bit_mask = 0xf << field.bit_offset;
2688
2689 rt2x00pci_register_read(rt2x00dev, AIFSN_CSR, &reg);
2690 rt2x00_set_field32(&reg, field, queue->aifs);
2691 rt2x00pci_register_write(rt2x00dev, AIFSN_CSR, reg);
2692
2693 rt2x00pci_register_read(rt2x00dev, CWMIN_CSR, &reg);
2694 rt2x00_set_field32(&reg, field, queue->cw_min);
2695 rt2x00pci_register_write(rt2x00dev, CWMIN_CSR, reg);
2696
2697 rt2x00pci_register_read(rt2x00dev, CWMAX_CSR, &reg);
2698 rt2x00_set_field32(&reg, field, queue->cw_max);
2699 rt2x00pci_register_write(rt2x00dev, CWMAX_CSR, reg);
2700
2701 return 0;
2702}
2703
2384static u64 rt61pci_get_tsf(struct ieee80211_hw *hw) 2704static u64 rt61pci_get_tsf(struct ieee80211_hw *hw)
2385{ 2705{
2386 struct rt2x00_dev *rt2x00dev = hw->priv; 2706 struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -2404,10 +2724,11 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2404 .config = rt2x00mac_config, 2724 .config = rt2x00mac_config,
2405 .config_interface = rt2x00mac_config_interface, 2725 .config_interface = rt2x00mac_config_interface,
2406 .configure_filter = rt2x00mac_configure_filter, 2726 .configure_filter = rt2x00mac_configure_filter,
2727 .set_key = rt2x00mac_set_key,
2407 .get_stats = rt2x00mac_get_stats, 2728 .get_stats = rt2x00mac_get_stats,
2408 .set_retry_limit = rt61pci_set_retry_limit, 2729 .set_retry_limit = rt61pci_set_retry_limit,
2409 .bss_info_changed = rt2x00mac_bss_info_changed, 2730 .bss_info_changed = rt2x00mac_bss_info_changed,
2410 .conf_tx = rt2x00mac_conf_tx, 2731 .conf_tx = rt61pci_conf_tx,
2411 .get_tx_stats = rt2x00mac_get_tx_stats, 2732 .get_tx_stats = rt2x00mac_get_tx_stats,
2412 .get_tsf = rt61pci_get_tsf, 2733 .get_tsf = rt61pci_get_tsf,
2413}; 2734};
@@ -2432,6 +2753,8 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2432 .write_beacon = rt61pci_write_beacon, 2753 .write_beacon = rt61pci_write_beacon,
2433 .kick_tx_queue = rt61pci_kick_tx_queue, 2754 .kick_tx_queue = rt61pci_kick_tx_queue,
2434 .fill_rxdone = rt61pci_fill_rxdone, 2755 .fill_rxdone = rt61pci_fill_rxdone,
2756 .config_shared_key = rt61pci_config_shared_key,
2757 .config_pairwise_key = rt61pci_config_pairwise_key,
2435 .config_filter = rt61pci_config_filter, 2758 .config_filter = rt61pci_config_filter,
2436 .config_intf = rt61pci_config_intf, 2759 .config_intf = rt61pci_config_intf,
2437 .config_erp = rt61pci_config_erp, 2760 .config_erp = rt61pci_config_erp,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 1004d5b899e6..8ec1451308cc 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -134,6 +134,16 @@
134#define PAIRWISE_KEY_TABLE_BASE 0x1200 134#define PAIRWISE_KEY_TABLE_BASE 0x1200
135#define PAIRWISE_TA_TABLE_BASE 0x1a00 135#define PAIRWISE_TA_TABLE_BASE 0x1a00
136 136
137#define SHARED_KEY_ENTRY(__idx) \
138 ( SHARED_KEY_TABLE_BASE + \
139 ((__idx) * sizeof(struct hw_key_entry)) )
140#define PAIRWISE_KEY_ENTRY(__idx) \
141 ( PAIRWISE_KEY_TABLE_BASE + \
142 ((__idx) * sizeof(struct hw_key_entry)) )
143#define PAIRWISE_TA_ENTRY(__idx) \
144 ( PAIRWISE_TA_TABLE_BASE + \
145 ((__idx) * sizeof(struct hw_pairwise_ta_entry)) )
146
137struct hw_key_entry { 147struct hw_key_entry {
138 u8 key[16]; 148 u8 key[16];
139 u8 tx_mic[8]; 149 u8 tx_mic[8];
@@ -142,7 +152,8 @@ struct hw_key_entry {
142 152
143struct hw_pairwise_ta_entry { 153struct hw_pairwise_ta_entry {
144 u8 address[6]; 154 u8 address[6];
145 u8 reserved[2]; 155 u8 cipher;
156 u8 reserved;
146} __attribute__ ((packed)); 157} __attribute__ ((packed));
147 158
148/* 159/*
@@ -662,6 +673,10 @@ struct hw_pairwise_ta_entry {
662 * SEC_CSR4: Pairwise key table lookup control. 673 * SEC_CSR4: Pairwise key table lookup control.
663 */ 674 */
664#define SEC_CSR4 0x30b0 675#define SEC_CSR4 0x30b0
676#define SEC_CSR4_ENABLE_BSS0 FIELD32(0x00000001)
677#define SEC_CSR4_ENABLE_BSS1 FIELD32(0x00000002)
678#define SEC_CSR4_ENABLE_BSS2 FIELD32(0x00000004)
679#define SEC_CSR4_ENABLE_BSS3 FIELD32(0x00000008)
665 680
666/* 681/*
667 * SEC_CSR5: shared key table security mode register. 682 * SEC_CSR5: shared key table security mode register.
@@ -1428,8 +1443,10 @@ struct hw_pairwise_ta_entry {
1428 1443
1429/* 1444/*
1430 * Word4 1445 * Word4
1446 * ICV: Received ICV of originally encrypted.
1447 * NOTE: This is a guess, the official definition is "reserved"
1431 */ 1448 */
1432#define RXD_W4_RESERVED FIELD32(0xffffffff) 1449#define RXD_W4_ICV FIELD32(0xffffffff)
1433 1450
1434/* 1451/*
1435 * the above 20-byte is called RXINFO and will be DMAed to MAC RX block 1452 * the above 20-byte is called RXINFO and will be DMAed to MAC RX block
@@ -1465,17 +1482,10 @@ struct hw_pairwise_ta_entry {
1465#define MAX_TXPOWER 31 1482#define MAX_TXPOWER 31
1466#define DEFAULT_TXPOWER 24 1483#define DEFAULT_TXPOWER 24
1467 1484
1468#define TXPOWER_FROM_DEV(__txpower) \ 1485#define TXPOWER_FROM_DEV(__txpower) \
1469({ \ 1486 (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
1470 ((__txpower) > MAX_TXPOWER) ? \ 1487
1471 DEFAULT_TXPOWER : (__txpower); \ 1488#define TXPOWER_TO_DEV(__txpower) \
1472}) 1489 clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
1473
1474#define TXPOWER_TO_DEV(__txpower) \
1475({ \
1476 ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \
1477 (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \
1478 (__txpower)); \
1479})
1480 1490
1481#endif /* RT61PCI_H */ 1491#endif /* RT61PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 9761eaaa08be..27dde3e34603 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -37,6 +37,13 @@
37#include "rt73usb.h" 37#include "rt73usb.h"
38 38
39/* 39/*
40 * Allow hardware encryption to be disabled.
41 */
42static int modparam_nohwcrypt = 0;
43module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
44MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
45
46/*
40 * Register access. 47 * Register access.
41 * All access to the CSR registers will go through the methods 48 * All access to the CSR registers will go through the methods
42 * rt73usb_register_read and rt73usb_register_write. 49 * rt73usb_register_read and rt73usb_register_write.
@@ -285,7 +292,7 @@ static const struct rt2x00debug rt73usb_rt2x00debug = {
285}; 292};
286#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 293#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
287 294
288#ifdef CONFIG_RT73USB_LEDS 295#ifdef CONFIG_RT2X00_LIB_LEDS
289static void rt73usb_brightness_set(struct led_classdev *led_cdev, 296static void rt73usb_brightness_set(struct led_classdev *led_cdev,
290 enum led_brightness brightness) 297 enum led_brightness brightness)
291{ 298{
@@ -352,11 +359,224 @@ static void rt73usb_init_led(struct rt2x00_dev *rt2x00dev,
352 led->led_dev.blink_set = rt73usb_blink_set; 359 led->led_dev.blink_set = rt73usb_blink_set;
353 led->flags = LED_INITIALIZED; 360 led->flags = LED_INITIALIZED;
354} 361}
355#endif /* CONFIG_RT73USB_LEDS */ 362#endif /* CONFIG_RT2X00_LIB_LEDS */
356 363
357/* 364/*
358 * Configuration handlers. 365 * Configuration handlers.
359 */ 366 */
367static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
368 struct rt2x00lib_crypto *crypto,
369 struct ieee80211_key_conf *key)
370{
371 struct hw_key_entry key_entry;
372 struct rt2x00_field32 field;
373 int timeout;
374 u32 mask;
375 u32 reg;
376
377 if (crypto->cmd == SET_KEY) {
378 /*
379 * rt2x00lib can't determine the correct free
380 * key_idx for shared keys. We have 1 register
381 * with key valid bits. The goal is simple, read
382 * the register, if that is full we have no slots
383 * left.
384 * Note that each BSS is allowed to have up to 4
385 * shared keys, so put a mask over the allowed
386 * entries.
387 */
388 mask = (0xf << crypto->bssidx);
389
390 rt73usb_register_read(rt2x00dev, SEC_CSR0, &reg);
391 reg &= mask;
392
393 if (reg && reg == mask)
394 return -ENOSPC;
395
396 key->hw_key_idx += reg ? (ffz(reg) - 1) : 0;
397
398 /*
399 * Upload key to hardware
400 */
401 memcpy(key_entry.key, crypto->key,
402 sizeof(key_entry.key));
403 memcpy(key_entry.tx_mic, crypto->tx_mic,
404 sizeof(key_entry.tx_mic));
405 memcpy(key_entry.rx_mic, crypto->rx_mic,
406 sizeof(key_entry.rx_mic));
407
408 reg = SHARED_KEY_ENTRY(key->hw_key_idx);
409 timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
410 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
411 USB_VENDOR_REQUEST_OUT, reg,
412 &key_entry,
413 sizeof(key_entry),
414 timeout);
415
416 /*
417 * The cipher types are stored over 2 registers.
418 * bssidx 0 and 1 keys are stored in SEC_CSR1 and
419 * bssidx 1 and 2 keys are stored in SEC_CSR5.
420 * Using the correct defines correctly will cause overhead,
421 * so just calculate the correct offset.
422 */
423 if (key->hw_key_idx < 8) {
424 field.bit_offset = (3 * key->hw_key_idx);
425 field.bit_mask = 0x7 << field.bit_offset;
426
427 rt73usb_register_read(rt2x00dev, SEC_CSR1, &reg);
428 rt2x00_set_field32(&reg, field, crypto->cipher);
429 rt73usb_register_write(rt2x00dev, SEC_CSR1, reg);
430 } else {
431 field.bit_offset = (3 * (key->hw_key_idx - 8));
432 field.bit_mask = 0x7 << field.bit_offset;
433
434 rt73usb_register_read(rt2x00dev, SEC_CSR5, &reg);
435 rt2x00_set_field32(&reg, field, crypto->cipher);
436 rt73usb_register_write(rt2x00dev, SEC_CSR5, reg);
437 }
438
439 /*
440 * The driver does not support the IV/EIV generation
441 * in hardware. However it doesn't support the IV/EIV
442 * inside the ieee80211 frame either, but requires it
443 * to be provided seperately for the descriptor.
444 * rt2x00lib will cut the IV/EIV data out of all frames
445 * given to us by mac80211, but we must tell mac80211
446 * to generate the IV/EIV data.
447 */
448 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
449 }
450
451 /*
452 * SEC_CSR0 contains only single-bit fields to indicate
453 * a particular key is valid. Because using the FIELD32()
454 * defines directly will cause a lot of overhead we use
455 * a calculation to determine the correct bit directly.
456 */
457 mask = 1 << key->hw_key_idx;
458
459 rt73usb_register_read(rt2x00dev, SEC_CSR0, &reg);
460 if (crypto->cmd == SET_KEY)
461 reg |= mask;
462 else if (crypto->cmd == DISABLE_KEY)
463 reg &= ~mask;
464 rt73usb_register_write(rt2x00dev, SEC_CSR0, reg);
465
466 return 0;
467}
468
469static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
470 struct rt2x00lib_crypto *crypto,
471 struct ieee80211_key_conf *key)
472{
473 struct hw_pairwise_ta_entry addr_entry;
474 struct hw_key_entry key_entry;
475 int timeout;
476 u32 mask;
477 u32 reg;
478
479 if (crypto->cmd == SET_KEY) {
480 /*
481 * rt2x00lib can't determine the correct free
482 * key_idx for pairwise keys. We have 2 registers
483 * with key valid bits. The goal is simple, read
484 * the first register, if that is full move to
485 * the next register.
486 * When both registers are full, we drop the key,
487 * otherwise we use the first invalid entry.
488 */
489 rt73usb_register_read(rt2x00dev, SEC_CSR2, &reg);
490 if (reg && reg == ~0) {
491 key->hw_key_idx = 32;
492 rt73usb_register_read(rt2x00dev, SEC_CSR3, &reg);
493 if (reg && reg == ~0)
494 return -ENOSPC;
495 }
496
497 key->hw_key_idx += reg ? (ffz(reg) - 1) : 0;
498
499 /*
500 * Upload key to hardware
501 */
502 memcpy(key_entry.key, crypto->key,
503 sizeof(key_entry.key));
504 memcpy(key_entry.tx_mic, crypto->tx_mic,
505 sizeof(key_entry.tx_mic));
506 memcpy(key_entry.rx_mic, crypto->rx_mic,
507 sizeof(key_entry.rx_mic));
508
509 reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
510 timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
511 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
512 USB_VENDOR_REQUEST_OUT, reg,
513 &key_entry,
514 sizeof(key_entry),
515 timeout);
516
517 /*
518 * Send the address and cipher type to the hardware register.
519 * This data fits within the CSR cache size, so we can use
520 * rt73usb_register_multiwrite() directly.
521 */
522 memset(&addr_entry, 0, sizeof(addr_entry));
523 memcpy(&addr_entry, crypto->address, ETH_ALEN);
524 addr_entry.cipher = crypto->cipher;
525
526 reg = PAIRWISE_TA_ENTRY(key->hw_key_idx);
527 rt73usb_register_multiwrite(rt2x00dev, reg,
528 &addr_entry, sizeof(addr_entry));
529
530 /*
531 * Enable pairwise lookup table for given BSS idx,
532 * without this received frames will not be decrypted
533 * by the hardware.
534 */
535 rt73usb_register_read(rt2x00dev, SEC_CSR4, &reg);
536 reg |= (1 << crypto->bssidx);
537 rt73usb_register_write(rt2x00dev, SEC_CSR4, reg);
538
539 /*
540 * The driver does not support the IV/EIV generation
541 * in hardware. However it doesn't support the IV/EIV
542 * inside the ieee80211 frame either, but requires it
543 * to be provided seperately for the descriptor.
544 * rt2x00lib will cut the IV/EIV data out of all frames
545 * given to us by mac80211, but we must tell mac80211
546 * to generate the IV/EIV data.
547 */
548 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
549 }
550
551 /*
552 * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate
553 * a particular key is valid. Because using the FIELD32()
554 * defines directly will cause a lot of overhead we use
555 * a calculation to determine the correct bit directly.
556 */
557 if (key->hw_key_idx < 32) {
558 mask = 1 << key->hw_key_idx;
559
560 rt73usb_register_read(rt2x00dev, SEC_CSR2, &reg);
561 if (crypto->cmd == SET_KEY)
562 reg |= mask;
563 else if (crypto->cmd == DISABLE_KEY)
564 reg &= ~mask;
565 rt73usb_register_write(rt2x00dev, SEC_CSR2, reg);
566 } else {
567 mask = 1 << (key->hw_key_idx - 32);
568
569 rt73usb_register_read(rt2x00dev, SEC_CSR3, &reg);
570 if (crypto->cmd == SET_KEY)
571 reg |= mask;
572 else if (crypto->cmd == DISABLE_KEY)
573 reg &= ~mask;
574 rt73usb_register_write(rt2x00dev, SEC_CSR3, reg);
575 }
576
577 return 0;
578}
579
360static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev, 580static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
361 const unsigned int filter_flags) 581 const unsigned int filter_flags)
362{ 582{
@@ -451,6 +671,26 @@ static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
451 rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); 671 rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg);
452} 672}
453 673
674static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
675 struct rt2x00lib_conf *libconf)
676{
677 u16 eeprom;
678 short lna_gain = 0;
679
680 if (libconf->band == IEEE80211_BAND_2GHZ) {
681 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
682 lna_gain += 14;
683
684 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
685 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
686 } else {
687 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
688 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
689 }
690
691 rt2x00dev->lna_gain = lna_gain;
692}
693
454static void rt73usb_config_phymode(struct rt2x00_dev *rt2x00dev, 694static void rt73usb_config_phymode(struct rt2x00_dev *rt2x00dev,
455 const int basic_rate_mask) 695 const int basic_rate_mask)
456{ 696{
@@ -705,6 +945,9 @@ static void rt73usb_config(struct rt2x00_dev *rt2x00dev,
705 struct rt2x00lib_conf *libconf, 945 struct rt2x00lib_conf *libconf,
706 const unsigned int flags) 946 const unsigned int flags)
707{ 947{
948 /* Always recalculate LNA gain before changing configuration */
949 rt73usb_config_lna_gain(rt2x00dev, libconf);
950
708 if (flags & CONFIG_UPDATE_PHYMODE) 951 if (flags & CONFIG_UPDATE_PHYMODE)
709 rt73usb_config_phymode(rt2x00dev, libconf->basic_rates); 952 rt73usb_config_phymode(rt2x00dev, libconf->basic_rates);
710 if (flags & CONFIG_UPDATE_CHANNEL) 953 if (flags & CONFIG_UPDATE_CHANNEL)
@@ -1034,16 +1277,6 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
1034 rt73usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606); 1277 rt73usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606);
1035 rt73usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408); 1278 rt73usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408);
1036 1279
1037 rt73usb_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
1038 rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC0_TX_OP, 0);
1039 rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC1_TX_OP, 0);
1040 rt73usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
1041
1042 rt73usb_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
1043 rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC2_TX_OP, 192);
1044 rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC3_TX_OP, 48);
1045 rt73usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
1046
1047 rt73usb_register_read(rt2x00dev, MAC_CSR9, &reg); 1280 rt73usb_register_read(rt2x00dev, MAC_CSR9, &reg);
1048 rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0); 1281 rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0);
1049 rt73usb_register_write(rt2x00dev, MAC_CSR9, reg); 1282 rt73usb_register_write(rt2x00dev, MAC_CSR9, reg);
@@ -1265,8 +1498,8 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1265 * TX descriptor initialization 1498 * TX descriptor initialization
1266 */ 1499 */
1267static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1500static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1268 struct sk_buff *skb, 1501 struct sk_buff *skb,
1269 struct txentry_desc *txdesc) 1502 struct txentry_desc *txdesc)
1270{ 1503{
1271 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1504 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1272 __le32 *txd = skbdesc->desc; 1505 __le32 *txd = skbdesc->desc;
@@ -1280,7 +1513,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1280 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1513 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
1281 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1514 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1282 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1515 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1283 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1516 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1284 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1517 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1285 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); 1518 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
1286 rt2x00_desc_write(txd, 1, word); 1519 rt2x00_desc_write(txd, 1, word);
@@ -1292,6 +1525,11 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1292 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1525 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
1293 rt2x00_desc_write(txd, 2, word); 1526 rt2x00_desc_write(txd, 2, word);
1294 1527
1528 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
1529 _rt2x00_desc_write(txd, 3, skbdesc->iv);
1530 _rt2x00_desc_write(txd, 4, skbdesc->eiv);
1531 }
1532
1295 rt2x00_desc_read(txd, 5, &word); 1533 rt2x00_desc_read(txd, 5, &word);
1296 rt2x00_set_field32(&word, TXD_W5_TX_POWER, 1534 rt2x00_set_field32(&word, TXD_W5_TX_POWER,
1297 TXPOWER_TO_DEV(rt2x00dev->tx_power)); 1535 TXPOWER_TO_DEV(rt2x00dev->tx_power));
@@ -1313,12 +1551,15 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1313 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1551 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1314 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1552 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1315 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1553 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1316 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); 1554 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
1317 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, 1555 test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
1318 skb->len - skbdesc->desc_len); 1556 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
1557 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
1558 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
1559 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1319 rt2x00_set_field32(&word, TXD_W0_BURST2, 1560 rt2x00_set_field32(&word, TXD_W0_BURST2,
1320 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 1561 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1321 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1562 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
1322 rt2x00_desc_write(txd, 0, word); 1563 rt2x00_desc_write(txd, 0, word);
1323} 1564}
1324 1565
@@ -1331,7 +1572,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1331 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1572 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1332 unsigned int beacon_base; 1573 unsigned int beacon_base;
1333 u32 reg; 1574 u32 reg;
1334 u32 word, len;
1335 1575
1336 /* 1576 /*
1337 * Add the descriptor in front of the skb. 1577 * Add the descriptor in front of the skb.
@@ -1341,17 +1581,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1341 skbdesc->desc = entry->skb->data; 1581 skbdesc->desc = entry->skb->data;
1342 1582
1343 /* 1583 /*
1344 * Adjust the beacon databyte count. The current number is
1345 * calculated before this function gets called, but falsely
1346 * assumes that the descriptor was already present in the SKB.
1347 */
1348 rt2x00_desc_read(skbdesc->desc, 0, &word);
1349 len = rt2x00_get_field32(word, TXD_W0_DATABYTE_COUNT);
1350 len += skbdesc->desc_len;
1351 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, len);
1352 rt2x00_desc_write(skbdesc->desc, 0, word);
1353
1354 /*
1355 * Disable beaconing while we are reloading the beacon data, 1584 * Disable beaconing while we are reloading the beacon data,
1356 * otherwise we might be sending out invalid data. 1585 * otherwise we might be sending out invalid data.
1357 */ 1586 */
@@ -1422,20 +1651,19 @@ static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1422 */ 1651 */
1423static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) 1652static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1424{ 1653{
1425 u16 eeprom; 1654 u8 offset = rt2x00dev->lna_gain;
1426 u8 offset;
1427 u8 lna; 1655 u8 lna;
1428 1656
1429 lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); 1657 lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA);
1430 switch (lna) { 1658 switch (lna) {
1431 case 3: 1659 case 3:
1432 offset = 90; 1660 offset += 90;
1433 break; 1661 break;
1434 case 2: 1662 case 2:
1435 offset = 74; 1663 offset += 74;
1436 break; 1664 break;
1437 case 1: 1665 case 1:
1438 offset = 64; 1666 offset += 64;
1439 break; 1667 break;
1440 default: 1668 default:
1441 return 0; 1669 return 0;
@@ -1451,15 +1679,6 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1451 else if (lna == 2) 1679 else if (lna == 2)
1452 offset += 8; 1680 offset += 8;
1453 } 1681 }
1454
1455 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
1456 offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
1457 } else {
1458 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
1459 offset += 14;
1460
1461 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
1462 offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
1463 } 1682 }
1464 1683
1465 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; 1684 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
@@ -1468,6 +1687,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1468static void rt73usb_fill_rxdone(struct queue_entry *entry, 1687static void rt73usb_fill_rxdone(struct queue_entry *entry,
1469 struct rxdone_entry_desc *rxdesc) 1688 struct rxdone_entry_desc *rxdesc)
1470{ 1689{
1690 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1471 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1691 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1472 __le32 *rxd = (__le32 *)entry->skb->data; 1692 __le32 *rxd = (__le32 *)entry->skb->data;
1473 u32 word0; 1693 u32 word0;
@@ -1489,6 +1709,38 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1489 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1709 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1490 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1710 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1491 1711
1712 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
1713 rxdesc->cipher =
1714 rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
1715 rxdesc->cipher_status =
1716 rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
1717 }
1718
1719 if (rxdesc->cipher != CIPHER_NONE) {
1720 _rt2x00_desc_read(rxd, 2, &rxdesc->iv);
1721 _rt2x00_desc_read(rxd, 3, &rxdesc->eiv);
1722 _rt2x00_desc_read(rxd, 4, &rxdesc->icv);
1723
1724 /*
1725 * Hardware has stripped IV/EIV data from 802.11 frame during
1726 * decryption. It has provided the data seperately but rt2x00lib
1727 * should decide if it should be reinserted.
1728 */
1729 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
1730
1731 /*
1732 * FIXME: Legacy driver indicates that the frame does
1733 * contain the Michael Mic. Unfortunately, in rt2x00
1734 * the MIC seems to be missing completely...
1735 */
1736 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
1737
1738 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
1739 rxdesc->flags |= RX_FLAG_DECRYPTED;
1740 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
1741 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
1742 }
1743
1492 /* 1744 /*
1493 * Obtain the status about this packet. 1745 * Obtain the status about this packet.
1494 * When frame was received with an OFDM bitrate, 1746 * When frame was received with an OFDM bitrate,
@@ -1496,11 +1748,13 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1496 * a CCK bitrate the signal is the rate in 100kbit/s. 1748 * a CCK bitrate the signal is the rate in 100kbit/s.
1497 */ 1749 */
1498 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); 1750 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
1499 rxdesc->rssi = rt73usb_agc_to_rssi(entry->queue->rt2x00dev, word1); 1751 rxdesc->rssi = rt73usb_agc_to_rssi(rt2x00dev, word1);
1500 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1752 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1501 1753
1502 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1754 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1503 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1755 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1756 else
1757 rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
1504 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1758 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1505 rxdesc->dev_flags |= RXDONE_MY_BSS; 1759 rxdesc->dev_flags |= RXDONE_MY_BSS;
1506 1760
@@ -1678,7 +1932,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1678 /* 1932 /*
1679 * Store led settings, for correct led behaviour. 1933 * Store led settings, for correct led behaviour.
1680 */ 1934 */
1681#ifdef CONFIG_RT73USB_LEDS 1935#ifdef CONFIG_RT2X00_LIB_LEDS
1682 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); 1936 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
1683 1937
1684 rt73usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1938 rt73usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
@@ -1711,7 +1965,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1711 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A, 1965 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A,
1712 rt2x00_get_field16(eeprom, 1966 rt2x00_get_field16(eeprom,
1713 EEPROM_LED_POLARITY_RDY_A)); 1967 EEPROM_LED_POLARITY_RDY_A));
1714#endif /* CONFIG_RT73USB_LEDS */ 1968#endif /* CONFIG_RT2X00_LIB_LEDS */
1715 1969
1716 return 0; 1970 return 0;
1717} 1971}
@@ -1852,10 +2106,11 @@ static const struct rf_channel rf_vals_5225_2527[] = {
1852}; 2106};
1853 2107
1854 2108
1855static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 2109static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1856{ 2110{
1857 struct hw_mode_spec *spec = &rt2x00dev->spec; 2111 struct hw_mode_spec *spec = &rt2x00dev->spec;
1858 u8 *txpower; 2112 struct channel_info *info;
2113 char *tx_power;
1859 unsigned int i; 2114 unsigned int i;
1860 2115
1861 /* 2116 /*
@@ -1872,20 +2127,10 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1872 EEPROM_MAC_ADDR_0)); 2127 EEPROM_MAC_ADDR_0));
1873 2128
1874 /* 2129 /*
1875 * Convert tx_power array in eeprom.
1876 */
1877 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
1878 for (i = 0; i < 14; i++)
1879 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
1880
1881 /*
1882 * Initialize hw_mode information. 2130 * Initialize hw_mode information.
1883 */ 2131 */
1884 spec->supported_bands = SUPPORT_BAND_2GHZ; 2132 spec->supported_bands = SUPPORT_BAND_2GHZ;
1885 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2133 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1886 spec->tx_power_a = NULL;
1887 spec->tx_power_bg = txpower;
1888 spec->tx_power_default = DEFAULT_TXPOWER;
1889 2134
1890 if (rt2x00_rf(&rt2x00dev->chip, RF2528)) { 2135 if (rt2x00_rf(&rt2x00dev->chip, RF2528)) {
1891 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); 2136 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
@@ -1903,14 +2148,26 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1903 spec->channels = rf_vals_5225_2527; 2148 spec->channels = rf_vals_5225_2527;
1904 } 2149 }
1905 2150
1906 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 2151 /*
1907 rt2x00_rf(&rt2x00dev->chip, RF5226)) { 2152 * Create channel information array
1908 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2153 */
1909 for (i = 0; i < 14; i++) 2154 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
1910 txpower[i] = TXPOWER_FROM_DEV(txpower[i]); 2155 if (!info)
2156 return -ENOMEM;
1911 2157
1912 spec->tx_power_a = txpower; 2158 spec->channels_info = info;
2159
2160 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
2161 for (i = 0; i < 14; i++)
2162 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2163
2164 if (spec->num_channels > 14) {
2165 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2166 for (i = 14; i < spec->num_channels; i++)
2167 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1913 } 2168 }
2169
2170 return 0;
1914} 2171}
1915 2172
1916static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) 2173static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1931,13 +2188,17 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1931 /* 2188 /*
1932 * Initialize hw specifications. 2189 * Initialize hw specifications.
1933 */ 2190 */
1934 rt73usb_probe_hw_mode(rt2x00dev); 2191 retval = rt73usb_probe_hw_mode(rt2x00dev);
2192 if (retval)
2193 return retval;
1935 2194
1936 /* 2195 /*
1937 * This device requires firmware. 2196 * This device requires firmware.
1938 */ 2197 */
1939 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 2198 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
1940 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags); 2199 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
2200 if (!modparam_nohwcrypt)
2201 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
1941 2202
1942 /* 2203 /*
1943 * Set the rssi offset. 2204 * Set the rssi offset.
@@ -1964,6 +2225,63 @@ static int rt73usb_set_retry_limit(struct ieee80211_hw *hw,
1964 return 0; 2225 return 0;
1965} 2226}
1966 2227
2228static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2229 const struct ieee80211_tx_queue_params *params)
2230{
2231 struct rt2x00_dev *rt2x00dev = hw->priv;
2232 struct data_queue *queue;
2233 struct rt2x00_field32 field;
2234 int retval;
2235 u32 reg;
2236
2237 /*
2238 * First pass the configuration through rt2x00lib, that will
2239 * update the queue settings and validate the input. After that
2240 * we are free to update the registers based on the value
2241 * in the queue parameter.
2242 */
2243 retval = rt2x00mac_conf_tx(hw, queue_idx, params);
2244 if (retval)
2245 return retval;
2246
2247 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
2248
2249 /* Update WMM TXOP register */
2250 if (queue_idx < 2) {
2251 field.bit_offset = queue_idx * 16;
2252 field.bit_mask = 0xffff << field.bit_offset;
2253
2254 rt73usb_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
2255 rt2x00_set_field32(&reg, field, queue->txop);
2256 rt73usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
2257 } else if (queue_idx < 4) {
2258 field.bit_offset = (queue_idx - 2) * 16;
2259 field.bit_mask = 0xffff << field.bit_offset;
2260
2261 rt73usb_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
2262 rt2x00_set_field32(&reg, field, queue->txop);
2263 rt73usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
2264 }
2265
2266 /* Update WMM registers */
2267 field.bit_offset = queue_idx * 4;
2268 field.bit_mask = 0xf << field.bit_offset;
2269
2270 rt73usb_register_read(rt2x00dev, AIFSN_CSR, &reg);
2271 rt2x00_set_field32(&reg, field, queue->aifs);
2272 rt73usb_register_write(rt2x00dev, AIFSN_CSR, reg);
2273
2274 rt73usb_register_read(rt2x00dev, CWMIN_CSR, &reg);
2275 rt2x00_set_field32(&reg, field, queue->cw_min);
2276 rt73usb_register_write(rt2x00dev, CWMIN_CSR, reg);
2277
2278 rt73usb_register_read(rt2x00dev, CWMAX_CSR, &reg);
2279 rt2x00_set_field32(&reg, field, queue->cw_max);
2280 rt73usb_register_write(rt2x00dev, CWMAX_CSR, reg);
2281
2282 return 0;
2283}
2284
1967#if 0 2285#if 0
1968/* 2286/*
1969 * Mac80211 demands get_tsf must be atomic. 2287 * Mac80211 demands get_tsf must be atomic.
@@ -1997,10 +2315,11 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
1997 .config = rt2x00mac_config, 2315 .config = rt2x00mac_config,
1998 .config_interface = rt2x00mac_config_interface, 2316 .config_interface = rt2x00mac_config_interface,
1999 .configure_filter = rt2x00mac_configure_filter, 2317 .configure_filter = rt2x00mac_configure_filter,
2318 .set_key = rt2x00mac_set_key,
2000 .get_stats = rt2x00mac_get_stats, 2319 .get_stats = rt2x00mac_get_stats,
2001 .set_retry_limit = rt73usb_set_retry_limit, 2320 .set_retry_limit = rt73usb_set_retry_limit,
2002 .bss_info_changed = rt2x00mac_bss_info_changed, 2321 .bss_info_changed = rt2x00mac_bss_info_changed,
2003 .conf_tx = rt2x00mac_conf_tx, 2322 .conf_tx = rt73usb_conf_tx,
2004 .get_tx_stats = rt2x00mac_get_tx_stats, 2323 .get_tx_stats = rt2x00mac_get_tx_stats,
2005 .get_tsf = rt73usb_get_tsf, 2324 .get_tsf = rt73usb_get_tsf,
2006}; 2325};
@@ -2024,6 +2343,8 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2024 .get_tx_data_len = rt73usb_get_tx_data_len, 2343 .get_tx_data_len = rt73usb_get_tx_data_len,
2025 .kick_tx_queue = rt73usb_kick_tx_queue, 2344 .kick_tx_queue = rt73usb_kick_tx_queue,
2026 .fill_rxdone = rt73usb_fill_rxdone, 2345 .fill_rxdone = rt73usb_fill_rxdone,
2346 .config_shared_key = rt73usb_config_shared_key,
2347 .config_pairwise_key = rt73usb_config_pairwise_key,
2027 .config_filter = rt73usb_config_filter, 2348 .config_filter = rt73usb_config_filter,
2028 .config_intf = rt73usb_config_intf, 2349 .config_intf = rt73usb_config_intf,
2029 .config_erp = rt73usb_config_erp, 2350 .config_erp = rt73usb_config_erp,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 148493501011..868386c457f6 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -92,6 +92,16 @@
92#define PAIRWISE_KEY_TABLE_BASE 0x1200 92#define PAIRWISE_KEY_TABLE_BASE 0x1200
93#define PAIRWISE_TA_TABLE_BASE 0x1a00 93#define PAIRWISE_TA_TABLE_BASE 0x1a00
94 94
95#define SHARED_KEY_ENTRY(__idx) \
96 ( SHARED_KEY_TABLE_BASE + \
97 ((__idx) * sizeof(struct hw_key_entry)) )
98#define PAIRWISE_KEY_ENTRY(__idx) \
99 ( PAIRWISE_KEY_TABLE_BASE + \
100 ((__idx) * sizeof(struct hw_key_entry)) )
101#define PAIRWISE_TA_ENTRY(__idx) \
102 ( PAIRWISE_TA_TABLE_BASE + \
103 ((__idx) * sizeof(struct hw_pairwise_ta_entry)) )
104
95struct hw_key_entry { 105struct hw_key_entry {
96 u8 key[16]; 106 u8 key[16];
97 u8 tx_mic[8]; 107 u8 tx_mic[8];
@@ -100,7 +110,8 @@ struct hw_key_entry {
100 110
101struct hw_pairwise_ta_entry { 111struct hw_pairwise_ta_entry {
102 u8 address[6]; 112 u8 address[6];
103 u8 reserved[2]; 113 u8 cipher;
114 u8 reserved;
104} __attribute__ ((packed)); 115} __attribute__ ((packed));
105 116
106/* 117/*
@@ -563,6 +574,10 @@ struct hw_pairwise_ta_entry {
563 * SEC_CSR4: Pairwise key table lookup control. 574 * SEC_CSR4: Pairwise key table lookup control.
564 */ 575 */
565#define SEC_CSR4 0x30b0 576#define SEC_CSR4 0x30b0
577#define SEC_CSR4_ENABLE_BSS0 FIELD32(0x00000001)
578#define SEC_CSR4_ENABLE_BSS1 FIELD32(0x00000002)
579#define SEC_CSR4_ENABLE_BSS2 FIELD32(0x00000004)
580#define SEC_CSR4_ENABLE_BSS3 FIELD32(0x00000008)
566 581
567/* 582/*
568 * SEC_CSR5: shared key table security mode register. 583 * SEC_CSR5: shared key table security mode register.
@@ -1010,8 +1025,10 @@ struct hw_pairwise_ta_entry {
1010 1025
1011/* 1026/*
1012 * Word4 1027 * Word4
1028 * ICV: Received ICV of originally encrypted.
1029 * NOTE: This is a guess, the official definition is "reserved"
1013 */ 1030 */
1014#define RXD_W4_RESERVED FIELD32(0xffffffff) 1031#define RXD_W4_ICV FIELD32(0xffffffff)
1015 1032
1016/* 1033/*
1017 * the above 20-byte is called RXINFO and will be DMAed to MAC RX block 1034 * the above 20-byte is called RXINFO and will be DMAed to MAC RX block
@@ -1033,17 +1050,10 @@ struct hw_pairwise_ta_entry {
1033#define MAX_TXPOWER 31 1050#define MAX_TXPOWER 31
1034#define DEFAULT_TXPOWER 24 1051#define DEFAULT_TXPOWER 24
1035 1052
1036#define TXPOWER_FROM_DEV(__txpower) \ 1053#define TXPOWER_FROM_DEV(__txpower) \
1037({ \ 1054 (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
1038 ((__txpower) > MAX_TXPOWER) ? \ 1055
1039 DEFAULT_TXPOWER : (__txpower); \ 1056#define TXPOWER_TO_DEV(__txpower) \
1040}) 1057 clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
1041
1042#define TXPOWER_TO_DEV(__txpower) \
1043({ \
1044 ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \
1045 (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \
1046 (__txpower)); \
1047})
1048 1058
1049#endif /* RT73USB_H */ 1059#endif /* RT73USB_H */
diff --git a/drivers/net/wireless/rtl8180.h b/drivers/net/wireless/rtl8180.h
index 082a11f93beb..8721282a8185 100644
--- a/drivers/net/wireless/rtl8180.h
+++ b/drivers/net/wireless/rtl8180.h
@@ -24,20 +24,6 @@
24#define ANAPARAM_PWR1_SHIFT 20 24#define ANAPARAM_PWR1_SHIFT 20
25#define ANAPARAM_PWR1_MASK (0x7F << ANAPARAM_PWR1_SHIFT) 25#define ANAPARAM_PWR1_MASK (0x7F << ANAPARAM_PWR1_SHIFT)
26 26
27enum rtl8180_tx_desc_flags {
28 RTL8180_TX_DESC_FLAG_NO_ENC = (1 << 15),
29 RTL8180_TX_DESC_FLAG_TX_OK = (1 << 15),
30 RTL8180_TX_DESC_FLAG_SPLCP = (1 << 16),
31 RTL8180_TX_DESC_FLAG_RX_UNDER = (1 << 16),
32 RTL8180_TX_DESC_FLAG_MOREFRAG = (1 << 17),
33 RTL8180_TX_DESC_FLAG_CTS = (1 << 18),
34 RTL8180_TX_DESC_FLAG_RTS = (1 << 23),
35 RTL8180_TX_DESC_FLAG_LS = (1 << 28),
36 RTL8180_TX_DESC_FLAG_FS = (1 << 29),
37 RTL8180_TX_DESC_FLAG_DMA = (1 << 30),
38 RTL8180_TX_DESC_FLAG_OWN = (1 << 31)
39};
40
41struct rtl8180_tx_desc { 27struct rtl8180_tx_desc {
42 __le32 flags; 28 __le32 flags;
43 __le16 rts_duration; 29 __le16 rts_duration;
@@ -52,23 +38,6 @@ struct rtl8180_tx_desc {
52 u32 reserved[2]; 38 u32 reserved[2];
53} __attribute__ ((packed)); 39} __attribute__ ((packed));
54 40
55enum rtl8180_rx_desc_flags {
56 RTL8180_RX_DESC_FLAG_ICV_ERR = (1 << 12),
57 RTL8180_RX_DESC_FLAG_CRC32_ERR = (1 << 13),
58 RTL8180_RX_DESC_FLAG_PM = (1 << 14),
59 RTL8180_RX_DESC_FLAG_RX_ERR = (1 << 15),
60 RTL8180_RX_DESC_FLAG_BCAST = (1 << 16),
61 RTL8180_RX_DESC_FLAG_PAM = (1 << 17),
62 RTL8180_RX_DESC_FLAG_MCAST = (1 << 18),
63 RTL8180_RX_DESC_FLAG_SPLCP = (1 << 25),
64 RTL8180_RX_DESC_FLAG_FOF = (1 << 26),
65 RTL8180_RX_DESC_FLAG_DMA_FAIL = (1 << 27),
66 RTL8180_RX_DESC_FLAG_LS = (1 << 28),
67 RTL8180_RX_DESC_FLAG_FS = (1 << 29),
68 RTL8180_RX_DESC_FLAG_EOR = (1 << 30),
69 RTL8180_RX_DESC_FLAG_OWN = (1 << 31)
70};
71
72struct rtl8180_rx_desc { 41struct rtl8180_rx_desc {
73 __le32 flags; 42 __le32 flags;
74 __le32 flags2; 43 __le32 flags2;
diff --git a/drivers/net/wireless/rtl8180_dev.c b/drivers/net/wireless/rtl8180_dev.c
index b7172a12c057..abcd641c54be 100644
--- a/drivers/net/wireless/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl8180_dev.c
@@ -110,12 +110,12 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
110 struct sk_buff *skb = priv->rx_buf[priv->rx_idx]; 110 struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
111 u32 flags = le32_to_cpu(entry->flags); 111 u32 flags = le32_to_cpu(entry->flags);
112 112
113 if (flags & RTL8180_RX_DESC_FLAG_OWN) 113 if (flags & RTL818X_RX_DESC_FLAG_OWN)
114 return; 114 return;
115 115
116 if (unlikely(flags & (RTL8180_RX_DESC_FLAG_DMA_FAIL | 116 if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL |
117 RTL8180_RX_DESC_FLAG_FOF | 117 RTL818X_RX_DESC_FLAG_FOF |
118 RTL8180_RX_DESC_FLAG_RX_ERR))) 118 RTL818X_RX_DESC_FLAG_RX_ERR)))
119 goto done; 119 goto done;
120 else { 120 else {
121 u32 flags2 = le32_to_cpu(entry->flags2); 121 u32 flags2 = le32_to_cpu(entry->flags2);
@@ -140,7 +140,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
140 rx_status.band = dev->conf.channel->band; 140 rx_status.band = dev->conf.channel->band;
141 rx_status.mactime = le64_to_cpu(entry->tsft); 141 rx_status.mactime = le64_to_cpu(entry->tsft);
142 rx_status.flag |= RX_FLAG_TSFT; 142 rx_status.flag |= RX_FLAG_TSFT;
143 if (flags & RTL8180_RX_DESC_FLAG_CRC32_ERR) 143 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
144 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 144 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
145 145
146 ieee80211_rx_irqsafe(dev, skb, &rx_status); 146 ieee80211_rx_irqsafe(dev, skb, &rx_status);
@@ -154,10 +154,10 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
154 154
155 done: 155 done:
156 entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb)); 156 entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
157 entry->flags = cpu_to_le32(RTL8180_RX_DESC_FLAG_OWN | 157 entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
158 MAX_RX_SIZE); 158 MAX_RX_SIZE);
159 if (priv->rx_idx == 31) 159 if (priv->rx_idx == 31)
160 entry->flags |= cpu_to_le32(RTL8180_RX_DESC_FLAG_EOR); 160 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
161 priv->rx_idx = (priv->rx_idx + 1) % 32; 161 priv->rx_idx = (priv->rx_idx + 1) % 32;
162 } 162 }
163} 163}
@@ -173,7 +173,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
173 struct ieee80211_tx_info *info; 173 struct ieee80211_tx_info *info;
174 u32 flags = le32_to_cpu(entry->flags); 174 u32 flags = le32_to_cpu(entry->flags);
175 175
176 if (flags & RTL8180_TX_DESC_FLAG_OWN) 176 if (flags & RTL818X_TX_DESC_FLAG_OWN)
177 return; 177 return;
178 178
179 ring->idx = (ring->idx + 1) % ring->entries; 179 ring->idx = (ring->idx + 1) % ring->entries;
@@ -185,7 +185,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
185 memset(&info->status, 0, sizeof(info->status)); 185 memset(&info->status, 0, sizeof(info->status));
186 186
187 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 187 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
188 if (flags & RTL8180_TX_DESC_FLAG_TX_OK) 188 if (flags & RTL818X_TX_DESC_FLAG_TX_OK)
189 info->flags |= IEEE80211_TX_STAT_ACK; 189 info->flags |= IEEE80211_TX_STAT_ACK;
190 else 190 else
191 info->status.excessive_retries = 1; 191 info->status.excessive_retries = 1;
@@ -252,20 +252,20 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
252 mapping = pci_map_single(priv->pdev, skb->data, 252 mapping = pci_map_single(priv->pdev, skb->data,
253 skb->len, PCI_DMA_TODEVICE); 253 skb->len, PCI_DMA_TODEVICE);
254 254
255 tx_flags = RTL8180_TX_DESC_FLAG_OWN | RTL8180_TX_DESC_FLAG_FS | 255 tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
256 RTL8180_TX_DESC_FLAG_LS | 256 RTL818X_TX_DESC_FLAG_LS |
257 (ieee80211_get_tx_rate(dev, info)->hw_value << 24) | 257 (ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
258 skb->len; 258 skb->len;
259 259
260 if (priv->r8185) 260 if (priv->r8185)
261 tx_flags |= RTL8180_TX_DESC_FLAG_DMA | 261 tx_flags |= RTL818X_TX_DESC_FLAG_DMA |
262 RTL8180_TX_DESC_FLAG_NO_ENC; 262 RTL818X_TX_DESC_FLAG_NO_ENC;
263 263
264 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 264 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
265 tx_flags |= RTL8180_TX_DESC_FLAG_RTS; 265 tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
266 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 266 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
267 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 267 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
268 tx_flags |= RTL8180_TX_DESC_FLAG_CTS; 268 tx_flags |= RTL818X_TX_DESC_FLAG_CTS;
269 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 269 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
270 } 270 }
271 271
@@ -446,10 +446,10 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
446 *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb), 446 *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
447 MAX_RX_SIZE, PCI_DMA_FROMDEVICE); 447 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
448 entry->rx_buf = cpu_to_le32(*mapping); 448 entry->rx_buf = cpu_to_le32(*mapping);
449 entry->flags = cpu_to_le32(RTL8180_RX_DESC_FLAG_OWN | 449 entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
450 MAX_RX_SIZE); 450 MAX_RX_SIZE);
451 } 451 }
452 entry->flags |= cpu_to_le32(RTL8180_RX_DESC_FLAG_EOR); 452 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
453 return 0; 453 return 0;
454} 454}
455 455
@@ -615,7 +615,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
615 reg |= RTL818X_CMD_TX_ENABLE; 615 reg |= RTL818X_CMD_TX_ENABLE;
616 rtl818x_iowrite8(priv, &priv->map->CMD, reg); 616 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
617 617
618 priv->mode = IEEE80211_IF_TYPE_MNTR; 618 priv->mode = NL80211_IFTYPE_MONITOR;
619 return 0; 619 return 0;
620 620
621 err_free_rings: 621 err_free_rings:
@@ -633,7 +633,7 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
633 u8 reg; 633 u8 reg;
634 int i; 634 int i;
635 635
636 priv->mode = IEEE80211_IF_TYPE_INVALID; 636 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
637 637
638 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 638 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
639 639
@@ -661,11 +661,11 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
661{ 661{
662 struct rtl8180_priv *priv = dev->priv; 662 struct rtl8180_priv *priv = dev->priv;
663 663
664 if (priv->mode != IEEE80211_IF_TYPE_MNTR) 664 if (priv->mode != NL80211_IFTYPE_MONITOR)
665 return -EOPNOTSUPP; 665 return -EOPNOTSUPP;
666 666
667 switch (conf->type) { 667 switch (conf->type) {
668 case IEEE80211_IF_TYPE_STA: 668 case NL80211_IFTYPE_STATION:
669 priv->mode = conf->type; 669 priv->mode = conf->type;
670 break; 670 break;
671 default: 671 default:
@@ -688,7 +688,7 @@ static void rtl8180_remove_interface(struct ieee80211_hw *dev,
688 struct ieee80211_if_init_conf *conf) 688 struct ieee80211_if_init_conf *conf)
689{ 689{
690 struct rtl8180_priv *priv = dev->priv; 690 struct rtl8180_priv *priv = dev->priv;
691 priv->mode = IEEE80211_IF_TYPE_MNTR; 691 priv->mode = NL80211_IFTYPE_MONITOR;
692 priv->vif = NULL; 692 priv->vif = NULL;
693} 693}
694 694
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
index 5a9515c99960..e82bb4d289e8 100644
--- a/drivers/net/wireless/rtl8187.h
+++ b/drivers/net/wireless/rtl8187.h
@@ -58,12 +58,6 @@ struct rtl8187b_rx_hdr {
58 58
59/* {rtl8187,rtl8187b}_tx_info is in skb */ 59/* {rtl8187,rtl8187b}_tx_info is in skb */
60 60
61/* Tx flags are common between rtl8187 and rtl8187b */
62#define RTL8187_TX_FLAG_NO_ENCRYPT (1 << 15)
63#define RTL8187_TX_FLAG_MORE_FRAG (1 << 17)
64#define RTL8187_TX_FLAG_CTS (1 << 18)
65#define RTL8187_TX_FLAG_RTS (1 << 23)
66
67struct rtl8187_tx_hdr { 61struct rtl8187_tx_hdr {
68 __le32 flags; 62 __le32 flags;
69 __le16 rts_duration; 63 __le16 rts_duration;
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index ca5deb6244e6..e9902613e2ee 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -187,18 +187,18 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
187 } 187 }
188 188
189 flags = skb->len; 189 flags = skb->len;
190 flags |= RTL8187_TX_FLAG_NO_ENCRYPT; 190 flags |= RTL818X_TX_DESC_FLAG_NO_ENC;
191 191
192 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24; 192 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
193 if (ieee80211_has_morefrags(((struct ieee80211_hdr *)skb->data)->frame_control)) 193 if (ieee80211_has_morefrags(((struct ieee80211_hdr *)skb->data)->frame_control))
194 flags |= RTL8187_TX_FLAG_MORE_FRAG; 194 flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
195 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 195 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
196 flags |= RTL8187_TX_FLAG_RTS; 196 flags |= RTL818X_TX_DESC_FLAG_RTS;
197 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 197 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
198 rts_dur = ieee80211_rts_duration(dev, priv->vif, 198 rts_dur = ieee80211_rts_duration(dev, priv->vif,
199 skb->len, info); 199 skb->len, info);
200 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 200 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
201 flags |= RTL8187_TX_FLAG_CTS; 201 flags |= RTL818X_TX_DESC_FLAG_CTS;
202 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 202 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
203 } 203 }
204 204
@@ -354,7 +354,7 @@ static void rtl8187_rx_cb(struct urb *urb)
354 rx_status.freq = dev->conf.channel->center_freq; 354 rx_status.freq = dev->conf.channel->center_freq;
355 rx_status.band = dev->conf.channel->band; 355 rx_status.band = dev->conf.channel->band;
356 rx_status.flag |= RX_FLAG_TSFT; 356 rx_status.flag |= RX_FLAG_TSFT;
357 if (flags & (1 << 13)) 357 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
358 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 358 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
359 ieee80211_rx_irqsafe(dev, skb, &rx_status); 359 ieee80211_rx_irqsafe(dev, skb, &rx_status);
360 360
@@ -836,11 +836,11 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
836 struct rtl8187_priv *priv = dev->priv; 836 struct rtl8187_priv *priv = dev->priv;
837 int i; 837 int i;
838 838
839 if (priv->mode != IEEE80211_IF_TYPE_MNTR) 839 if (priv->mode != NL80211_IFTYPE_MONITOR)
840 return -EOPNOTSUPP; 840 return -EOPNOTSUPP;
841 841
842 switch (conf->type) { 842 switch (conf->type) {
843 case IEEE80211_IF_TYPE_STA: 843 case NL80211_IFTYPE_STATION:
844 priv->mode = conf->type; 844 priv->mode = conf->type;
845 break; 845 break;
846 default: 846 default:
@@ -865,7 +865,7 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev,
865{ 865{
866 struct rtl8187_priv *priv = dev->priv; 866 struct rtl8187_priv *priv = dev->priv;
867 mutex_lock(&priv->conf_mutex); 867 mutex_lock(&priv->conf_mutex);
868 priv->mode = IEEE80211_IF_TYPE_MNTR; 868 priv->mode = NL80211_IFTYPE_MONITOR;
869 priv->vif = NULL; 869 priv->vif = NULL;
870 mutex_unlock(&priv->conf_mutex); 870 mutex_unlock(&priv->conf_mutex);
871} 871}
@@ -1057,7 +1057,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1057 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 1057 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
1058 1058
1059 1059
1060 priv->mode = IEEE80211_IF_TYPE_MNTR; 1060 priv->mode = NL80211_IFTYPE_MONITOR;
1061 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1061 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1062 IEEE80211_HW_RX_INCLUDES_FCS; 1062 IEEE80211_HW_RX_INCLUDES_FCS;
1063 1063
@@ -1184,6 +1184,8 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1184 dev->max_signal = 65; 1184 dev->max_signal = 65;
1185 } 1185 }
1186 1186
1187 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1188
1187 if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b) 1189 if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
1188 printk(KERN_INFO "rtl8187: inconsistency between id with OEM" 1190 printk(KERN_INFO "rtl8187: inconsistency between id with OEM"
1189 " info!\n"); 1191 " info!\n");
diff --git a/drivers/net/wireless/rtl818x.h b/drivers/net/wireless/rtl818x.h
index 00900fe16fce..3538b15211b1 100644
--- a/drivers/net/wireless/rtl818x.h
+++ b/drivers/net/wireless/rtl818x.h
@@ -193,4 +193,39 @@ struct rtl818x_rf_ops {
193 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *); 193 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *);
194}; 194};
195 195
196/* Tx/Rx flags are common between RTL818X chips */
197
198enum rtl818x_tx_desc_flags {
199 RTL818X_TX_DESC_FLAG_NO_ENC = (1 << 15),
200 RTL818X_TX_DESC_FLAG_TX_OK = (1 << 15),
201 RTL818X_TX_DESC_FLAG_SPLCP = (1 << 16),
202 RTL818X_TX_DESC_FLAG_RX_UNDER = (1 << 16),
203 RTL818X_TX_DESC_FLAG_MOREFRAG = (1 << 17),
204 RTL818X_TX_DESC_FLAG_CTS = (1 << 18),
205 RTL818X_TX_DESC_FLAG_RTS = (1 << 23),
206 RTL818X_TX_DESC_FLAG_LS = (1 << 28),
207 RTL818X_TX_DESC_FLAG_FS = (1 << 29),
208 RTL818X_TX_DESC_FLAG_DMA = (1 << 30),
209 RTL818X_TX_DESC_FLAG_OWN = (1 << 31)
210};
211
212enum rtl818x_rx_desc_flags {
213 RTL818X_RX_DESC_FLAG_ICV_ERR = (1 << 12),
214 RTL818X_RX_DESC_FLAG_CRC32_ERR = (1 << 13),
215 RTL818X_RX_DESC_FLAG_PM = (1 << 14),
216 RTL818X_RX_DESC_FLAG_RX_ERR = (1 << 15),
217 RTL818X_RX_DESC_FLAG_BCAST = (1 << 16),
218 RTL818X_RX_DESC_FLAG_PAM = (1 << 17),
219 RTL818X_RX_DESC_FLAG_MCAST = (1 << 18),
220 RTL818X_RX_DESC_FLAG_QOS = (1 << 19), /* RTL8187(B) only */
221 RTL818X_RX_DESC_FLAG_TRSW = (1 << 24), /* RTL8187(B) only */
222 RTL818X_RX_DESC_FLAG_SPLCP = (1 << 25),
223 RTL818X_RX_DESC_FLAG_FOF = (1 << 26),
224 RTL818X_RX_DESC_FLAG_DMA_FAIL = (1 << 27),
225 RTL818X_RX_DESC_FLAG_LS = (1 << 28),
226 RTL818X_RX_DESC_FLAG_FS = (1 << 29),
227 RTL818X_RX_DESC_FLAG_EOR = (1 << 30),
228 RTL818X_RX_DESC_FLAG_OWN = (1 << 31)
229};
230
196#endif /* RTL818X_H */ 231#endif /* RTL818X_H */
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 98df9bc7836a..e368759d1d89 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -25,7 +25,6 @@
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/firmware.h>
29#include <pcmcia/cs_types.h> 28#include <pcmcia/cs_types.h>
30#include <pcmcia/cs.h> 29#include <pcmcia/cs.h>
31#include <pcmcia/cistpl.h> 30#include <pcmcia/cistpl.h>
@@ -34,9 +33,6 @@
34 33
35#include "orinoco.h" 34#include "orinoco.h"
36 35
37static const char primary_fw_name[] = "symbol_sp24t_prim_fw";
38static const char secondary_fw_name[] = "symbol_sp24t_sec_fw";
39
40/********************************************************************/ 36/********************************************************************/
41/* Module stuff */ 37/* Module stuff */
42/********************************************************************/ 38/********************************************************************/
@@ -71,161 +67,11 @@ struct orinoco_pccard {
71static int spectrum_cs_config(struct pcmcia_device *link); 67static int spectrum_cs_config(struct pcmcia_device *link);
72static void spectrum_cs_release(struct pcmcia_device *link); 68static void spectrum_cs_release(struct pcmcia_device *link);
73 69
74/********************************************************************/
75/* Firmware downloader */
76/********************************************************************/
77
78/* Position of PDA in the adapter memory */
79#define EEPROM_ADDR 0x3000
80#define EEPROM_LEN 0x200
81#define PDA_OFFSET 0x100
82
83#define PDA_ADDR (EEPROM_ADDR + PDA_OFFSET)
84#define PDA_WORDS ((EEPROM_LEN - PDA_OFFSET) / 2)
85
86/* Constants for the CISREG_CCSR register */ 70/* Constants for the CISREG_CCSR register */
87#define HCR_RUN 0x07 /* run firmware after reset */ 71#define HCR_RUN 0x07 /* run firmware after reset */
88#define HCR_IDLE 0x0E /* don't run firmware after reset */ 72#define HCR_IDLE 0x0E /* don't run firmware after reset */
89#define HCR_MEM16 0x10 /* memory width bit, should be preserved */ 73#define HCR_MEM16 0x10 /* memory width bit, should be preserved */
90 74
91/*
92 * AUX port access. To unlock the AUX port write the access keys to the
93 * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
94 * register. Then read it and make sure it's HERMES_AUX_ENABLED.
95 */
96#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
97#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
98#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
99
100#define HERMES_AUX_PW0 0xFE01
101#define HERMES_AUX_PW1 0xDC23
102#define HERMES_AUX_PW2 0xBA45
103
104/* End markers */
105#define PDI_END 0x00000000 /* End of PDA */
106#define BLOCK_END 0xFFFFFFFF /* Last image block */
107#define TEXT_END 0x1A /* End of text header */
108
109/*
110 * The following structures have little-endian fields denoted by
111 * the leading underscore. Don't access them directly - use inline
112 * functions defined below.
113 */
114
115/*
116 * The binary image to be downloaded consists of series of data blocks.
117 * Each block has the following structure.
118 */
119struct dblock {
120 __le32 addr; /* adapter address where to write the block */
121 __le16 len; /* length of the data only, in bytes */
122 char data[0]; /* data to be written */
123} __attribute__ ((packed));
124
125/*
126 * Plug Data References are located in in the image after the last data
127 * block. They refer to areas in the adapter memory where the plug data
128 * items with matching ID should be written.
129 */
130struct pdr {
131 __le32 id; /* record ID */
132 __le32 addr; /* adapter address where to write the data */
133 __le32 len; /* expected length of the data, in bytes */
134 char next[0]; /* next PDR starts here */
135} __attribute__ ((packed));
136
137
138/*
139 * Plug Data Items are located in the EEPROM read from the adapter by
140 * primary firmware. They refer to the device-specific data that should
141 * be plugged into the secondary firmware.
142 */
143struct pdi {
144 __le16 len; /* length of ID and data, in words */
145 __le16 id; /* record ID */
146 char data[0]; /* plug data */
147} __attribute__ ((packed));
148
149
150/* Functions for access to little-endian data */
151static inline u32
152dblock_addr(const struct dblock *blk)
153{
154 return le32_to_cpu(blk->addr);
155}
156
157static inline u32
158dblock_len(const struct dblock *blk)
159{
160 return le16_to_cpu(blk->len);
161}
162
163static inline u32
164pdr_id(const struct pdr *pdr)
165{
166 return le32_to_cpu(pdr->id);
167}
168
169static inline u32
170pdr_addr(const struct pdr *pdr)
171{
172 return le32_to_cpu(pdr->addr);
173}
174
175static inline u32
176pdr_len(const struct pdr *pdr)
177{
178 return le32_to_cpu(pdr->len);
179}
180
181static inline u32
182pdi_id(const struct pdi *pdi)
183{
184 return le16_to_cpu(pdi->id);
185}
186
187/* Return length of the data only, in bytes */
188static inline u32
189pdi_len(const struct pdi *pdi)
190{
191 return 2 * (le16_to_cpu(pdi->len) - 1);
192}
193
194
195/* Set address of the auxiliary port */
196static inline void
197spectrum_aux_setaddr(hermes_t *hw, u32 addr)
198{
199 hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
200 hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
201}
202
203
204/* Open access to the auxiliary port */
205static int
206spectrum_aux_open(hermes_t *hw)
207{
208 int i;
209
210 /* Already open? */
211 if (hermes_read_reg(hw, HERMES_CONTROL) == HERMES_AUX_ENABLED)
212 return 0;
213
214 hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
215 hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
216 hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
217 hermes_write_reg(hw, HERMES_CONTROL, HERMES_AUX_ENABLE);
218
219 for (i = 0; i < 20; i++) {
220 udelay(10);
221 if (hermes_read_reg(hw, HERMES_CONTROL) ==
222 HERMES_AUX_ENABLED)
223 return 0;
224 }
225
226 return -EBUSY;
227}
228
229 75
230#define CS_CHECK(fn, ret) \ 76#define CS_CHECK(fn, ret) \
231 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 77 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
@@ -292,275 +138,29 @@ spectrum_reset(struct pcmcia_device *link, int idle)
292 return -ENODEV; 138 return -ENODEV;
293} 139}
294 140
141/********************************************************************/
142/* Device methods */
143/********************************************************************/
295 144
296/*
297 * Scan PDR for the record with the specified RECORD_ID.
298 * If it's not found, return NULL.
299 */
300static struct pdr *
301spectrum_find_pdr(struct pdr *first_pdr, u32 record_id)
302{
303 struct pdr *pdr = first_pdr;
304
305 while (pdr_id(pdr) != PDI_END) {
306 /*
307 * PDR area is currently not terminated by PDI_END.
308 * It's followed by CRC records, which have the type
309 * field where PDR has length. The type can be 0 or 1.
310 */
311 if (pdr_len(pdr) < 2)
312 return NULL;
313
314 /* If the record ID matches, we are done */
315 if (pdr_id(pdr) == record_id)
316 return pdr;
317
318 pdr = (struct pdr *) pdr->next;
319 }
320 return NULL;
321}
322
323
324/* Process one Plug Data Item - find corresponding PDR and plug it */
325static int
326spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi)
327{
328 struct pdr *pdr;
329
330 /* Find the PDI corresponding to this PDR */
331 pdr = spectrum_find_pdr(first_pdr, pdi_id(pdi));
332
333 /* No match is found, safe to ignore */
334 if (!pdr)
335 return 0;
336
337 /* Lengths of the data in PDI and PDR must match */
338 if (pdi_len(pdi) != pdr_len(pdr))
339 return -EINVAL;
340
341 /* do the actual plugging */
342 spectrum_aux_setaddr(hw, pdr_addr(pdr));
343 hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
344
345 return 0;
346}
347
348
349/* Read PDA from the adapter */
350static int
351spectrum_read_pda(hermes_t *hw, __le16 *pda, int pda_len)
352{
353 int ret;
354 int pda_size;
355
356 /* Issue command to read EEPROM */
357 ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
358 if (ret)
359 return ret;
360
361 /* Open auxiliary port */
362 ret = spectrum_aux_open(hw);
363 if (ret)
364 return ret;
365
366 /* read PDA from EEPROM */
367 spectrum_aux_setaddr(hw, PDA_ADDR);
368 hermes_read_words(hw, HERMES_AUXDATA, pda, pda_len / 2);
369
370 /* Check PDA length */
371 pda_size = le16_to_cpu(pda[0]);
372 if (pda_size > pda_len)
373 return -EINVAL;
374
375 return 0;
376}
377
378
379/* Parse PDA and write the records into the adapter */
380static int
381spectrum_apply_pda(hermes_t *hw, const struct dblock *first_block,
382 __le16 *pda)
383{
384 int ret;
385 struct pdi *pdi;
386 struct pdr *first_pdr;
387 const struct dblock *blk = first_block;
388
389 /* Skip all blocks to locate Plug Data References */
390 while (dblock_addr(blk) != BLOCK_END)
391 blk = (struct dblock *) &blk->data[dblock_len(blk)];
392
393 first_pdr = (struct pdr *) blk;
394
395 /* Go through every PDI and plug them into the adapter */
396 pdi = (struct pdi *) (pda + 2);
397 while (pdi_id(pdi) != PDI_END) {
398 ret = spectrum_plug_pdi(hw, first_pdr, pdi);
399 if (ret)
400 return ret;
401
402 /* Increment to the next PDI */
403 pdi = (struct pdi *) &pdi->data[pdi_len(pdi)];
404 }
405 return 0;
406}
407
408
409/* Load firmware blocks into the adapter */
410static int
411spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block)
412{
413 const struct dblock *blk;
414 u32 blkaddr;
415 u32 blklen;
416
417 blk = first_block;
418 blkaddr = dblock_addr(blk);
419 blklen = dblock_len(blk);
420
421 while (dblock_addr(blk) != BLOCK_END) {
422 spectrum_aux_setaddr(hw, blkaddr);
423 hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
424 blklen);
425
426 blk = (struct dblock *) &blk->data[blklen];
427 blkaddr = dblock_addr(blk);
428 blklen = dblock_len(blk);
429 }
430 return 0;
431}
432
433
434/*
435 * Process a firmware image - stop the card, load the firmware, reset
436 * the card and make sure it responds. For the secondary firmware take
437 * care of the PDA - read it and then write it on top of the firmware.
438 */
439static int 145static int
440spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, 146spectrum_cs_hard_reset(struct orinoco_private *priv)
441 const unsigned char *image, int secondary)
442{ 147{
443 int ret; 148 struct orinoco_pccard *card = priv->card;
444 const unsigned char *ptr; 149 struct pcmcia_device *link = card->p_dev;
445 const struct dblock *first_block;
446
447 /* Plug Data Area (PDA) */
448 __le16 pda[PDA_WORDS];
449
450 /* Binary block begins after the 0x1A marker */
451 ptr = image;
452 while (*ptr++ != TEXT_END);
453 first_block = (const struct dblock *) ptr;
454
455 /* Read the PDA */
456 if (secondary) {
457 ret = spectrum_read_pda(hw, pda, sizeof(pda));
458 if (ret)
459 return ret;
460 }
461
462 /* Stop the firmware, so that it can be safely rewritten */
463 ret = spectrum_reset(link, 1);
464 if (ret)
465 return ret;
466
467 /* Program the adapter with new firmware */
468 ret = spectrum_load_blocks(hw, first_block);
469 if (ret)
470 return ret;
471
472 /* Write the PDA to the adapter */
473 if (secondary) {
474 ret = spectrum_apply_pda(hw, first_block, pda);
475 if (ret)
476 return ret;
477 }
478
479 /* Run the firmware */
480 ret = spectrum_reset(link, 0);
481 if (ret)
482 return ret;
483
484 /* Reset hermes chip and make sure it responds */
485 ret = hermes_init(hw);
486
487 /* hermes_reset() should return 0 with the secondary firmware */
488 if (secondary && ret != 0)
489 return -ENODEV;
490 150
491 /* And this should work with any firmware */ 151 /* Soft reset using COR and HCR */
492 if (!hermes_present(hw)) 152 spectrum_reset(link, 0);
493 return -ENODEV;
494 153
495 return 0; 154 return 0;
496} 155}
497 156
498
499/*
500 * Download the firmware into the card, this also does a PCMCIA soft
501 * reset on the card, to make sure it's in a sane state.
502 */
503static int 157static int
504spectrum_dl_firmware(hermes_t *hw, struct pcmcia_device *link) 158spectrum_cs_stop_firmware(struct orinoco_private *priv, int idle)
505{
506 int ret;
507 const struct firmware *fw_entry;
508
509 if (request_firmware(&fw_entry, primary_fw_name,
510 &handle_to_dev(link)) != 0) {
511 printk(KERN_ERR PFX "Cannot find firmware: %s\n",
512 primary_fw_name);
513 return -ENOENT;
514 }
515
516 /* Load primary firmware */
517 ret = spectrum_dl_image(hw, link, fw_entry->data, 0);
518 release_firmware(fw_entry);
519 if (ret) {
520 printk(KERN_ERR PFX "Primary firmware download failed\n");
521 return ret;
522 }
523
524 if (request_firmware(&fw_entry, secondary_fw_name,
525 &handle_to_dev(link)) != 0) {
526 printk(KERN_ERR PFX "Cannot find firmware: %s\n",
527 secondary_fw_name);
528 return -ENOENT;
529 }
530
531 /* Load secondary firmware */
532 ret = spectrum_dl_image(hw, link, fw_entry->data, 1);
533 release_firmware(fw_entry);
534 if (ret) {
535 printk(KERN_ERR PFX "Secondary firmware download failed\n");
536 }
537
538 return ret;
539}
540
541/********************************************************************/
542/* Device methods */
543/********************************************************************/
544
545static int
546spectrum_cs_hard_reset(struct orinoco_private *priv)
547{ 159{
548 struct orinoco_pccard *card = priv->card; 160 struct orinoco_pccard *card = priv->card;
549 struct pcmcia_device *link = card->p_dev; 161 struct pcmcia_device *link = card->p_dev;
550 int err;
551 162
552 if (!hermes_present(&priv->hw)) { 163 return spectrum_reset(link, idle);
553 /* The firmware needs to be reloaded */
554 if (spectrum_dl_firmware(&priv->hw, link) != 0) {
555 printk(KERN_ERR PFX "Firmware download failed\n");
556 err = -ENODEV;
557 }
558 } else {
559 /* Soft reset using COR and HCR */
560 spectrum_reset(link, 0);
561 }
562
563 return 0;
564} 164}
565 165
566/********************************************************************/ 166/********************************************************************/
@@ -582,7 +182,9 @@ spectrum_cs_probe(struct pcmcia_device *link)
582 struct orinoco_private *priv; 182 struct orinoco_private *priv;
583 struct orinoco_pccard *card; 183 struct orinoco_pccard *card;
584 184
585 dev = alloc_orinocodev(sizeof(*card), spectrum_cs_hard_reset); 185 dev = alloc_orinocodev(sizeof(*card), &handle_to_dev(link),
186 spectrum_cs_hard_reset,
187 spectrum_cs_stop_firmware);
586 if (! dev) 188 if (! dev)
587 return -ENOMEM; 189 return -ENOMEM;
588 priv = netdev_priv(dev); 190 priv = netdev_priv(dev);
@@ -784,7 +386,7 @@ spectrum_cs_config(struct pcmcia_device *link)
784 dev->irq = link->irq.AssignedIRQ; 386 dev->irq = link->irq.AssignedIRQ;
785 card->node.major = card->node.minor = 0; 387 card->node.major = card->node.minor = 0;
786 388
787 /* Reset card and download firmware */ 389 /* Reset card */
788 if (spectrum_cs_hard_reset(priv) != 0) { 390 if (spectrum_cs_hard_reset(priv) != 0) {
789 goto failed; 391 goto failed;
790 } 392 }
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 377141995e36..b6d4e04b8ab4 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -79,7 +79,7 @@ static int pc_debug = PCMCIA_DEBUG;
79module_param(pc_debug, int, 0); 79module_param(pc_debug, int, 0);
80#define dprintk(n, format, args...) \ 80#define dprintk(n, format, args...) \
81 { if (pc_debug > (n)) \ 81 { if (pc_debug > (n)) \
82 printk(KERN_INFO "%s: " format "\n", __FUNCTION__ , ##args); } 82 printk(KERN_INFO "%s: " format "\n", __func__ , ##args); }
83#else 83#else
84#define dprintk(n, format, args...) 84#define dprintk(n, format, args...)
85#endif 85#endif
@@ -470,7 +470,7 @@ static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend)
470 spin_unlock_irqrestore(&this->lock, flags); 470 spin_unlock_irqrestore(&this->lock, flags);
471 rc = wait_event_interruptible(this->wait, 471 rc = wait_event_interruptible(this->wait,
472 this->sig_pwr_mgmt_confirm.status != 255); 472 this->sig_pwr_mgmt_confirm.status != 255);
473 printk(KERN_INFO "%s: %s status=%d\n", __FUNCTION__, 473 printk(KERN_INFO "%s: %s status=%d\n", __func__,
474 suspend ? "suspend" : "resume", 474 suspend ? "suspend" : "resume",
475 this->sig_pwr_mgmt_confirm.status); 475 this->sig_pwr_mgmt_confirm.status);
476 goto out; 476 goto out;
@@ -1199,7 +1199,7 @@ static int wl3501_reset_board(struct wl3501_card *this)
1199 } 1199 }
1200 WL3501_NOPLOOP(10); 1200 WL3501_NOPLOOP(10);
1201 } 1201 }
1202 printk(KERN_WARNING "%s: failed to reset the board!\n", __FUNCTION__); 1202 printk(KERN_WARNING "%s: failed to reset the board!\n", __func__);
1203 rc = -ENODEV; 1203 rc = -ENODEV;
1204out: 1204out:
1205 return rc; 1205 return rc;
@@ -1250,7 +1250,7 @@ static int wl3501_init_firmware(struct wl3501_card *this)
1250out: 1250out:
1251 return rc; 1251 return rc;
1252fail: 1252fail:
1253 printk(KERN_WARNING "%s: failed!\n", __FUNCTION__); 1253 printk(KERN_WARNING "%s: failed!\n", __func__);
1254 goto out; 1254 goto out;
1255} 1255}
1256 1256
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
index cc36126cee88..1907eafb9b16 100644
--- a/drivers/net/wireless/zd1211rw/Makefile
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_ZD1211RW) += zd1211rw.o 1obj-$(CONFIG_ZD1211RW) += zd1211rw.o
2 2
3zd1211rw-objs := zd_chip.o zd_ieee80211.o zd_mac.o \ 3zd1211rw-objs := zd_chip.o zd_mac.o \
4 zd_rf_al2230.o zd_rf_rf2959.o \ 4 zd_rf_al2230.o zd_rf_rf2959.o \
5 zd_rf_al7230b.o zd_rf_uw2453.o \ 5 zd_rf_al7230b.o zd_rf_uw2453.o \
6 zd_rf.o zd_usb.o 6 zd_rf.o zd_usb.o
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 0acb5c345734..e0ac58b8ff1f 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -28,7 +28,6 @@
28 28
29#include "zd_def.h" 29#include "zd_def.h"
30#include "zd_chip.h" 30#include "zd_chip.h"
31#include "zd_ieee80211.h"
32#include "zd_mac.h" 31#include "zd_mac.h"
33#include "zd_rf.h" 32#include "zd_rf.h"
34 33
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.c b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
deleted file mode 100644
index d8dc41ec0e5d..000000000000
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.c
+++ /dev/null
@@ -1,100 +0,0 @@
1/* ZD1211 USB-WLAN driver for Linux
2 *
3 * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
4 * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * In the long term, we'll probably find a better way of handling regulatory
23 * requirements outside of the driver.
24 */
25
26#include <linux/kernel.h>
27#include <net/mac80211.h>
28
29#include "zd_ieee80211.h"
30#include "zd_mac.h"
31
32struct channel_range {
33 u8 regdomain;
34 u8 start;
35 u8 end; /* exclusive (channel must be less than end) */
36};
37
38static const struct channel_range channel_ranges[] = {
39 { ZD_REGDOMAIN_FCC, 1, 12 },
40 { ZD_REGDOMAIN_IC, 1, 12 },
41 { ZD_REGDOMAIN_ETSI, 1, 14 },
42 { ZD_REGDOMAIN_JAPAN, 1, 14 },
43 { ZD_REGDOMAIN_SPAIN, 1, 14 },
44 { ZD_REGDOMAIN_FRANCE, 1, 14 },
45
46 /* Japan originally only had channel 14 available (see CHNL_ID 0x40 in
47 * 802.11). However, in 2001 the range was extended to include channels
48 * 1-13. The ZyDAS devices still use the old region code but are
49 * designed to allow the extra channel access in Japan. */
50 { ZD_REGDOMAIN_JAPAN_ADD, 1, 15 },
51};
52
53static const struct channel_range *zd_channel_range(u8 regdomain)
54{
55 int i;
56 for (i = 0; i < ARRAY_SIZE(channel_ranges); i++) {
57 const struct channel_range *range = &channel_ranges[i];
58 if (range->regdomain == regdomain)
59 return range;
60 }
61 return NULL;
62}
63
64#define CHAN_TO_IDX(chan) ((chan) - 1)
65
66static void unmask_bg_channels(struct ieee80211_hw *hw,
67 const struct channel_range *range,
68 struct ieee80211_supported_band *sband)
69{
70 u8 channel;
71
72 for (channel = range->start; channel < range->end; channel++) {
73 struct ieee80211_channel *chan =
74 &sband->channels[CHAN_TO_IDX(channel)];
75 chan->flags = 0;
76 }
77}
78
79void zd_geo_init(struct ieee80211_hw *hw, u8 regdomain)
80{
81 struct zd_mac *mac = zd_hw_mac(hw);
82 const struct channel_range *range;
83
84 dev_dbg(zd_mac_dev(mac), "regdomain %#02x\n", regdomain);
85
86 range = zd_channel_range(regdomain);
87 if (!range) {
88 /* The vendor driver overrides the regulatory domain and
89 * allowed channel registers and unconditionally restricts
90 * available channels to 1-11 everywhere. Match their
91 * questionable behaviour only for regdomains which we don't
92 * recognise. */
93 dev_warn(zd_mac_dev(mac), "Unrecognised regulatory domain: "
94 "%#02x. Defaulting to FCC.\n", regdomain);
95 range = zd_channel_range(ZD_REGDOMAIN_FCC);
96 }
97
98 unmask_bg_channels(hw, range, &mac->band);
99}
100
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
deleted file mode 100644
index 26b79f197587..000000000000
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h
+++ /dev/null
@@ -1,95 +0,0 @@
1/* ZD1211 USB-WLAN driver for Linux
2 *
3 * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
4 * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef _ZD_IEEE80211_H
22#define _ZD_IEEE80211_H
23
24#include <net/mac80211.h>
25
26/* Additional definitions from the standards.
27 */
28
29#define ZD_REGDOMAIN_FCC 0x10
30#define ZD_REGDOMAIN_IC 0x20
31#define ZD_REGDOMAIN_ETSI 0x30
32#define ZD_REGDOMAIN_SPAIN 0x31
33#define ZD_REGDOMAIN_FRANCE 0x32
34#define ZD_REGDOMAIN_JAPAN_ADD 0x40
35#define ZD_REGDOMAIN_JAPAN 0x41
36
37enum {
38 MIN_CHANNEL24 = 1,
39 MAX_CHANNEL24 = 14,
40};
41
42void zd_geo_init(struct ieee80211_hw *hw, u8 regdomain);
43
44#define ZD_PLCP_SERVICE_LENGTH_EXTENSION 0x80
45
46struct ofdm_plcp_header {
47 u8 prefix[3];
48 __le16 service;
49} __attribute__((packed));
50
51static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header)
52{
53 return header->prefix[0] & 0xf;
54}
55
56/* The following defines give the encoding of the 4-bit rate field in the
57 * OFDM (802.11a/802.11g) PLCP header. Notify that these values are used to
58 * define the zd-rate values for OFDM.
59 *
60 * See the struct zd_ctrlset definition in zd_mac.h.
61 */
62#define ZD_OFDM_PLCP_RATE_6M 0xb
63#define ZD_OFDM_PLCP_RATE_9M 0xf
64#define ZD_OFDM_PLCP_RATE_12M 0xa
65#define ZD_OFDM_PLCP_RATE_18M 0xe
66#define ZD_OFDM_PLCP_RATE_24M 0x9
67#define ZD_OFDM_PLCP_RATE_36M 0xd
68#define ZD_OFDM_PLCP_RATE_48M 0x8
69#define ZD_OFDM_PLCP_RATE_54M 0xc
70
71struct cck_plcp_header {
72 u8 signal;
73 u8 service;
74 __le16 length;
75 __le16 crc16;
76} __attribute__((packed));
77
78static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header)
79{
80 return header->signal;
81}
82
83/* These defines give the encodings of the signal field in the 802.11b PLCP
84 * header. The signal field gives the bit rate of the following packet. Even
85 * if technically wrong we use CCK here also for the 1 MBit/s and 2 MBit/s
86 * rate to stay consistent with Zydas and our use of the term.
87 *
88 * Notify that these values are *not* used in the zd-rates.
89 */
90#define ZD_CCK_PLCP_SIGNAL_1M 0x0a
91#define ZD_CCK_PLCP_SIGNAL_2M 0x14
92#define ZD_CCK_PLCP_SIGNAL_5M5 0x37
93#define ZD_CCK_PLCP_SIGNAL_11M 0x6e
94
95#endif /* _ZD_IEEE80211_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 4d7b98b05030..fe1867b25ff7 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -3,7 +3,7 @@
3 * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de> 3 * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
4 * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org> 4 * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
5 * Copyright (C) 2006-2007 Michael Wu <flamingice@sourmilk.net> 5 * Copyright (C) 2006-2007 Michael Wu <flamingice@sourmilk.net>
6 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> 6 * Copyright (C) 2007-2008 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -29,9 +29,23 @@
29#include "zd_def.h" 29#include "zd_def.h"
30#include "zd_chip.h" 30#include "zd_chip.h"
31#include "zd_mac.h" 31#include "zd_mac.h"
32#include "zd_ieee80211.h"
33#include "zd_rf.h" 32#include "zd_rf.h"
34 33
34struct zd_reg_alpha2_map {
35 u32 reg;
36 char alpha2[2];
37};
38
39static struct zd_reg_alpha2_map reg_alpha2_map[] = {
40 { ZD_REGDOMAIN_FCC, "US" },
41 { ZD_REGDOMAIN_IC, "CA" },
42 { ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
43 { ZD_REGDOMAIN_JAPAN, "JP" },
44 { ZD_REGDOMAIN_JAPAN_ADD, "JP" },
45 { ZD_REGDOMAIN_SPAIN, "ES" },
46 { ZD_REGDOMAIN_FRANCE, "FR" },
47};
48
35/* This table contains the hardware specific values for the modulation rates. */ 49/* This table contains the hardware specific values for the modulation rates. */
36static const struct ieee80211_rate zd_rates[] = { 50static const struct ieee80211_rate zd_rates[] = {
37 { .bitrate = 10, 51 { .bitrate = 10,
@@ -95,6 +109,21 @@ static void housekeeping_init(struct zd_mac *mac);
95static void housekeeping_enable(struct zd_mac *mac); 109static void housekeeping_enable(struct zd_mac *mac);
96static void housekeeping_disable(struct zd_mac *mac); 110static void housekeeping_disable(struct zd_mac *mac);
97 111
112static int zd_reg2alpha2(u8 regdomain, char *alpha2)
113{
114 unsigned int i;
115 struct zd_reg_alpha2_map *reg_map;
116 for (i = 0; i < ARRAY_SIZE(reg_alpha2_map); i++) {
117 reg_map = &reg_alpha2_map[i];
118 if (regdomain == reg_map->reg) {
119 alpha2[0] = reg_map->alpha2[0];
120 alpha2[1] = reg_map->alpha2[1];
121 return 0;
122 }
123 }
124 return 1;
125}
126
98int zd_mac_preinit_hw(struct ieee80211_hw *hw) 127int zd_mac_preinit_hw(struct ieee80211_hw *hw)
99{ 128{
100 int r; 129 int r;
@@ -115,6 +144,7 @@ int zd_mac_init_hw(struct ieee80211_hw *hw)
115 int r; 144 int r;
116 struct zd_mac *mac = zd_hw_mac(hw); 145 struct zd_mac *mac = zd_hw_mac(hw);
117 struct zd_chip *chip = &mac->chip; 146 struct zd_chip *chip = &mac->chip;
147 char alpha2[2];
118 u8 default_regdomain; 148 u8 default_regdomain;
119 149
120 r = zd_chip_enable_int(chip); 150 r = zd_chip_enable_int(chip);
@@ -139,7 +169,9 @@ int zd_mac_init_hw(struct ieee80211_hw *hw)
139 if (r) 169 if (r)
140 goto disable_int; 170 goto disable_int;
141 171
142 zd_geo_init(hw, mac->regdomain); 172 r = zd_reg2alpha2(mac->regdomain, alpha2);
173 if (!r)
174 regulatory_hint(hw->wiphy, alpha2, NULL);
143 175
144 r = 0; 176 r = 0;
145disable_int: 177disable_int:
@@ -579,7 +611,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
579 611
580 q = &zd_hw_mac(hw)->ack_wait_queue; 612 q = &zd_hw_mac(hw)->ack_wait_queue;
581 spin_lock_irqsave(&q->lock, flags); 613 spin_lock_irqsave(&q->lock, flags);
582 for (skb = q->next; skb != (struct sk_buff *)q; skb = skb->next) { 614 skb_queue_walk(q, skb) {
583 struct ieee80211_hdr *tx_hdr; 615 struct ieee80211_hdr *tx_hdr;
584 616
585 tx_hdr = (struct ieee80211_hdr *)skb->data; 617 tx_hdr = (struct ieee80211_hdr *)skb->data;
@@ -684,15 +716,15 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
684{ 716{
685 struct zd_mac *mac = zd_hw_mac(hw); 717 struct zd_mac *mac = zd_hw_mac(hw);
686 718
687 /* using IEEE80211_IF_TYPE_INVALID to indicate no mode selected */ 719 /* using NL80211_IFTYPE_UNSPECIFIED to indicate no mode selected */
688 if (mac->type != IEEE80211_IF_TYPE_INVALID) 720 if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
689 return -EOPNOTSUPP; 721 return -EOPNOTSUPP;
690 722
691 switch (conf->type) { 723 switch (conf->type) {
692 case IEEE80211_IF_TYPE_MNTR: 724 case NL80211_IFTYPE_MONITOR:
693 case IEEE80211_IF_TYPE_MESH_POINT: 725 case NL80211_IFTYPE_MESH_POINT:
694 case IEEE80211_IF_TYPE_STA: 726 case NL80211_IFTYPE_STATION:
695 case IEEE80211_IF_TYPE_IBSS: 727 case NL80211_IFTYPE_ADHOC:
696 mac->type = conf->type; 728 mac->type = conf->type;
697 break; 729 break;
698 default: 730 default:
@@ -706,7 +738,7 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
706 struct ieee80211_if_init_conf *conf) 738 struct ieee80211_if_init_conf *conf)
707{ 739{
708 struct zd_mac *mac = zd_hw_mac(hw); 740 struct zd_mac *mac = zd_hw_mac(hw);
709 mac->type = IEEE80211_IF_TYPE_INVALID; 741 mac->type = NL80211_IFTYPE_UNSPECIFIED;
710 zd_set_beacon_interval(&mac->chip, 0); 742 zd_set_beacon_interval(&mac->chip, 0);
711 zd_write_mac_addr(&mac->chip, NULL); 743 zd_write_mac_addr(&mac->chip, NULL);
712} 744}
@@ -725,8 +757,8 @@ static int zd_op_config_interface(struct ieee80211_hw *hw,
725 int associated; 757 int associated;
726 int r; 758 int r;
727 759
728 if (mac->type == IEEE80211_IF_TYPE_MESH_POINT || 760 if (mac->type == NL80211_IFTYPE_MESH_POINT ||
729 mac->type == IEEE80211_IF_TYPE_IBSS) { 761 mac->type == NL80211_IFTYPE_ADHOC) {
730 associated = true; 762 associated = true;
731 if (conf->changed & IEEE80211_IFCC_BEACON) { 763 if (conf->changed & IEEE80211_IFCC_BEACON) {
732 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 764 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
@@ -753,7 +785,7 @@ static int zd_op_config_interface(struct ieee80211_hw *hw,
753 return 0; 785 return 0;
754} 786}
755 787
756void zd_process_intr(struct work_struct *work) 788static void zd_process_intr(struct work_struct *work)
757{ 789{
758 u16 int_status; 790 u16 int_status;
759 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr); 791 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
@@ -923,7 +955,7 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
923 spin_lock_init(&mac->lock); 955 spin_lock_init(&mac->lock);
924 mac->hw = hw; 956 mac->hw = hw;
925 957
926 mac->type = IEEE80211_IF_TYPE_INVALID; 958 mac->type = NL80211_IFTYPE_UNSPECIFIED;
927 959
928 memcpy(mac->channels, zd_channels, sizeof(zd_channels)); 960 memcpy(mac->channels, zd_channels, sizeof(zd_channels));
929 memcpy(mac->rates, zd_rates, sizeof(zd_rates)); 961 memcpy(mac->rates, zd_rates, sizeof(zd_rates));
@@ -937,6 +969,11 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
937 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 969 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
938 IEEE80211_HW_SIGNAL_DB; 970 IEEE80211_HW_SIGNAL_DB;
939 971
972 hw->wiphy->interface_modes =
973 BIT(NL80211_IFTYPE_MESH_POINT) |
974 BIT(NL80211_IFTYPE_STATION) |
975 BIT(NL80211_IFTYPE_ADHOC);
976
940 hw->max_signal = 100; 977 hw->max_signal = 100;
941 hw->queues = 1; 978 hw->queues = 1;
942 hw->extra_tx_headroom = sizeof(struct zd_ctrlset); 979 hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 18c1d56d3dd7..4c05d3ee4c37 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -25,7 +25,6 @@
25#include <net/mac80211.h> 25#include <net/mac80211.h>
26 26
27#include "zd_chip.h" 27#include "zd_chip.h"
28#include "zd_ieee80211.h"
29 28
30struct zd_ctrlset { 29struct zd_ctrlset {
31 u8 modulation; 30 u8 modulation;
@@ -187,6 +186,70 @@ struct zd_mac {
187 unsigned int pass_ctrl:1; 186 unsigned int pass_ctrl:1;
188}; 187};
189 188
189#define ZD_REGDOMAIN_FCC 0x10
190#define ZD_REGDOMAIN_IC 0x20
191#define ZD_REGDOMAIN_ETSI 0x30
192#define ZD_REGDOMAIN_SPAIN 0x31
193#define ZD_REGDOMAIN_FRANCE 0x32
194#define ZD_REGDOMAIN_JAPAN_ADD 0x40
195#define ZD_REGDOMAIN_JAPAN 0x41
196
197enum {
198 MIN_CHANNEL24 = 1,
199 MAX_CHANNEL24 = 14,
200};
201
202#define ZD_PLCP_SERVICE_LENGTH_EXTENSION 0x80
203
204struct ofdm_plcp_header {
205 u8 prefix[3];
206 __le16 service;
207} __attribute__((packed));
208
209static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header)
210{
211 return header->prefix[0] & 0xf;
212}
213
214/* The following defines give the encoding of the 4-bit rate field in the
215 * OFDM (802.11a/802.11g) PLCP header. Notify that these values are used to
216 * define the zd-rate values for OFDM.
217 *
218 * See the struct zd_ctrlset definition in zd_mac.h.
219 */
220#define ZD_OFDM_PLCP_RATE_6M 0xb
221#define ZD_OFDM_PLCP_RATE_9M 0xf
222#define ZD_OFDM_PLCP_RATE_12M 0xa
223#define ZD_OFDM_PLCP_RATE_18M 0xe
224#define ZD_OFDM_PLCP_RATE_24M 0x9
225#define ZD_OFDM_PLCP_RATE_36M 0xd
226#define ZD_OFDM_PLCP_RATE_48M 0x8
227#define ZD_OFDM_PLCP_RATE_54M 0xc
228
229struct cck_plcp_header {
230 u8 signal;
231 u8 service;
232 __le16 length;
233 __le16 crc16;
234} __attribute__((packed));
235
236static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header)
237{
238 return header->signal;
239}
240
241/* These defines give the encodings of the signal field in the 802.11b PLCP
242 * header. The signal field gives the bit rate of the following packet. Even
243 * if technically wrong we use CCK here also for the 1 MBit/s and 2 MBit/s
244 * rate to stay consistent with Zydas and our use of the term.
245 *
246 * Notify that these values are *not* used in the zd-rates.
247 */
248#define ZD_CCK_PLCP_SIGNAL_1M 0x0a
249#define ZD_CCK_PLCP_SIGNAL_2M 0x14
250#define ZD_CCK_PLCP_SIGNAL_5M5 0x37
251#define ZD_CCK_PLCP_SIGNAL_11M 0x6e
252
190static inline struct zd_mac *zd_hw_mac(struct ieee80211_hw *hw) 253static inline struct zd_mac *zd_hw_mac(struct ieee80211_hw *hw)
191{ 254{
192 return hw->priv; 255 return hw->priv;
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index ec4129312813..7207bfd2e6cd 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -23,7 +23,7 @@
23 23
24#include "zd_def.h" 24#include "zd_def.h"
25#include "zd_rf.h" 25#include "zd_rf.h"
26#include "zd_ieee80211.h" 26#include "zd_mac.h"
27#include "zd_chip.h" 27#include "zd_chip.h"
28 28
29static const char * const rfs[] = { 29static const char * const rfs[] = {
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 82634a2f1b1d..1aad599816f7 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -352,11 +352,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
352 continue; 352 continue;
353 r_size = r->end - r->start + 1; 353 r_size = r->end - r->start + 1;
354 /* For bridges size != alignment */ 354 /* For bridges size != alignment */
355 align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start; 355 align = resource_alignment(r);
356 order = __ffs(align) - 20; 356 order = __ffs(align) - 20;
357 if (order > 11) { 357 if (order > 11) {
358 dev_warn(&dev->dev, "BAR %d too large: " 358 dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
359 "%#016llx-%#016llx\n", i, 359 "%#016llx-%#016llx\n", i,
360 (unsigned long long)align,
360 (unsigned long long)r->start, 361 (unsigned long long)r->start,
361 (unsigned long long)r->end); 362 (unsigned long long)r->end);
362 r->flags = 0; 363 r->flags = 0;
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index d7e9f2152df0..95015cbfd33f 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -405,8 +405,6 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
405 405
406 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: 406 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
407 extended_irq = &res->data.extended_irq; 407 extended_irq = &res->data.extended_irq;
408 if (extended_irq->producer_consumer == ACPI_PRODUCER)
409 return AE_OK;
410 408
411 if (extended_irq->interrupt_count == 0) 409 if (extended_irq->interrupt_count == 0)
412 pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED); 410 pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 6ea349aba3ba..b184367637d0 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -800,7 +800,6 @@ static void __exit cmos_do_remove(struct device *dev)
800static int cmos_suspend(struct device *dev, pm_message_t mesg) 800static int cmos_suspend(struct device *dev, pm_message_t mesg)
801{ 801{
802 struct cmos_rtc *cmos = dev_get_drvdata(dev); 802 struct cmos_rtc *cmos = dev_get_drvdata(dev);
803 int do_wake = device_may_wakeup(dev);
804 unsigned char tmp; 803 unsigned char tmp;
805 804
806 /* only the alarm might be a wakeup event source */ 805 /* only the alarm might be a wakeup event source */
@@ -809,7 +808,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
809 if (tmp & (RTC_PIE|RTC_AIE|RTC_UIE)) { 808 if (tmp & (RTC_PIE|RTC_AIE|RTC_UIE)) {
810 unsigned char mask; 809 unsigned char mask;
811 810
812 if (do_wake) 811 if (device_may_wakeup(dev))
813 mask = RTC_IRQMASK & ~RTC_AIE; 812 mask = RTC_IRQMASK & ~RTC_AIE;
814 else 813 else
815 mask = RTC_IRQMASK; 814 mask = RTC_IRQMASK;
@@ -837,6 +836,17 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
837 return 0; 836 return 0;
838} 837}
839 838
839/* We want RTC alarms to wake us from e.g. ACPI G2/S5 "soft off", even
840 * after a detour through G3 "mechanical off", although the ACPI spec
841 * says wakeup should only work from G1/S4 "hibernate". To most users,
842 * distinctions between S4 and S5 are pointless. So when the hardware
843 * allows, don't draw that distinction.
844 */
845static inline int cmos_poweroff(struct device *dev)
846{
847 return cmos_suspend(dev, PMSG_HIBERNATE);
848}
849
840static int cmos_resume(struct device *dev) 850static int cmos_resume(struct device *dev)
841{ 851{
842 struct cmos_rtc *cmos = dev_get_drvdata(dev); 852 struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -884,6 +894,12 @@ static int cmos_resume(struct device *dev)
884#else 894#else
885#define cmos_suspend NULL 895#define cmos_suspend NULL
886#define cmos_resume NULL 896#define cmos_resume NULL
897
898static inline int cmos_poweroff(struct device *dev)
899{
900 return -ENOSYS;
901}
902
887#endif 903#endif
888 904
889/*----------------------------------------------------------------*/ 905/*----------------------------------------------------------------*/
@@ -903,10 +919,6 @@ static int cmos_resume(struct device *dev)
903static int __devinit 919static int __devinit
904cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) 920cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
905{ 921{
906 /* REVISIT paranoia argues for a shutdown notifier, since PNP
907 * drivers can't provide shutdown() methods to disable IRQs.
908 * Or better yet, fix PNP to allow those methods...
909 */
910 if (pnp_port_start(pnp,0) == 0x70 && !pnp_irq_valid(pnp,0)) 922 if (pnp_port_start(pnp,0) == 0x70 && !pnp_irq_valid(pnp,0))
911 /* Some machines contain a PNP entry for the RTC, but 923 /* Some machines contain a PNP entry for the RTC, but
912 * don't define the IRQ. It should always be safe to 924 * don't define the IRQ. It should always be safe to
@@ -942,6 +954,13 @@ static int cmos_pnp_resume(struct pnp_dev *pnp)
942#define cmos_pnp_resume NULL 954#define cmos_pnp_resume NULL
943#endif 955#endif
944 956
957static void cmos_pnp_shutdown(struct device *pdev)
958{
959 if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(pdev))
960 return;
961
962 cmos_do_shutdown();
963}
945 964
946static const struct pnp_device_id rtc_ids[] = { 965static const struct pnp_device_id rtc_ids[] = {
947 { .id = "PNP0b00", }, 966 { .id = "PNP0b00", },
@@ -961,6 +980,10 @@ static struct pnp_driver cmos_pnp_driver = {
961 .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, 980 .flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
962 .suspend = cmos_pnp_suspend, 981 .suspend = cmos_pnp_suspend,
963 .resume = cmos_pnp_resume, 982 .resume = cmos_pnp_resume,
983 .driver = {
984 .name = (char *)driver_name,
985 .shutdown = cmos_pnp_shutdown,
986 }
964}; 987};
965 988
966#endif /* CONFIG_PNP */ 989#endif /* CONFIG_PNP */
@@ -986,6 +1009,9 @@ static int __exit cmos_platform_remove(struct platform_device *pdev)
986 1009
987static void cmos_platform_shutdown(struct platform_device *pdev) 1010static void cmos_platform_shutdown(struct platform_device *pdev)
988{ 1011{
1012 if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pdev->dev))
1013 return;
1014
989 cmos_do_shutdown(); 1015 cmos_do_shutdown();
990} 1016}
991 1017
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 9f996ec881ce..dd70bf73ce9d 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -51,10 +51,11 @@ EXPORT_SYMBOL(rtc_year_days);
51 */ 51 */
52void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) 52void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
53{ 53{
54 unsigned int days, month, year; 54 unsigned int month, year;
55 int days;
55 56
56 days = time / 86400; 57 days = time / 86400;
57 time -= days * 86400; 58 time -= (unsigned int) days * 86400;
58 59
59 /* day of the week, 1970-01-01 was a Thursday */ 60 /* day of the week, 1970-01-01 was a Thursday */
60 tm->tm_wday = (days + 4) % 7; 61 tm->tm_wday = (days + 4) % 7;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e5e7d7856454..2a2bc89aba83 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1875,6 +1875,7 @@ static int sd_probe(struct device *dev)
1875 1875
1876 dev_set_drvdata(dev, sdkp); 1876 dev_set_drvdata(dev, sdkp);
1877 add_disk(gd); 1877 add_disk(gd);
1878 blk_register_filter(gd);
1878 sd_dif_config_host(sdkp); 1879 sd_dif_config_host(sdkp);
1879 1880
1880 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 1881 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
@@ -1908,6 +1909,7 @@ static int sd_remove(struct device *dev)
1908 struct scsi_disk *sdkp = dev_get_drvdata(dev); 1909 struct scsi_disk *sdkp = dev_get_drvdata(dev);
1909 1910
1910 device_del(&sdkp->dev); 1911 device_del(&sdkp->dev);
1912 blk_unregister_filter(sdkp->disk);
1911 del_gendisk(sdkp->disk); 1913 del_gendisk(sdkp->disk);
1912 sd_shutdown(dev); 1914 sd_shutdown(dev);
1913 1915
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 27f5bfd1def3..3292965bfd84 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -656,6 +656,7 @@ static int sr_probe(struct device *dev)
656 dev_set_drvdata(dev, cd); 656 dev_set_drvdata(dev, cd);
657 disk->flags |= GENHD_FL_REMOVABLE; 657 disk->flags |= GENHD_FL_REMOVABLE;
658 add_disk(disk); 658 add_disk(disk);
659 blk_register_filter(disk);
659 660
660 sdev_printk(KERN_DEBUG, sdev, 661 sdev_printk(KERN_DEBUG, sdev,
661 "Attached scsi CD-ROM %s\n", cd->cdi.name); 662 "Attached scsi CD-ROM %s\n", cd->cdi.name);
@@ -894,6 +895,7 @@ static int sr_remove(struct device *dev)
894{ 895{
895 struct scsi_cd *cd = dev_get_drvdata(dev); 896 struct scsi_cd *cd = dev_get_drvdata(dev);
896 897
898 blk_unregister_filter(cd->disk);
897 del_gendisk(cd->disk); 899 del_gendisk(cd->disk);
898 900
899 mutex_lock(&sr_ref_mutex); 901 mutex_lock(&sr_ref_mutex);
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 342e12fb1c25..9ccc563d8730 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1908,15 +1908,23 @@ static int serial8250_startup(struct uart_port *port)
1908 * kick the UART on a regular basis. 1908 * kick the UART on a regular basis.
1909 */ 1909 */
1910 if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) { 1910 if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
1911 up->bugs |= UART_BUG_THRE;
1911 pr_debug("ttyS%d - using backup timer\n", port->line); 1912 pr_debug("ttyS%d - using backup timer\n", port->line);
1912 up->timer.function = serial8250_backup_timeout;
1913 up->timer.data = (unsigned long)up;
1914 mod_timer(&up->timer, jiffies +
1915 poll_timeout(up->port.timeout) + HZ / 5);
1916 } 1913 }
1917 } 1914 }
1918 1915
1919 /* 1916 /*
1917 * The above check will only give an accurate result the first time
1918 * the port is opened so this value needs to be preserved.
1919 */
1920 if (up->bugs & UART_BUG_THRE) {
1921 up->timer.function = serial8250_backup_timeout;
1922 up->timer.data = (unsigned long)up;
1923 mod_timer(&up->timer, jiffies +
1924 poll_timeout(up->port.timeout) + HZ / 5);
1925 }
1926
1927 /*
1920 * If the "interrupt" for this port doesn't correspond with any 1928 * If the "interrupt" for this port doesn't correspond with any
1921 * hardware interrupt, we use a timer-based system. The original 1929 * hardware interrupt, we use a timer-based system. The original
1922 * driver used to do this with IRQ0. 1930 * driver used to do this with IRQ0.
diff --git a/drivers/serial/8250.h b/drivers/serial/8250.h
index 78c00162b04e..520260326f3d 100644
--- a/drivers/serial/8250.h
+++ b/drivers/serial/8250.h
@@ -47,6 +47,7 @@ struct serial8250_config {
47#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */ 47#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
48#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */ 48#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
49#define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */ 49#define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */
50#define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */
50 51
51#define PROBE_RSA (1 << 0) 52#define PROBE_RSA (1 << 0)
52#define PROBE_ANY (~0) 53#define PROBE_ANY (~0)
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index f883dcfffe06..d5cde051806b 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -327,11 +327,9 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
327 s8 gain; 327 s8 gain;
328 u16 loc[3]; 328 u16 loc[3];
329 329
330 if (out->revision == 3) { /* rev 3 moved MAC */ 330 if (out->revision == 3) /* rev 3 moved MAC */
331 loc[0] = SSB_SPROM3_IL0MAC; 331 loc[0] = SSB_SPROM3_IL0MAC;
332 loc[1] = SSB_SPROM3_ET0MAC; 332 else {
333 loc[2] = SSB_SPROM3_ET1MAC;
334 } else {
335 loc[0] = SSB_SPROM1_IL0MAC; 333 loc[0] = SSB_SPROM1_IL0MAC;
336 loc[1] = SSB_SPROM1_ET0MAC; 334 loc[1] = SSB_SPROM1_ET0MAC;
337 loc[2] = SSB_SPROM1_ET1MAC; 335 loc[2] = SSB_SPROM1_ET1MAC;
@@ -340,13 +338,15 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
340 v = in[SPOFF(loc[0]) + i]; 338 v = in[SPOFF(loc[0]) + i];
341 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v); 339 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
342 } 340 }
343 for (i = 0; i < 3; i++) { 341 if (out->revision < 3) { /* only rev 1-2 have et0, et1 */
344 v = in[SPOFF(loc[1]) + i]; 342 for (i = 0; i < 3; i++) {
345 *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v); 343 v = in[SPOFF(loc[1]) + i];
346 } 344 *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v);
347 for (i = 0; i < 3; i++) { 345 }
348 v = in[SPOFF(loc[2]) + i]; 346 for (i = 0; i < 3; i++) {
349 *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v); 347 v = in[SPOFF(loc[2]) + i];
348 *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v);
349 }
350 } 350 }
351 SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0); 351 SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0);
352 SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A, 352 SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A,
@@ -399,30 +399,33 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
399 out->antenna_gain.ghz5.a3 = gain; 399 out->antenna_gain.ghz5.a3 = gain;
400} 400}
401 401
402static void sprom_extract_r4(struct ssb_sprom *out, const u16 *in) 402static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
403{ 403{
404 int i; 404 int i;
405 u16 v; 405 u16 v;
406 u16 il0mac_offset;
406 407
407 /* extract the equivalent of the r1 variables */ 408 if (out->revision == 4)
409 il0mac_offset = SSB_SPROM4_IL0MAC;
410 else
411 il0mac_offset = SSB_SPROM5_IL0MAC;
412 /* extract the MAC address */
408 for (i = 0; i < 3; i++) { 413 for (i = 0; i < 3; i++) {
409 v = in[SPOFF(SSB_SPROM4_IL0MAC) + i]; 414 v = in[SPOFF(il0mac_offset) + i];
410 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v); 415 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
411 } 416 }
412 for (i = 0; i < 3; i++) {
413 v = in[SPOFF(SSB_SPROM4_ET0MAC) + i];
414 *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v);
415 }
416 for (i = 0; i < 3; i++) {
417 v = in[SPOFF(SSB_SPROM4_ET1MAC) + i];
418 *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v);
419 }
420 SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0); 417 SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0);
421 SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A, 418 SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A,
422 SSB_SPROM4_ETHPHY_ET1A_SHIFT); 419 SSB_SPROM4_ETHPHY_ET1A_SHIFT);
423 SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0); 420 if (out->revision == 4) {
424 SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0); 421 SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
425 SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0); 422 SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
423 SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
424 } else {
425 SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0);
426 SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0);
427 SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0);
428 }
426 SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A, 429 SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A,
427 SSB_SPROM4_ANTAVAIL_A_SHIFT); 430 SSB_SPROM4_ANTAVAIL_A_SHIFT);
428 SPEX(ant_available_bg, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_BG, 431 SPEX(ant_available_bg, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_BG,
@@ -433,12 +436,21 @@ static void sprom_extract_r4(struct ssb_sprom *out, const u16 *in)
433 SPEX(maxpwr_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_MAXP_A_MASK, 0); 436 SPEX(maxpwr_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_MAXP_A_MASK, 0);
434 SPEX(itssi_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_ITSSI_A, 437 SPEX(itssi_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_ITSSI_A,
435 SSB_SPROM4_ITSSI_A_SHIFT); 438 SSB_SPROM4_ITSSI_A_SHIFT);
436 SPEX(gpio0, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P0, 0); 439 if (out->revision == 4) {
437 SPEX(gpio1, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P1, 440 SPEX(gpio0, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P0, 0);
438 SSB_SPROM4_GPIOA_P1_SHIFT); 441 SPEX(gpio1, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P1,
439 SPEX(gpio2, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P2, 0); 442 SSB_SPROM4_GPIOA_P1_SHIFT);
440 SPEX(gpio3, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P3, 443 SPEX(gpio2, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P2, 0);
441 SSB_SPROM4_GPIOB_P3_SHIFT); 444 SPEX(gpio3, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P3,
445 SSB_SPROM4_GPIOB_P3_SHIFT);
446 } else {
447 SPEX(gpio0, SSB_SPROM5_GPIOA, SSB_SPROM5_GPIOA_P0, 0);
448 SPEX(gpio1, SSB_SPROM5_GPIOA, SSB_SPROM5_GPIOA_P1,
449 SSB_SPROM5_GPIOA_P1_SHIFT);
450 SPEX(gpio2, SSB_SPROM5_GPIOB, SSB_SPROM5_GPIOB_P2, 0);
451 SPEX(gpio3, SSB_SPROM5_GPIOB, SSB_SPROM5_GPIOB_P3,
452 SSB_SPROM5_GPIOB_P3_SHIFT);
453 }
442 454
443 /* Extract the antenna gain values. */ 455 /* Extract the antenna gain values. */
444 SPEX(antenna_gain.ghz24.a0, SSB_SPROM4_AGAIN01, 456 SPEX(antenna_gain.ghz24.a0, SSB_SPROM4_AGAIN01,
@@ -462,6 +474,8 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
462 474
463 out->revision = in[size - 1] & 0x00FF; 475 out->revision = in[size - 1] & 0x00FF;
464 ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision); 476 ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision);
477 memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */
478 memset(out->et1mac, 0xFF, 6);
465 if ((bus->chip_id & 0xFF00) == 0x4400) { 479 if ((bus->chip_id & 0xFF00) == 0x4400) {
466 /* Workaround: The BCM44XX chip has a stupid revision 480 /* Workaround: The BCM44XX chip has a stupid revision
467 * number stored in the SPROM. 481 * number stored in the SPROM.
@@ -471,16 +485,16 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
471 } else if (bus->chip_id == 0x4321) { 485 } else if (bus->chip_id == 0x4321) {
472 /* the BCM4328 has a chipid == 0x4321 and a rev 4 SPROM */ 486 /* the BCM4328 has a chipid == 0x4321 and a rev 4 SPROM */
473 out->revision = 4; 487 out->revision = 4;
474 sprom_extract_r4(out, in); 488 sprom_extract_r45(out, in);
475 } else { 489 } else {
476 if (out->revision == 0) 490 if (out->revision == 0)
477 goto unsupported; 491 goto unsupported;
478 if (out->revision >= 1 && out->revision <= 3) { 492 if (out->revision >= 1 && out->revision <= 3) {
479 sprom_extract_r123(out, in); 493 sprom_extract_r123(out, in);
480 } 494 }
481 if (out->revision == 4) 495 if (out->revision == 4 || out->revision == 5)
482 sprom_extract_r4(out, in); 496 sprom_extract_r45(out, in);
483 if (out->revision >= 5) 497 if (out->revision > 5)
484 goto unsupported; 498 goto unsupported;
485 } 499 }
486 500
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 07228721cafe..0da2c25bab3b 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -640,14 +640,13 @@ static void usbatm_cancel_send(struct usbatm_data *instance,
640 640
641 atm_dbg(instance, "%s entered\n", __func__); 641 atm_dbg(instance, "%s entered\n", __func__);
642 spin_lock_irq(&instance->sndqueue.lock); 642 spin_lock_irq(&instance->sndqueue.lock);
643 for (skb = instance->sndqueue.next, n = skb->next; 643 skb_queue_walk_safe(&instance->sndqueue, skb, n) {
644 skb != (struct sk_buff *)&instance->sndqueue;
645 skb = n, n = skb->next)
646 if (UDSL_SKB(skb)->atm.vcc == vcc) { 644 if (UDSL_SKB(skb)->atm.vcc == vcc) {
647 atm_dbg(instance, "%s: popping skb 0x%p\n", __func__, skb); 645 atm_dbg(instance, "%s: popping skb 0x%p\n", __func__, skb);
648 __skb_unlink(skb, &instance->sndqueue); 646 __skb_unlink(skb, &instance->sndqueue);
649 usbatm_pop(vcc, skb); 647 usbatm_pop(vcc, skb);
650 } 648 }
649 }
651 spin_unlock_irq(&instance->sndqueue.lock); 650 spin_unlock_irq(&instance->sndqueue.lock);
652 651
653 tasklet_disable(&instance->tx_channel.tasklet); 652 tasklet_disable(&instance->tx_channel.tasklet);
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 1eb64d08b60a..95b3ec89c126 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -208,7 +208,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
208 if (cpu_is_omap16xx()) 208 if (cpu_is_omap16xx())
209 ocpi_enable(); 209 ocpi_enable();
210 210
211#ifdef CONFIG_ARCH_OMAP_OTG 211#ifdef CONFIG_USB_OTG
212 if (need_transceiver) { 212 if (need_transceiver) {
213 ohci->transceiver = otg_get_transceiver(); 213 ohci->transceiver = otg_get_transceiver();
214 if (ohci->transceiver) { 214 if (ohci->transceiver) {
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 7b74238ad1c7..e980766bb84b 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -161,7 +161,7 @@ static int usb_console_setup(struct console *co, char *options)
161 if (serial->type->set_termios) { 161 if (serial->type->set_termios) {
162 termios->c_cflag = cflag; 162 termios->c_cflag = cflag;
163 tty_termios_encode_baud_rate(termios, baud, baud); 163 tty_termios_encode_baud_rate(termios, baud, baud);
164 serial->type->set_termios(NULL, port, &dummy); 164 serial->type->set_termios(tty, port, &dummy);
165 165
166 port->port.tty = NULL; 166 port->port.tty = NULL;
167 kfree(termios); 167 kfree(termios);
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 9c5925927ece..5a24c6411d34 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -939,7 +939,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
939 ret = register_framebuffer(info); 939 ret = register_framebuffer(info);
940 if (ret < 0) { 940 if (ret < 0) {
941 dev_err(dev, "failed to register framebuffer device: %d\n", ret); 941 dev_err(dev, "failed to register framebuffer device: %d\n", ret);
942 goto free_cmap; 942 goto reset_drvdata;
943 } 943 }
944 944
945 /* add selected videomode to modelist */ 945 /* add selected videomode to modelist */
@@ -955,7 +955,8 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
955 955
956 return 0; 956 return 0;
957 957
958 958reset_drvdata:
959 dev_set_drvdata(dev, NULL);
959free_cmap: 960free_cmap:
960 fb_dealloc_cmap(&info->cmap); 961 fb_dealloc_cmap(&info->cmap);
961unregister_irqs: 962unregister_irqs:
@@ -992,10 +993,11 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
992{ 993{
993 struct device *dev = &pdev->dev; 994 struct device *dev = &pdev->dev;
994 struct fb_info *info = dev_get_drvdata(dev); 995 struct fb_info *info = dev_get_drvdata(dev);
995 struct atmel_lcdfb_info *sinfo = info->par; 996 struct atmel_lcdfb_info *sinfo;
996 997
997 if (!sinfo) 998 if (!info || !info->par)
998 return 0; 999 return 0;
1000 sinfo = info->par;
999 1001
1000 cancel_work_sync(&sinfo->task); 1002 cancel_work_sync(&sinfo->task);
1001 exit_backlight(sinfo); 1003 exit_backlight(sinfo);
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index c14b2435d23e..e729fb279645 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -628,27 +628,18 @@ static long cirrusfb_get_mclk(long freq, int bpp, long *div)
628static int cirrusfb_check_var(struct fb_var_screeninfo *var, 628static int cirrusfb_check_var(struct fb_var_screeninfo *var,
629 struct fb_info *info) 629 struct fb_info *info)
630{ 630{
631 int nom, den; /* translyting from pixels->bytes */ 631 int yres;
632 int yres, i; 632 /* memory size in pixels */
633 static struct { int xres, yres; } modes[] = 633 unsigned pixels = info->screen_size * 8 / var->bits_per_pixel;
634 { { 1600, 1280 },
635 { 1280, 1024 },
636 { 1024, 768 },
637 { 800, 600 },
638 { 640, 480 },
639 { -1, -1 } };
640 634
641 switch (var->bits_per_pixel) { 635 switch (var->bits_per_pixel) {
642 case 1: 636 case 1:
643 nom = 4; 637 pixels /= 4;
644 den = 8;
645 break; /* 8 pixel per byte, only 1/4th of mem usable */ 638 break; /* 8 pixel per byte, only 1/4th of mem usable */
646 case 8: 639 case 8:
647 case 16: 640 case 16:
648 case 24: 641 case 24:
649 case 32: 642 case 32:
650 nom = var->bits_per_pixel / 8;
651 den = 1;
652 break; /* 1 pixel == 1 byte */ 643 break; /* 1 pixel == 1 byte */
653 default: 644 default:
654 printk(KERN_ERR "cirrusfb: mode %dx%dx%d rejected..." 645 printk(KERN_ERR "cirrusfb: mode %dx%dx%d rejected..."
@@ -658,43 +649,29 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var,
658 return -EINVAL; 649 return -EINVAL;
659 } 650 }
660 651
661 if (var->xres * nom / den * var->yres > info->screen_size) { 652 if (var->xres_virtual < var->xres)
662 printk(KERN_ERR "cirrusfb: mode %dx%dx%d rejected..." 653 var->xres_virtual = var->xres;
663 "resolution too high to fit into video memory!\n",
664 var->xres, var->yres, var->bits_per_pixel);
665 DPRINTK("EXIT - EINVAL error\n");
666 return -EINVAL;
667 }
668
669 /* use highest possible virtual resolution */ 654 /* use highest possible virtual resolution */
670 if (var->xres_virtual == -1 && 655 if (var->yres_virtual == -1) {
671 var->yres_virtual == -1) { 656 var->yres_virtual = pixels / var->xres_virtual;
672 printk(KERN_INFO
673 "cirrusfb: using maximum available virtual resolution\n");
674 for (i = 0; modes[i].xres != -1; i++) {
675 int size = modes[i].xres * nom / den * modes[i].yres;
676 if (size < info->screen_size / 2)
677 break;
678 }
679 if (modes[i].xres == -1) {
680 printk(KERN_ERR "cirrusfb: could not find a virtual "
681 "resolution that fits into video memory!!\n");
682 DPRINTK("EXIT - EINVAL error\n");
683 return -EINVAL;
684 }
685 var->xres_virtual = modes[i].xres;
686 var->yres_virtual = modes[i].yres;
687 657
688 printk(KERN_INFO "cirrusfb: virtual resolution set to " 658 printk(KERN_INFO "cirrusfb: virtual resolution set to "
689 "maximum of %dx%d\n", var->xres_virtual, 659 "maximum of %dx%d\n", var->xres_virtual,
690 var->yres_virtual); 660 var->yres_virtual);
691 } 661 }
692
693 if (var->xres_virtual < var->xres)
694 var->xres_virtual = var->xres;
695 if (var->yres_virtual < var->yres) 662 if (var->yres_virtual < var->yres)
696 var->yres_virtual = var->yres; 663 var->yres_virtual = var->yres;
697 664
665 if (var->xres_virtual * var->yres_virtual > pixels) {
666 printk(KERN_ERR "cirrusfb: mode %dx%dx%d rejected... "
667 "virtual resolution too high to fit into video memory!\n",
668 var->xres_virtual, var->yres_virtual,
669 var->bits_per_pixel);
670 DPRINTK("EXIT - EINVAL error\n");
671 return -EINVAL;
672 }
673
674
698 if (var->xoffset < 0) 675 if (var->xoffset < 0)
699 var->xoffset = 0; 676 var->xoffset = 0;
700 if (var->yoffset < 0) 677 if (var->yoffset < 0)
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 77aafcfae037..4599a4385bc9 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -95,7 +95,6 @@ static inline int mtrr_del(int reg, unsigned long base,
95#define VOODOO5_MAX_PIXCLOCK 350000 95#define VOODOO5_MAX_PIXCLOCK 350000
96 96
97static struct fb_fix_screeninfo tdfx_fix __devinitdata = { 97static struct fb_fix_screeninfo tdfx_fix __devinitdata = {
98 .id = "3Dfx",
99 .type = FB_TYPE_PACKED_PIXELS, 98 .type = FB_TYPE_PACKED_PIXELS,
100 .visual = FB_VISUAL_PSEUDOCOLOR, 99 .visual = FB_VISUAL_PSEUDOCOLOR,
101 .ypanstep = 1, 100 .ypanstep = 1,
@@ -426,7 +425,7 @@ static unsigned long do_lfb_size(struct tdfx_par *par, unsigned short dev_id)
426 if (dev_id < PCI_DEVICE_ID_3DFX_VOODOO5) { 425 if (dev_id < PCI_DEVICE_ID_3DFX_VOODOO5) {
427 /* Banshee/Voodoo3 */ 426 /* Banshee/Voodoo3 */
428 chip_size = 2; 427 chip_size = 2;
429 if (has_sgram && (draminit0 & DRAMINIT0_SGRAM_TYPE)) 428 if (has_sgram && !(draminit0 & DRAMINIT0_SGRAM_TYPE))
430 chip_size = 1; 429 chip_size = 1;
431 } else { 430 } else {
432 /* Voodoo4/5 */ 431 /* Voodoo4/5 */
@@ -1200,15 +1199,15 @@ static int __devinit tdfxfb_probe(struct pci_dev *pdev,
1200 /* Configure the default fb_fix_screeninfo first */ 1199 /* Configure the default fb_fix_screeninfo first */
1201 switch (pdev->device) { 1200 switch (pdev->device) {
1202 case PCI_DEVICE_ID_3DFX_BANSHEE: 1201 case PCI_DEVICE_ID_3DFX_BANSHEE:
1203 strcat(tdfx_fix.id, " Banshee"); 1202 strcpy(tdfx_fix.id, "3Dfx Banshee");
1204 default_par->max_pixclock = BANSHEE_MAX_PIXCLOCK; 1203 default_par->max_pixclock = BANSHEE_MAX_PIXCLOCK;
1205 break; 1204 break;
1206 case PCI_DEVICE_ID_3DFX_VOODOO3: 1205 case PCI_DEVICE_ID_3DFX_VOODOO3:
1207 strcat(tdfx_fix.id, " Voodoo3"); 1206 strcpy(tdfx_fix.id, "3Dfx Voodoo3");
1208 default_par->max_pixclock = VOODOO3_MAX_PIXCLOCK; 1207 default_par->max_pixclock = VOODOO3_MAX_PIXCLOCK;
1209 break; 1208 break;
1210 case PCI_DEVICE_ID_3DFX_VOODOO5: 1209 case PCI_DEVICE_ID_3DFX_VOODOO5:
1211 strcat(tdfx_fix.id, " Voodoo5"); 1210 strcpy(tdfx_fix.id, "3Dfx Voodoo5");
1212 default_par->max_pixclock = VOODOO5_MAX_PIXCLOCK; 1211 default_par->max_pixclock = VOODOO5_MAX_PIXCLOCK;
1213 break; 1212 break;
1214 } 1213 }
diff --git a/firmware/Makefile b/firmware/Makefile
index 9fe86041f86e..ca8cd305ff93 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -22,6 +22,7 @@ fw-external-y := $(subst ",,$(CONFIG_EXTRA_FIRMWARE))
22 22
23fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin 23fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin
24fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw 24fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw
25fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin
25fw-shipped-$(CONFIG_COMPUTONE) += intelliport2.bin 26fw-shipped-$(CONFIG_COMPUTONE) += intelliport2.bin
26fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin 27fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
27fw-shipped-$(CONFIG_SMCTR) += tr_smctr.bin 28fw-shipped-$(CONFIG_SMCTR) += tr_smctr.bin
@@ -146,15 +147,27 @@ $(patsubst %,$(obj)/%.gen.o, $(fw-external-y)): $(obj)/%.gen.o: $(fwdir)/%
146$(obj)/%: $(obj)/%.ihex | $(objtree)/$(obj)/$$(dir %) 147$(obj)/%: $(obj)/%.ihex | $(objtree)/$(obj)/$$(dir %)
147 $(call cmd,ihex) 148 $(call cmd,ihex)
148 149
150# Don't depend on ihex2fw if we're installing and it already exists.
151# Putting it after | in the dependencies doesn't seem sufficient when
152# we're installing after a cross-compile, because ihex2fw has dependencies
153# on stuff like /usr/lib/gcc/ppc64-redhat-linux/4.3.0/include/stddef.h and
154# thus wants to be rebuilt. Which it can't be, if the prebuilt kernel tree
155# is exported read-only for someone to run 'make install'.
156ifeq ($(INSTALL):$(wildcard $(obj)/ihex2fw),install:$(obj)/ihex2fw)
157ihex2fw_dep :=
158else
159ihex2fw_dep := $(obj)/ihex2fw
160endif
161
149# .HEX is also Intel HEX, but where the offset and length in each record 162# .HEX is also Intel HEX, but where the offset and length in each record
150# is actually meaningful, because the firmware has to be loaded in a certain 163# is actually meaningful, because the firmware has to be loaded in a certain
151# order rather than as a single binary blob. Thus, we convert them into our 164# order rather than as a single binary blob. Thus, we convert them into our
152# more compact binary representation of ihex records (<linux/ihex.h>) 165# more compact binary representation of ihex records (<linux/ihex.h>)
153$(obj)/%.fw: $(obj)/%.HEX $(obj)/ihex2fw | $(objtree)/$(obj)/$$(dir %) 166$(obj)/%.fw: $(obj)/%.HEX $(ihex2fw_dep) | $(objtree)/$(obj)/$$(dir %)
154 $(call cmd,ihex2fw) 167 $(call cmd,ihex2fw)
155 168
156# .H16 is our own modified form of Intel HEX, with 16-bit length for records. 169# .H16 is our own modified form of Intel HEX, with 16-bit length for records.
157$(obj)/%.fw: $(obj)/%.H16 $(obj)/ihex2fw | $(objtree)/$(obj)/$$(dir %) 170$(obj)/%.fw: $(obj)/%.H16 $(ihex2fw_dep) | $(objtree)/$(obj)/$$(dir %)
158 $(call cmd,h16tofw) 171 $(call cmd,h16tofw)
159 172
160$(firmware-dirs): 173$(firmware-dirs):
diff --git a/firmware/WHENCE b/firmware/WHENCE
index 66c51b275e9e..57002cdecd42 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -339,3 +339,13 @@ Licence: Allegedly GPLv2+, but no source visible. Marked:
339Found in hex form in kernel source. 339Found in hex form in kernel source.
340 340
341-------------------------------------------------------------------------- 341--------------------------------------------------------------------------
342
343Driver: CASSINI - Sun Cassini
344
345File: sun/cassini.bin
346
347Licence: Unknown
348
349Found in hex form in kernel source.
350
351--------------------------------------------------------------------------
diff --git a/firmware/sun/cassini.bin.ihex b/firmware/sun/cassini.bin.ihex
new file mode 100644
index 000000000000..5cd7ae70e71f
--- /dev/null
+++ b/firmware/sun/cassini.bin.ihex
@@ -0,0 +1,143 @@
1:1000000000827E82090000000000008E8EFFCE82FA
2:1000100025FF010FCE8426FF0111CE853DDFE58649
3:1000200039B78FF87EC3C2964784F38A009747CECC
4:100030008233FF010F9646840C8104270B96468479
5:100040000C810827577E8425964784F38A049747B6
6:10005000CE8254FF010F9646840C81042638B612D6
7:1000600020842026037E8425967BD67CFE8F56BD79
8:10007000F7B6FE8F4EBDEC8EBDFAF7BDF728CE82E7
9:1000800082FF010F9646840C8104260AB612208452
10:100090002027B57E8425BDF71F7E841F964784F3F5
11:1000A0008A089747DEE1AD00CE82AFFF010F7E8464
12:1000B00025964185102606962385402706BDED002E
13:1000C0007E83A2DE42BDEB8E9624840827037E83C6
14:1000D000DF967BD67CFE8F56BDF7B6FE8F50BDEC0B
15:1000E0008EBDFAF78611C649BDE412CE82EFFF013C
16:1000F0000F9646840C81002717C649BDE491240D54
17:10010000B612208520260CCE82C1FF010F7E8425E9
18:100110007E8416FE8F52BDEC8EBDFAF7866AC64904
19:10012000BDE412CE8327FF010F9646840C81002781
20:100130000AC649BDE49125067E84257E8416B6183C
21:1001400070BB19702A0481AF2E19967BF62007FA2E
22:100150002027C4388138270BF62007FA2027CB0840
23:100160007E82D3BDF7668674C649BDE412CE837124
24:10017000FF010F9646840C8108260AC649BDE4910A
25:1001800025067E84257E8416BDF73E260EBDE50934
26:100190002606CE82C1FF010F7E8425FE8F54BDEC62
27:1001A0008EBDFAF7BDF733860FC651BDE412CE837C
28:1001B000B2FF010F9646840C8108265CB61220849B
29:1001C0003F813A271C9623854027037E8425C6510C
30:1001D000BDE49125037E8425CE82C1FF010F7E847C
31:1001E00025BDF8377C007ACE83EEFF010F7E842593
32:1001F0009646840C81082620962484082629B61861
33:1002000082BB1982B1013B2209B6122084378132A8
34:100210002715BDF8447E82C1BDF71FBDF844BDFC63
35:1002200029CE8225FF010F39964784FC8A00974723
36:10023000CE8434FF011196468403810227037E8514
37:100240001E964784FC8A029747DEE1AD008601B71F
38:100250001251BDF714B6103184FDB71031BDF81E30
39:100260009681D682FE8F5ABDF7B6FE8F5CBDEC8EAE
40:10027000BDFAF78608D600C51026028B20C651BDF0
41:10028000E412CE8486FF011196468403810227037F
42:100290007E850FC651BDE49125037E851E9644855B
43:1002A00010260AB61250BA013C851027A8BDF76681
44:1002B000CE84B7FF01117E851E96468403810226F7
45:1002C00050B612308403810127037E851E96448533
46:1002D000102613B61250BA013C85102609CE84535D
47:1002E000FF01117E851EB610318A02B71031BD851F
48:1002F0001FBDF8377C0080CE84FEFF01117E851E75
49:100300009646840381022609B612308403810127B0
50:100310000FBDF844BDF70BBDFC29CE8426FF0111AB
51:1003200039D622C40FB61230BA12328404270D9681
52:100330002285042705CA107E853ACA20D72239862D
53:1003400000978318CE1C00BDEB4696578501270207
54:100350004F3985022701397F8F7D8604B7120486C5
55:1003600008B712078610B7120C8607B71206B68FA9
56:100370007DB712708601BA1204B71204010101019F
57:100380000101B6120484FE8A02B7120401010101C0
58:10039000010186FDB41204B71204B612008408816C
59:1003A000082716B68F7D810C27088B04B78F7D7EBA
60:1003B000856C860397407E896E8607B712065FF7C5
61:1003C0008F825FF78F7FF78F70F78F71F78F72F7DC
62:1003D0008F73F78F74F78F75F78F76F78F77F78FA7
63:1003E00078F78F79F78F7AF78F7BB612048A10B778
64:1003F000120486E4B71270B71207F71205F7120954
65:100400008608BA1204B7120486F7B41204B71204AD
66:10041000010101010101B61208277F8180260B86A8
67:1004200008CE8F79BD897B7E868E8140260B86041F
68:10043000CE8F76BD897B7E868E8120260B8602CE6E
69:100440008F73BD897B7E868E8110260B8601CE8FB1
70:1004500070BD897B7E868E8108260B8608CE8F79BB
71:10046000BD897F7E868E8104260B8604CE8F76BD65
72:10047000897F7E868E8102260B8A02CE8F73BD898C
73:100480007F7E868E810126088601CE8F70BD897F92
74:10049000B68F7F810F26037E8747B61209840381BA
75:1004A0000327067C12097E85FEB6120684078107A3
76:1004B00027088B01B712067E86D5B68F82260A7C66
77:1004C0008F824FB712067E85C0B61206843F813FE9
78:1004D00027108B08B71206B6120984FCB712097EE2
79:1004E00085FECE8F7018CE8F84C60CBD896FCE8FDF
80:1004F0008418CE8F70C60CBD896FD683C14F2D0373
81:100500007E8740B68F7F8107270F810B2715810DCE
82:10051000271B810E27217E8740F78F7B8602B78FAE
83:100520007A201CF78F788602B78F772012F78F75A5
84:100530008602B78F742008F78F728602B78F717E9C
85:100540008747860497407E896ECE8F72BD89F7CE2D
86:100550008F75BD89F7CE8F78BD89F7CE8F7BBD892A
87:10056000F74FB78F7DB78F81B68F7227477C8F7D0E
88:10057000B68F75273F7C8F7DB68F7827377C8F7D30
89:10058000B68F7B272F7F8F7D7C8F817A8F72271B81
90:100590007C8F7D7A8F7527167C8F7D7A8F782711D7
91:1005A0007C8F7D7A8F7B270C7E87837A8F757A8FFD
92:1005B000787A8F7BCEC1FCF68F7D3AA600B7127099
93:1005C000B68F7226037E87FAB68F75260A18CE8FED
94:1005D00073BD89D57E87FAB68F78260A18CE8F76B6
95:1005E000BD89D57E87FAB68F7B260A18CE8F79BD56
96:1005F00089D57E87FA860597407E8900B68F7581FA
97:10060000072EF2F61206C4F81BB71206B68F7881D1
98:10061000072EE2484848F61206C4C71BB71206B6B2
99:100620008F7B81072ECFF61205C4F81BB712058603
100:1006300000F68F71BD89948601F68F74BD8994860A
101:1006400002F68F77BD89948603F68F7ABD8994CEA2
102:100650008F70A60181012707810327037E8866A684
103:1006600000B88F818401260B8C8F792C0E08080826
104:100670007E8850B612048A40B71204B6120484FB76
105:1006800084EFB71204B6120736B68F7C4848B7120B
106:10069000078601BA1204B7120401010101010186A3
107:1006A000FEB41204B712048602BA1204B71204860A
108:1006B000FDB41204B7120432B71207B61200840850
109:1006C0008108270F7C82082607867697407E896EF0
110:1006D0007E86ECB68F7F810F273CBDE6C7B7120D33
111:1006E000BDE6CBB612048A20B71204CEFFFFB612C5
112:1006F00000810C26050926F6271CB6120484DFB7F4
113:100700001204968381072C057C0083200696838B38
114:100710000897837E85417F8F7E8680B7120C860185
115:10072000B78F7DB6120C847FB7120C8A80B7120C7B
116:10073000860ABD8A06B6120A2A09B6120CBA8F7D3D
117:10074000B7120CB68F7E8160271A8B20B78F7EB6CA
118:10075000120C849FBA8F7EB7120CB68F7D48B78F6C
119:100760007D7E8921B612048A20B71204BD8A0A4F01
120:1007700039A60018A7000818085A26F539366C0063
121:1007800032BA8F7FB78F7FB612098403A701B612E2
122:1007900006843FA70239368603B78F8032C1002610
123:1007A00006B78F7C7E89C9C1012718C102270CC1F9
124:1007B000032700F68F800505F78F80F68F800505EB
125:1007C000F78F80F68F800505F78F80F68F8053F4C2
126:1007D00012071BB7120739CE8F70A60018E6001853
127:1007E000A700E700A60118E60118A701E701A60285
128:1007F00018E60218A702E70239A6008407E600C43B
129:10080000385454541BA700394A26FD399622840FC8
130:1008100097228601B78F70B61207B78F71F6120C48
131:10082000C40FC80FF78F72F68F72B68F71840327CB
132:10083000148101271C81022724F48F70272A962215
133:100840008A807E8A64F48F70271E96228A107E8AA0
134:1008500064F48F70271296228A207E8A64F48F7047
135:10086000270696228A409722748F71748F71788F31
136:1008700070B68F70851027AFD622C41058B612708C
137:1008800081E4273681E1260C96228420441BD6225F
138:10089000C4CF20235881C6260D9622844044441B91
139:1008A000D622C4AF2011588127260F962284804477
140:1008B00044441BD622C46F1B972239270C7C820626
141:0D08C000BDD9EDB682077E8AB97F82063968
142:00000001FF
143/* firmware patch for NS_DP83065 */
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index f9e4ad97a79e..06e521a945c3 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -9,7 +9,10 @@ files (e.g. "cp -a") to Windows servers. For mkdir and create honor setgid bit
9on parent directory when server supports Unix Extensions but not POSIX 9on parent directory when server supports Unix Extensions but not POSIX
10create. Update cifs.upcall version to handle new Kerberos sec flags 10create. Update cifs.upcall version to handle new Kerberos sec flags
11(this requires update of cifs.upcall program from Samba). Fix memory leak 11(this requires update of cifs.upcall program from Samba). Fix memory leak
12on dns_upcall (resolving DFS referralls). 12on dns_upcall (resolving DFS referralls). Fix plain text password
13authentication (requires setting SecurityFlags to 0x30030 to enable
14lanman and plain text though). Fix writes to be at correct offset when
15file is open with O_APPEND and file is on a directio (forcediretio) mount.
13 16
14Version 1.53 17Version 1.53
15------------ 18------------
diff --git a/fs/cifs/README b/fs/cifs/README
index 68b5c1169d9d..bd2343d4c6a6 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -542,10 +542,20 @@ SecurityFlags Flags which control security negotiation and
542 hashing mechanisms (as "must use") on the other hand 542 hashing mechanisms (as "must use") on the other hand
543 does not make much sense. Default flags are 543 does not make much sense. Default flags are
544 0x07007 544 0x07007
545 (NTLM, NTLMv2 and packet signing allowed). Maximum 545 (NTLM, NTLMv2 and packet signing allowed). The maximum
546 allowable flags if you want to allow mounts to servers 546 allowable flags if you want to allow mounts to servers
547 using weaker password hashes is 0x37037 (lanman, 547 using weaker password hashes is 0x37037 (lanman,
548 plaintext, ntlm, ntlmv2, signing allowed): 548 plaintext, ntlm, ntlmv2, signing allowed). Some
549 SecurityFlags require the corresponding menuconfig
550 options to be enabled (lanman and plaintext require
551 CONFIG_CIFS_WEAK_PW_HASH for example). Enabling
552 plaintext authentication currently requires also
553 enabling lanman authentication in the security flags
554 because the cifs module only supports sending
555 laintext passwords using the older lanman dialect
556 form of the session setup SMB. (e.g. for authentication
557 using plain text passwords, set the SecurityFlags
558 to 0x30030):
549 559
550 may use packet signing 0x00001 560 may use packet signing 0x00001
551 must use packet signing 0x01001 561 must use packet signing 0x01001
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 83fd40dc1ef0..bd5f13d38450 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -294,6 +294,7 @@ void calc_lanman_hash(struct cifsSesInfo *ses, char *lnm_session_key)
294 294
295 if ((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0) 295 if ((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0)
296 if (extended_security & CIFSSEC_MAY_PLNTXT) { 296 if (extended_security & CIFSSEC_MAY_PLNTXT) {
297 memset(lnm_session_key, 0, CIFS_SESS_KEY_SIZE);
297 memcpy(lnm_session_key, password_with_pad, 298 memcpy(lnm_session_key, password_with_pad,
298 CIFS_ENCPWD_SIZE); 299 CIFS_ENCPWD_SIZE);
299 return; 300 return;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ff14d14903a0..cbefe1f1f9fe 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -833,6 +833,10 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
833 return -EBADF; 833 return -EBADF;
834 open_file = (struct cifsFileInfo *) file->private_data; 834 open_file = (struct cifsFileInfo *) file->private_data;
835 835
836 rc = generic_write_checks(file, poffset, &write_size, 0);
837 if (rc)
838 return rc;
839
836 xid = GetXid(); 840 xid = GetXid();
837 841
838 if (*poffset > file->f_path.dentry->d_inode->i_size) 842 if (*poffset > file->f_path.dentry->d_inode->i_size)
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index b537fad3bf50..252fdc0567f1 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -409,6 +409,8 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
409#ifdef CONFIG_CIFS_WEAK_PW_HASH 409#ifdef CONFIG_CIFS_WEAK_PW_HASH
410 char lnm_session_key[CIFS_SESS_KEY_SIZE]; 410 char lnm_session_key[CIFS_SESS_KEY_SIZE];
411 411
412 pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
413
412 /* no capabilities flags in old lanman negotiation */ 414 /* no capabilities flags in old lanman negotiation */
413 415
414 pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); 416 pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 9abcd2b329f7..e9b20173fef3 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1279,6 +1279,12 @@ static int nfs_parse_mount_options(char *raw,
1279 } 1279 }
1280 } 1280 }
1281 1281
1282 if (errors > 0) {
1283 dfprintk(MOUNT, "NFS: parsing encountered %d error%s\n",
1284 errors, (errors == 1 ? "" : "s"));
1285 if (!sloppy)
1286 return 0;
1287 }
1282 return 1; 1288 return 1;
1283 1289
1284out_nomem: 1290out_nomem:
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index b6ed38380ab8..54b8b4140c8f 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -443,7 +443,7 @@ init_state(struct posix_acl_state *state, int cnt)
443 * enough space for either: 443 * enough space for either:
444 */ 444 */
445 alloc = sizeof(struct posix_ace_state_array) 445 alloc = sizeof(struct posix_ace_state_array)
446 + cnt*sizeof(struct posix_ace_state); 446 + cnt*sizeof(struct posix_user_ace_state);
447 state->users = kzalloc(alloc, GFP_KERNEL); 447 state->users = kzalloc(alloc, GFP_KERNEL);
448 if (!state->users) 448 if (!state->users)
449 return -ENOMEM; 449 return -ENOMEM;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 2e51adac65de..e5b51ffafc6c 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -867,11 +867,6 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
867 int slack_bytes; 867 int slack_bytes;
868 __be32 status; 868 __be32 status;
869 869
870 status = nfserr_resource;
871 cstate = cstate_alloc();
872 if (cstate == NULL)
873 goto out;
874
875 resp->xbuf = &rqstp->rq_res; 870 resp->xbuf = &rqstp->rq_res;
876 resp->p = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len; 871 resp->p = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len;
877 resp->tagp = resp->p; 872 resp->tagp = resp->p;
@@ -890,6 +885,11 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
890 if (args->minorversion > NFSD_SUPPORTED_MINOR_VERSION) 885 if (args->minorversion > NFSD_SUPPORTED_MINOR_VERSION)
891 goto out; 886 goto out;
892 887
888 status = nfserr_resource;
889 cstate = cstate_alloc();
890 if (cstate == NULL)
891 goto out;
892
893 status = nfs_ok; 893 status = nfs_ok;
894 while (!status && resp->opcnt < args->opcnt) { 894 while (!status && resp->opcnt < args->opcnt) {
895 op = &args->ops[resp->opcnt++]; 895 op = &args->ops[resp->opcnt++];
@@ -957,9 +957,9 @@ encode_op:
957 nfsd4_increment_op_stats(op->opnum); 957 nfsd4_increment_op_stats(op->opnum);
958 } 958 }
959 959
960 cstate_free(cstate);
960out: 961out:
961 nfsd4_release_compoundargs(args); 962 nfsd4_release_compoundargs(args);
962 cstate_free(cstate);
963 dprintk("nfsv4 compound returned %d\n", ntohl(status)); 963 dprintk("nfsv4 compound returned %d\n", ntohl(status));
964 return status; 964 return status;
965} 965}
diff --git a/fs/ntfs/usnjrnl.h b/fs/ntfs/usnjrnl.h
index 3a8af75351e8..4087fbdac327 100644
--- a/fs/ntfs/usnjrnl.h
+++ b/fs/ntfs/usnjrnl.h
@@ -113,7 +113,7 @@ typedef struct {
113 * Reason flags (32-bit). Cumulative flags describing the change(s) to the 113 * Reason flags (32-bit). Cumulative flags describing the change(s) to the
114 * file since it was last opened. I think the names speak for themselves but 114 * file since it was last opened. I think the names speak for themselves but
115 * if you disagree check out the descriptions in the Linux NTFS project NTFS 115 * if you disagree check out the descriptions in the Linux NTFS project NTFS
116 * documentation: http://linux-ntfs.sourceforge.net/ntfs/files/usnjrnl.html 116 * documentation: http://www.linux-ntfs.org/
117 */ 117 */
118enum { 118enum {
119 USN_REASON_DATA_OVERWRITE = const_cpu_to_le32(0x00000001), 119 USN_REASON_DATA_OVERWRITE = const_cpu_to_le32(0x00000001),
@@ -145,7 +145,7 @@ typedef le32 USN_REASON_FLAGS;
145 * Source info flags (32-bit). Information about the source of the change(s) 145 * Source info flags (32-bit). Information about the source of the change(s)
146 * to the file. For detailed descriptions of what these mean, see the Linux 146 * to the file. For detailed descriptions of what these mean, see the Linux
147 * NTFS project NTFS documentation: 147 * NTFS project NTFS documentation:
148 * http://linux-ntfs.sourceforge.net/ntfs/files/usnjrnl.html 148 * http://www.linux-ntfs.org/
149 */ 149 */
150enum { 150enum {
151 USN_SOURCE_DATA_MANAGEMENT = const_cpu_to_le32(0x00000001), 151 USN_SOURCE_DATA_MANAGEMENT = const_cpu_to_le32(0x00000001),
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 0d6eb33597c6..71c9be59c9c2 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -337,65 +337,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
337 return 0; 337 return 0;
338} 338}
339 339
340/*
341 * Use precise platform statistics if available:
342 */
343#ifdef CONFIG_VIRT_CPU_ACCOUNTING
344static cputime_t task_utime(struct task_struct *p)
345{
346 return p->utime;
347}
348
349static cputime_t task_stime(struct task_struct *p)
350{
351 return p->stime;
352}
353#else
354static cputime_t task_utime(struct task_struct *p)
355{
356 clock_t utime = cputime_to_clock_t(p->utime),
357 total = utime + cputime_to_clock_t(p->stime);
358 u64 temp;
359
360 /*
361 * Use CFS's precise accounting:
362 */
363 temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
364
365 if (total) {
366 temp *= utime;
367 do_div(temp, total);
368 }
369 utime = (clock_t)temp;
370
371 p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
372 return p->prev_utime;
373}
374
375static cputime_t task_stime(struct task_struct *p)
376{
377 clock_t stime;
378
379 /*
380 * Use CFS's precise accounting. (we subtract utime from
381 * the total, to make sure the total observed by userspace
382 * grows monotonically - apps rely on that):
383 */
384 stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
385 cputime_to_clock_t(task_utime(p));
386
387 if (stime >= 0)
388 p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
389
390 return p->prev_stime;
391}
392#endif
393
394static cputime_t task_gtime(struct task_struct *p)
395{
396 return p->gtime;
397}
398
399static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, 340static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
400 struct pid *pid, struct task_struct *task, int whole) 341 struct pid *pid, struct task_struct *task, int whole)
401{ 342{
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index ded969862960..00f10a2dcf12 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -24,6 +24,7 @@
24#include <linux/tty.h> 24#include <linux/tty.h>
25#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/mman.h> 26#include <linux/mman.h>
27#include <linux/quicklist.h>
27#include <linux/proc_fs.h> 28#include <linux/proc_fs.h>
28#include <linux/ioport.h> 29#include <linux/ioport.h>
29#include <linux/mm.h> 30#include <linux/mm.h>
@@ -189,7 +190,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
189 "Committed_AS: %8lu kB\n" 190 "Committed_AS: %8lu kB\n"
190 "VmallocTotal: %8lu kB\n" 191 "VmallocTotal: %8lu kB\n"
191 "VmallocUsed: %8lu kB\n" 192 "VmallocUsed: %8lu kB\n"
192 "VmallocChunk: %8lu kB\n", 193 "VmallocChunk: %8lu kB\n"
194 "Quicklists: %8lu kB\n",
193 K(i.totalram), 195 K(i.totalram),
194 K(i.freeram), 196 K(i.freeram),
195 K(i.bufferram), 197 K(i.bufferram),
@@ -221,7 +223,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
221 K(committed), 223 K(committed),
222 (unsigned long)VMALLOC_TOTAL >> 10, 224 (unsigned long)VMALLOC_TOTAL >> 10,
223 vmi.used >> 10, 225 vmi.used >> 10,
224 vmi.largest_chunk >> 10 226 vmi.largest_chunk >> 10,
227 K(quicklist_total_size())
225 ); 228 );
226 229
227 len += hugetlb_report_meminfo(page + len); 230 len += hugetlb_report_meminfo(page + len);
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 1170dc60e638..1870d5e05f1c 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -1,8 +1,10 @@
1ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/kvm.h),) 1ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \
2 $(srctree)/include/asm-$(SRCARCH)/kvm.h),)
2header-y += kvm.h 3header-y += kvm.h
3endif 4endif
4 5
5ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),) 6ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
7 $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
6unifdef-y += a.out.h 8unifdef-y += a.out.h
7endif 9endif
8unifdef-y += auxvec.h 10unifdef-y += auxvec.h
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index a3f738cffdb6..edc6ba82e090 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -97,6 +97,16 @@ extern void warn_slowpath(const char *file, const int line,
97 unlikely(__ret_warn_once); \ 97 unlikely(__ret_warn_once); \
98}) 98})
99 99
100#define WARN_ONCE(condition, format...) ({ \
101 static int __warned; \
102 int __ret_warn_once = !!(condition); \
103 \
104 if (unlikely(__ret_warn_once)) \
105 if (WARN(!__warned, format)) \
106 __warned = 1; \
107 unlikely(__ret_warn_once); \
108})
109
100#define WARN_ON_RATELIMIT(condition, state) \ 110#define WARN_ON_RATELIMIT(condition, state) \
101 WARN_ON((condition) && __ratelimit(state)) 111 WARN_ON((condition) && __ratelimit(state))
102 112
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index abcf34c2fdc7..ea8087b55ffc 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -126,7 +126,7 @@ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
126 * @args: array of argument values to store 126 * @args: array of argument values to store
127 * 127 *
128 * Changes @n arguments to the system call starting with the @i'th argument. 128 * Changes @n arguments to the system call starting with the @i'th argument.
129 * @n'th argument to @val. Argument @i gets value @args[0], and so on. 129 * Argument @i gets value @args[0], and so on.
130 * An arch inline version is probably optimal when @i and @n are constants. 130 * An arch inline version is probably optimal when @i and @n are constants.
131 * 131 *
132 * It's only valid to call this when @task is stopped for tracing on 132 * It's only valid to call this when @task is stopped for tracing on
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index d5c0f2fda51b..03b1d69b142f 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -63,6 +63,7 @@ static inline void flush_icache_page(struct vm_area_struct *vma,
63} 63}
64 64
65extern void (*flush_icache_range)(unsigned long start, unsigned long end); 65extern void (*flush_icache_range)(unsigned long start, unsigned long end);
66extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
66 67
67extern void (*__flush_cache_vmap)(void); 68extern void (*__flush_cache_vmap)(void);
68 69
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
index f0ee4fb55911..90fc708b320e 100644
--- a/include/asm-um/dma-mapping.h
+++ b/include/asm-um/dma-mapping.h
@@ -118,4 +118,11 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
118 BUG(); 118 BUG();
119} 119}
120 120
121static inline int
122dma_mapping_error(struct device *dev, dma_addr_t dma_handle)
123{
124 BUG();
125 return 0;
126}
127
121#endif 128#endif
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 762f6a6bc707..9489283a4bcf 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -72,14 +72,15 @@
72#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ 72#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
73#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ 73#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
74#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ 74#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
75#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ 75#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
76#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ 76#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
77#define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */ 77#define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */
78#define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */ 78#define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */
79#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ 79#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
80#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ 80#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */
81#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ 81#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */
82#define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ 82#define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */
83#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
83 84
84/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ 85/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
85#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ 86#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86/required-features.h b/include/asm-x86/required-features.h
index adec887dd7cd..5c2ff4bc2980 100644
--- a/include/asm-x86/required-features.h
+++ b/include/asm-x86/required-features.h
@@ -41,6 +41,12 @@
41# define NEED_3DNOW 0 41# define NEED_3DNOW 0
42#endif 42#endif
43 43
44#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
45# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
46#else
47# define NEED_NOPL 0
48#endif
49
44#ifdef CONFIG_X86_64 50#ifdef CONFIG_X86_64
45#define NEED_PSE 0 51#define NEED_PSE 0
46#define NEED_MSR (1<<(X86_FEATURE_MSR & 31)) 52#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
@@ -67,7 +73,7 @@
67#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW) 73#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
68 74
69#define REQUIRED_MASK2 0 75#define REQUIRED_MASK2 0
70#define REQUIRED_MASK3 0 76#define REQUIRED_MASK3 (NEED_NOPL)
71#define REQUIRED_MASK4 0 77#define REQUIRED_MASK4 0
72#define REQUIRED_MASK5 0 78#define REQUIRED_MASK5 0
73#define REQUIRED_MASK6 0 79#define REQUIRED_MASK6 0
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 7d970678f940..b68ec09399be 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -167,7 +167,8 @@ unifdef-y += acct.h
167unifdef-y += adb.h 167unifdef-y += adb.h
168unifdef-y += adfs_fs.h 168unifdef-y += adfs_fs.h
169unifdef-y += agpgart.h 169unifdef-y += agpgart.h
170ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),) 170ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
171 $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
171unifdef-y += a.out.h 172unifdef-y += a.out.h
172endif 173endif
173unifdef-y += apm_bios.h 174unifdef-y += apm_bios.h
@@ -258,7 +259,8 @@ unifdef-y += kd.h
258unifdef-y += kernelcapi.h 259unifdef-y += kernelcapi.h
259unifdef-y += kernel.h 260unifdef-y += kernel.h
260unifdef-y += keyboard.h 261unifdef-y += keyboard.h
261ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/kvm.h),) 262ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \
263 $(srctree)/include/asm-$(SRCARCH)/kvm.h),)
262unifdef-y += kvm.h 264unifdef-y += kvm.h
263endif 265endif
264unifdef-y += llc.h 266unifdef-y += llc.h
@@ -297,7 +299,6 @@ unifdef-y += parport.h
297unifdef-y += patchkey.h 299unifdef-y += patchkey.h
298unifdef-y += pci.h 300unifdef-y += pci.h
299unifdef-y += personality.h 301unifdef-y += personality.h
300unifdef-y += pim.h
301unifdef-y += pktcdvd.h 302unifdef-y += pktcdvd.h
302unifdef-y += pmu.h 303unifdef-y += pmu.h
303unifdef-y += poll.h 304unifdef-y += poll.h
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index c33b0dc28e4d..ed3a5d473e52 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -127,6 +127,8 @@ extern int clockevents_register_notifier(struct notifier_block *nb);
127extern int clockevents_program_event(struct clock_event_device *dev, 127extern int clockevents_program_event(struct clock_event_device *dev,
128 ktime_t expires, ktime_t now); 128 ktime_t expires, ktime_t now);
129 129
130extern void clockevents_handle_noop(struct clock_event_device *dev);
131
130#ifdef CONFIG_GENERIC_CLOCKEVENTS 132#ifdef CONFIG_GENERIC_CLOCKEVENTS
131extern void clockevents_notify(unsigned long reason, void *arg); 133extern void clockevents_notify(unsigned long reason, void *arg);
132#else 134#else
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e8f450c499b0..2691926fb506 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -160,7 +160,7 @@ static inline int current_cpuset_is_being_rebound(void)
160 160
161static inline void rebuild_sched_domains(void) 161static inline void rebuild_sched_domains(void)
162{ 162{
163 partition_sched_domains(0, NULL, NULL); 163 partition_sched_domains(1, NULL, NULL);
164} 164}
165 165
166#endif /* !CONFIG_CPUSETS */ 166#endif /* !CONFIG_CPUSETS */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 87c12ed96954..1524829f73f2 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1111,7 +1111,6 @@ void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
1111#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 1111#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
1112int ide_pci_set_master(struct pci_dev *, const char *); 1112int ide_pci_set_master(struct pci_dev *, const char *);
1113unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); 1113unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
1114extern const struct ide_dma_ops sff_dma_ops;
1115int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *); 1114int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
1116int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); 1115int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
1117#else 1116#else
@@ -1275,6 +1274,7 @@ extern int __ide_dma_end(ide_drive_t *);
1275int ide_dma_test_irq(ide_drive_t *); 1274int ide_dma_test_irq(ide_drive_t *);
1276extern void ide_dma_lost_irq(ide_drive_t *); 1275extern void ide_dma_lost_irq(ide_drive_t *);
1277extern void ide_dma_timeout(ide_drive_t *); 1276extern void ide_dma_timeout(ide_drive_t *);
1277extern const struct ide_dma_ops sff_dma_ops;
1278#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ 1278#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
1279 1279
1280#else 1280#else
@@ -1448,8 +1448,7 @@ static inline void ide_dump_identify(u8 *id)
1448 1448
1449static inline int hwif_to_node(ide_hwif_t *hwif) 1449static inline int hwif_to_node(ide_hwif_t *hwif)
1450{ 1450{
1451 struct pci_dev *dev = to_pci_dev(hwif->dev); 1451 return hwif->dev ? dev_to_node(hwif->dev) : -1;
1452 return hwif->dev ? pcibus_to_node(dev->bus) : -1;
1453} 1452}
1454 1453
1455static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive) 1454static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive)
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 7f4df7c7659d..abc1abc63bf0 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -643,6 +643,9 @@ struct ieee80211_mgmt {
643 } u; 643 } u;
644} __attribute__ ((packed)); 644} __attribute__ ((packed));
645 645
646/* mgmt header + 1 byte category code */
647#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
648
646 649
647/* Control frames */ 650/* Control frames */
648struct ieee80211_rts { 651struct ieee80211_rts {
@@ -708,12 +711,13 @@ struct ieee80211_ht_addt_info {
708 711
709/* 802.11n HT capabilities masks */ 712/* 802.11n HT capabilities masks */
710#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002 713#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002
711#define IEEE80211_HT_CAP_MIMO_PS 0x000C 714#define IEEE80211_HT_CAP_SM_PS 0x000C
712#define IEEE80211_HT_CAP_GRN_FLD 0x0010 715#define IEEE80211_HT_CAP_GRN_FLD 0x0010
713#define IEEE80211_HT_CAP_SGI_20 0x0020 716#define IEEE80211_HT_CAP_SGI_20 0x0020
714#define IEEE80211_HT_CAP_SGI_40 0x0040 717#define IEEE80211_HT_CAP_SGI_40 0x0040
715#define IEEE80211_HT_CAP_DELAY_BA 0x0400 718#define IEEE80211_HT_CAP_DELAY_BA 0x0400
716#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 719#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
720#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
717/* 802.11n HT capability AMPDU settings */ 721/* 802.11n HT capability AMPDU settings */
718#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03 722#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03
719#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C 723#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C
@@ -736,11 +740,26 @@ struct ieee80211_ht_addt_info {
736#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 740#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
737#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 741#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
738 742
739/* MIMO Power Save Modes */ 743/* block-ack parameters */
740#define WLAN_HT_CAP_MIMO_PS_STATIC 0 744#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
741#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1 745#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
742#define WLAN_HT_CAP_MIMO_PS_INVALID 2 746#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
743#define WLAN_HT_CAP_MIMO_PS_DISABLED 3 747#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
748#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
749
750/*
751 * A-PMDU buffer sizes
752 * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
753 */
754#define IEEE80211_MIN_AMPDU_BUF 0x8
755#define IEEE80211_MAX_AMPDU_BUF 0x40
756
757
758/* Spatial Multiplexing Power Save Modes */
759#define WLAN_HT_CAP_SM_PS_STATIC 0
760#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
761#define WLAN_HT_CAP_SM_PS_INVALID 2
762#define WLAN_HT_CAP_SM_PS_DISABLED 3
744 763
745/* Authentication algorithms */ 764/* Authentication algorithms */
746#define WLAN_AUTH_OPEN 0 765#define WLAN_AUTH_OPEN 0
diff --git a/include/linux/if.h b/include/linux/if.h
index 5c9d1fa93fef..65246846c844 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -24,6 +24,7 @@
24#include <linux/compiler.h> /* for "__user" et al */ 24#include <linux/compiler.h> /* for "__user" et al */
25 25
26#define IFNAMSIZ 16 26#define IFNAMSIZ 16
27#define IFALIASZ 256
27#include <linux/hdlc/ioctl.h> 28#include <linux/hdlc/ioctl.h>
28 29
29/* Standard interface flags (netdevice->flags). */ 30/* Standard interface flags (netdevice->flags). */
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index e157c1399b61..723a1c5fbc6c 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -74,6 +74,7 @@
74#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport 74#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport
75 * over Ethernet 75 * over Ethernet
76 */ 76 */
77#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
77#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ 78#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
78#define ETH_P_TIPC 0x88CA /* TIPC */ 79#define ETH_P_TIPC 0x88CA /* TIPC */
79 80
@@ -99,6 +100,7 @@
99#define ETH_P_ECONET 0x0018 /* Acorn Econet */ 100#define ETH_P_ECONET 0x0018 /* Acorn Econet */
100#define ETH_P_HDLC 0x0019 /* HDLC frames */ 101#define ETH_P_HDLC 0x0019 /* HDLC frames */
101#define ETH_P_ARCNET 0x001A /* 1A for ArcNet :-) */ 102#define ETH_P_ARCNET 0x001A /* 1A for ArcNet :-) */
103#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */
102 104
103/* 105/*
104 * This is an Ethernet frame header. 106 * This is an Ethernet frame header.
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 84c3492ae5cb..f9032c88716a 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -79,6 +79,7 @@ enum
79 IFLA_LINKINFO, 79 IFLA_LINKINFO,
80#define IFLA_LINKINFO IFLA_LINKINFO 80#define IFLA_LINKINFO IFLA_LINKINFO
81 IFLA_NET_NS_PID, 81 IFLA_NET_NS_PID,
82 IFLA_IFALIAS,
82 __IFLA_MAX 83 __IFLA_MAX
83}; 84};
84 85
diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h
new file mode 100644
index 000000000000..7e989216ec17
--- /dev/null
+++ b/include/linux/if_phonet.h
@@ -0,0 +1,18 @@
1/*
2 * File: if_phonet.h
3 *
4 * Phonet interface kernel definitions
5 *
6 * Copyright (C) 2008 Nokia Corporation. All rights reserved.
7 */
8
9#define PHONET_HEADER_LEN 8 /* Phonet header length */
10
11#define PHONET_MIN_MTU 6
12/* 6 bytes header + 65535 bytes payload */
13#define PHONET_MAX_MTU 65541
14#define PHONET_DEV_MTU PHONET_MAX_MTU
15
16#ifdef __KERNEL__
17extern struct header_ops phonet_header_ops;
18#endif
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 22d2115458c6..8d3b7a9afd17 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -109,6 +109,7 @@ extern struct resource iomem_resource;
109extern int request_resource(struct resource *root, struct resource *new); 109extern int request_resource(struct resource *root, struct resource *new);
110extern int release_resource(struct resource *new); 110extern int release_resource(struct resource *new);
111extern int insert_resource(struct resource *parent, struct resource *new); 111extern int insert_resource(struct resource *parent, struct resource *new);
112extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
112extern int allocate_resource(struct resource *root, struct resource *new, 113extern int allocate_resource(struct resource *root, struct resource *new,
113 resource_size_t size, resource_size_t min, 114 resource_size_t size, resource_size_t min,
114 resource_size_t max, resource_size_t align, 115 resource_size_t max, resource_size_t align,
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index ec6eb49af2d8..0f434a28fb58 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -242,4 +242,164 @@ struct ip_vs_daemon_user {
242 int syncid; 242 int syncid;
243}; 243};
244 244
245/*
246 *
247 * IPVS Generic Netlink interface definitions
248 *
249 */
250
251/* Generic Netlink family info */
252
253#define IPVS_GENL_NAME "IPVS"
254#define IPVS_GENL_VERSION 0x1
255
256struct ip_vs_flags {
257 __be32 flags;
258 __be32 mask;
259};
260
261/* Generic Netlink command attributes */
262enum {
263 IPVS_CMD_UNSPEC = 0,
264
265 IPVS_CMD_NEW_SERVICE, /* add service */
266 IPVS_CMD_SET_SERVICE, /* modify service */
267 IPVS_CMD_DEL_SERVICE, /* delete service */
268 IPVS_CMD_GET_SERVICE, /* get service info */
269
270 IPVS_CMD_NEW_DEST, /* add destination */
271 IPVS_CMD_SET_DEST, /* modify destination */
272 IPVS_CMD_DEL_DEST, /* delete destination */
273 IPVS_CMD_GET_DEST, /* get destination info */
274
275 IPVS_CMD_NEW_DAEMON, /* start sync daemon */
276 IPVS_CMD_DEL_DAEMON, /* stop sync daemon */
277 IPVS_CMD_GET_DAEMON, /* get sync daemon status */
278
279 IPVS_CMD_SET_CONFIG, /* set config settings */
280 IPVS_CMD_GET_CONFIG, /* get config settings */
281
282 IPVS_CMD_SET_INFO, /* only used in GET_INFO reply */
283 IPVS_CMD_GET_INFO, /* get general IPVS info */
284
285 IPVS_CMD_ZERO, /* zero all counters and stats */
286 IPVS_CMD_FLUSH, /* flush services and dests */
287
288 __IPVS_CMD_MAX,
289};
290
291#define IPVS_CMD_MAX (__IPVS_CMD_MAX - 1)
292
293/* Attributes used in the first level of commands */
294enum {
295 IPVS_CMD_ATTR_UNSPEC = 0,
296 IPVS_CMD_ATTR_SERVICE, /* nested service attribute */
297 IPVS_CMD_ATTR_DEST, /* nested destination attribute */
298 IPVS_CMD_ATTR_DAEMON, /* nested sync daemon attribute */
299 IPVS_CMD_ATTR_TIMEOUT_TCP, /* TCP connection timeout */
300 IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, /* TCP FIN wait timeout */
301 IPVS_CMD_ATTR_TIMEOUT_UDP, /* UDP timeout */
302 __IPVS_CMD_ATTR_MAX,
303};
304
305#define IPVS_CMD_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1)
306
307/*
308 * Attributes used to describe a service
309 *
310 * Used inside nested attribute IPVS_CMD_ATTR_SERVICE
311 */
312enum {
313 IPVS_SVC_ATTR_UNSPEC = 0,
314 IPVS_SVC_ATTR_AF, /* address family */
315 IPVS_SVC_ATTR_PROTOCOL, /* virtual service protocol */
316 IPVS_SVC_ATTR_ADDR, /* virtual service address */
317 IPVS_SVC_ATTR_PORT, /* virtual service port */
318 IPVS_SVC_ATTR_FWMARK, /* firewall mark of service */
319
320 IPVS_SVC_ATTR_SCHED_NAME, /* name of scheduler */
321 IPVS_SVC_ATTR_FLAGS, /* virtual service flags */
322 IPVS_SVC_ATTR_TIMEOUT, /* persistent timeout */
323 IPVS_SVC_ATTR_NETMASK, /* persistent netmask */
324
325 IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */
326 __IPVS_SVC_ATTR_MAX,
327};
328
329#define IPVS_SVC_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1)
330
331/*
332 * Attributes used to describe a destination (real server)
333 *
334 * Used inside nested attribute IPVS_CMD_ATTR_DEST
335 */
336enum {
337 IPVS_DEST_ATTR_UNSPEC = 0,
338 IPVS_DEST_ATTR_ADDR, /* real server address */
339 IPVS_DEST_ATTR_PORT, /* real server port */
340
341 IPVS_DEST_ATTR_FWD_METHOD, /* forwarding method */
342 IPVS_DEST_ATTR_WEIGHT, /* destination weight */
343
344 IPVS_DEST_ATTR_U_THRESH, /* upper threshold */
345 IPVS_DEST_ATTR_L_THRESH, /* lower threshold */
346
347 IPVS_DEST_ATTR_ACTIVE_CONNS, /* active connections */
348 IPVS_DEST_ATTR_INACT_CONNS, /* inactive connections */
349 IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */
350
351 IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */
352 __IPVS_DEST_ATTR_MAX,
353};
354
355#define IPVS_DEST_ATTR_MAX (__IPVS_DEST_ATTR_MAX - 1)
356
357/*
358 * Attributes describing a sync daemon
359 *
360 * Used inside nested attribute IPVS_CMD_ATTR_DAEMON
361 */
362enum {
363 IPVS_DAEMON_ATTR_UNSPEC = 0,
364 IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */
365 IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */
366 IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */
367 __IPVS_DAEMON_ATTR_MAX,
368};
369
370#define IPVS_DAEMON_ATTR_MAX (__IPVS_DAEMON_ATTR_MAX - 1)
371
372/*
373 * Attributes used to describe service or destination entry statistics
374 *
375 * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS
376 */
377enum {
378 IPVS_STATS_ATTR_UNSPEC = 0,
379 IPVS_STATS_ATTR_CONNS, /* connections scheduled */
380 IPVS_STATS_ATTR_INPKTS, /* incoming packets */
381 IPVS_STATS_ATTR_OUTPKTS, /* outgoing packets */
382 IPVS_STATS_ATTR_INBYTES, /* incoming bytes */
383 IPVS_STATS_ATTR_OUTBYTES, /* outgoing bytes */
384
385 IPVS_STATS_ATTR_CPS, /* current connection rate */
386 IPVS_STATS_ATTR_INPPS, /* current in packet rate */
387 IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */
388 IPVS_STATS_ATTR_INBPS, /* current in byte rate */
389 IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */
390 __IPVS_STATS_ATTR_MAX,
391};
392
393#define IPVS_STATS_ATTR_MAX (__IPVS_STATS_ATTR_MAX - 1)
394
395/* Attributes used in response to IPVS_CMD_GET_INFO command */
396enum {
397 IPVS_INFO_ATTR_UNSPEC = 0,
398 IPVS_INFO_ATTR_VERSION, /* IPVS version number */
399 IPVS_INFO_ATTR_CONN_TAB_SIZE, /* size of connection hash table */
400 __IPVS_INFO_ATTR_MAX,
401};
402
403#define IPVS_INFO_ATTR_MAX (__IPVS_INFO_ATTR_MAX - 1)
404
245#endif /* _IP_VS_H */ 405#endif /* _IP_VS_H */
diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
index 8687a7dc0632..4c218ee7587a 100644
--- a/include/linux/isdn_ppp.h
+++ b/include/linux/isdn_ppp.h
@@ -157,7 +157,7 @@ typedef struct {
157 157
158typedef struct { 158typedef struct {
159 int mp_mrru; /* unused */ 159 int mp_mrru; /* unused */
160 struct sk_buff * frags; /* fragments sl list -- use skb->next */ 160 struct sk_buff_head frags; /* fragments sl list */
161 long frames; /* number of frames in the frame list */ 161 long frames; /* number of frames in the frame list */
162 unsigned int seq; /* last processed packet seq #: any packets 162 unsigned int seq; /* last processed packet seq #: any packets
163 * with smaller seq # will be dropped 163 * with smaller seq # will be dropped
diff --git a/include/linux/list.h b/include/linux/list.h
index db35ef02e745..969f6e92d089 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -619,6 +619,19 @@ static inline void hlist_add_after(struct hlist_node *n,
619 next->next->pprev = &next->next; 619 next->next->pprev = &next->next;
620} 620}
621 621
622/*
623 * Move a list from one list head to another. Fixup the pprev
624 * reference of the first entry if it exists.
625 */
626static inline void hlist_move_list(struct hlist_head *old,
627 struct hlist_head *new)
628{
629 new->first = old->first;
630 if (new->first)
631 new->first->pprev = &new->first;
632 old->first = NULL;
633}
634
622#define hlist_entry(ptr, type, member) container_of(ptr,type,member) 635#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
623 636
624#define hlist_for_each(pos, head) \ 637#define hlist_for_each(pos, head) \
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 07112ee9293a..8a455694d682 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -6,7 +6,6 @@
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7#include <linux/in.h> 7#include <linux/in.h>
8#endif 8#endif
9#include <linux/pim.h>
10 9
11/* 10/*
12 * Based on the MROUTING 3.5 defines primarily to keep 11 * Based on the MROUTING 3.5 defines primarily to keep
@@ -130,6 +129,7 @@ struct igmpmsg
130 */ 129 */
131 130
132#ifdef __KERNEL__ 131#ifdef __KERNEL__
132#include <linux/pim.h>
133#include <net/sock.h> 133#include <net/sock.h>
134 134
135#ifdef CONFIG_IP_MROUTE 135#ifdef CONFIG_IP_MROUTE
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 5cf50473a10f..6f4c180179e2 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -115,6 +115,7 @@ struct sioc_mif_req6
115 115
116#ifdef __KERNEL__ 116#ifdef __KERNEL__
117 117
118#include <linux/pim.h>
118#include <linux/skbuff.h> /* for struct sk_buff_head */ 119#include <linux/skbuff.h> /* for struct sk_buff_head */
119 120
120#ifdef CONFIG_IPV6_MROUTE 121#ifdef CONFIG_IPV6_MROUTE
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 488c56e649b5..d675df08b946 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -471,6 +471,8 @@ struct net_device
471 char name[IFNAMSIZ]; 471 char name[IFNAMSIZ];
472 /* device name hash chain */ 472 /* device name hash chain */
473 struct hlist_node name_hlist; 473 struct hlist_node name_hlist;
474 /* snmp alias */
475 char *ifalias;
474 476
475 /* 477 /*
476 * I/O specific fields 478 * I/O specific fields
@@ -1224,6 +1226,7 @@ extern int dev_ethtool(struct net *net, struct ifreq *);
1224extern unsigned dev_get_flags(const struct net_device *); 1226extern unsigned dev_get_flags(const struct net_device *);
1225extern int dev_change_flags(struct net_device *, unsigned); 1227extern int dev_change_flags(struct net_device *, unsigned);
1226extern int dev_change_name(struct net_device *, char *); 1228extern int dev_change_name(struct net_device *, char *);
1229extern int dev_set_alias(struct net_device *, const char *, size_t);
1227extern int dev_change_net_namespace(struct net_device *, 1230extern int dev_change_net_namespace(struct net_device *,
1228 struct net *, const char *); 1231 struct net *, const char *);
1229extern int dev_set_mtu(struct net_device *, int); 1232extern int dev_set_mtu(struct net_device *, int);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9ff1b54908f3..cbba7760545b 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -220,7 +220,7 @@ struct netlink_callback
220 int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); 220 int (*dump)(struct sk_buff * skb, struct netlink_callback *cb);
221 int (*done)(struct netlink_callback *cb); 221 int (*done)(struct netlink_callback *cb);
222 int family; 222 int family;
223 long args[6]; 223 long args[7];
224}; 224};
225 225
226struct netlink_notify 226struct netlink_notify
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2be7c63bc0f2..9bad65400fba 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -89,6 +89,22 @@
89 * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC 89 * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC
90 * or, if no MAC address given, all mesh paths, on the interface identified 90 * or, if no MAC address given, all mesh paths, on the interface identified
91 * by %NL80211_ATTR_IFINDEX. 91 * by %NL80211_ATTR_IFINDEX.
92 * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by
93 * %NL80211_ATTR_IFINDEX.
94 *
95 * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command
96 * after being queried by the kernel. CRDA replies by sending a regulatory
97 * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our
98 * current alpha2 if it found a match. It also provides
99 * NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each
100 * regulatory rule is a nested set of attributes given by
101 * %NL80211_ATTR_REG_RULE_FREQ_[START|END] and
102 * %NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by
103 * %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and
104 * %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP.
105 * @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain
106 * to the the specified ISO/IEC 3166-1 alpha2 country code. The core will
107 * store this as a valid request and then query userspace for it.
92 * 108 *
93 * @NL80211_CMD_MAX: highest used command number 109 * @NL80211_CMD_MAX: highest used command number
94 * @__NL80211_CMD_AFTER_LAST: internal use 110 * @__NL80211_CMD_AFTER_LAST: internal use
@@ -127,13 +143,23 @@ enum nl80211_commands {
127 NL80211_CMD_NEW_MPATH, 143 NL80211_CMD_NEW_MPATH,
128 NL80211_CMD_DEL_MPATH, 144 NL80211_CMD_DEL_MPATH,
129 145
130 /* add commands here */ 146 NL80211_CMD_SET_BSS,
147
148 NL80211_CMD_SET_REG,
149 NL80211_CMD_REQ_SET_REG,
150
151 /* add new commands above here */
131 152
132 /* used to define NL80211_CMD_MAX below */ 153 /* used to define NL80211_CMD_MAX below */
133 __NL80211_CMD_AFTER_LAST, 154 __NL80211_CMD_AFTER_LAST,
134 NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 155 NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1
135}; 156};
136 157
158/*
159 * Allow user space programs to use #ifdef on new commands by defining them
160 * here
161 */
162#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS
137 163
138/** 164/**
139 * enum nl80211_attrs - nl80211 netlink attributes 165 * enum nl80211_attrs - nl80211 netlink attributes
@@ -188,10 +214,34 @@ enum nl80211_commands {
188 * info given for %NL80211_CMD_GET_MPATH, nested attribute described at 214 * info given for %NL80211_CMD_GET_MPATH, nested attribute described at
189 * &enum nl80211_mpath_info. 215 * &enum nl80211_mpath_info.
190 * 216 *
191 *
192 * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of 217 * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of
193 * &enum nl80211_mntr_flags. 218 * &enum nl80211_mntr_flags.
194 * 219 *
220 * @NL80211_ATTR_REG_ALPHA2: an ISO-3166-alpha2 country code for which the
221 * current regulatory domain should be set to or is already set to.
222 * For example, 'CR', for Costa Rica. This attribute is used by the kernel
223 * to query the CRDA to retrieve one regulatory domain. This attribute can
224 * also be used by userspace to query the kernel for the currently set
225 * regulatory domain. We chose an alpha2 as that is also used by the
226 * IEEE-802.11d country information element to identify a country.
227 * Users can also simply ask the wireless core to set regulatory domain
228 * to a specific alpha2.
229 * @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory
230 * rules.
231 *
232 * @NL80211_ATTR_BSS_CTS_PROT: whether CTS protection is enabled (u8, 0 or 1)
233 * @NL80211_ATTR_BSS_SHORT_PREAMBLE: whether short preamble is enabled
234 * (u8, 0 or 1)
235 * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled
236 * (u8, 0 or 1)
237 *
238 * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from
239 * association request when used with NL80211_CMD_NEW_STATION)
240 *
241 * @NL80211_ATTR_SUPPORTED_IFTYPES: nested attribute containing all
242 * supported interface types, each a flag attribute with the number
243 * of the interface mode.
244 *
195 * @NL80211_ATTR_MAX: highest attribute number currently defined 245 * @NL80211_ATTR_MAX: highest attribute number currently defined
196 * @__NL80211_ATTR_AFTER_LAST: internal use 246 * @__NL80211_ATTR_AFTER_LAST: internal use
197 */ 247 */
@@ -235,16 +285,35 @@ enum nl80211_attrs {
235 NL80211_ATTR_MPATH_NEXT_HOP, 285 NL80211_ATTR_MPATH_NEXT_HOP,
236 NL80211_ATTR_MPATH_INFO, 286 NL80211_ATTR_MPATH_INFO,
237 287
288 NL80211_ATTR_BSS_CTS_PROT,
289 NL80211_ATTR_BSS_SHORT_PREAMBLE,
290 NL80211_ATTR_BSS_SHORT_SLOT_TIME,
291
292 NL80211_ATTR_HT_CAPABILITY,
293
294 NL80211_ATTR_SUPPORTED_IFTYPES,
295
296 NL80211_ATTR_REG_ALPHA2,
297 NL80211_ATTR_REG_RULES,
298
238 /* add attributes here, update the policy in nl80211.c */ 299 /* add attributes here, update the policy in nl80211.c */
239 300
240 __NL80211_ATTR_AFTER_LAST, 301 __NL80211_ATTR_AFTER_LAST,
241 NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 302 NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1
242}; 303};
243 304
305/*
306 * Allow user space programs to use #ifdef on new attributes by defining them
307 * here
308 */
309#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY
310
244#define NL80211_MAX_SUPP_RATES 32 311#define NL80211_MAX_SUPP_RATES 32
312#define NL80211_MAX_SUPP_REG_RULES 32
245#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0 313#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0
246#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16 314#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16
247#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 315#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
316#define NL80211_HT_CAPABILITY_LEN 26
248 317
249/** 318/**
250 * enum nl80211_iftype - (virtual) interface types 319 * enum nl80211_iftype - (virtual) interface types
@@ -436,6 +505,66 @@ enum nl80211_bitrate_attr {
436}; 505};
437 506
438/** 507/**
508 * enum nl80211_reg_rule_attr - regulatory rule attributes
509 * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional
510 * considerations for a given frequency range. These are the
511 * &enum nl80211_reg_rule_flags.
 512 * @NL80211_ATTR_FREQ_RANGE_START: starting frequency for the regulatory
 513 * rule in KHz. This is not a center frequency but an actual regulatory
514 * band edge.
515 * @NL80211_ATTR_FREQ_RANGE_END: ending frequency for the regulatory rule
 516 * in KHz. This is not a center frequency but an actual regulatory
517 * band edge.
518 * @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this
519 * frequency range, in KHz.
520 * @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain
521 * for a given frequency range. The value is in mBi (100 * dBi).
522 * If you don't have one then don't send this.
523 * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for
524 * a given frequency range. The value is in mBm (100 * dBm).
525 */
526enum nl80211_reg_rule_attr {
527 __NL80211_REG_RULE_ATTR_INVALID,
528 NL80211_ATTR_REG_RULE_FLAGS,
529
530 NL80211_ATTR_FREQ_RANGE_START,
531 NL80211_ATTR_FREQ_RANGE_END,
532 NL80211_ATTR_FREQ_RANGE_MAX_BW,
533
534 NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
535 NL80211_ATTR_POWER_RULE_MAX_EIRP,
536
537 /* keep last */
538 __NL80211_REG_RULE_ATTR_AFTER_LAST,
539 NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1
540};
541
542/**
543 * enum nl80211_reg_rule_flags - regulatory rule flags
544 *
545 * @NL80211_RRF_NO_OFDM: OFDM modulation not allowed
546 * @NL80211_RRF_NO_CCK: CCK modulation not allowed
547 * @NL80211_RRF_NO_INDOOR: indoor operation not allowed
548 * @NL80211_RRF_NO_OUTDOOR: outdoor operation not allowed
549 * @NL80211_RRF_DFS: DFS support is required to be used
550 * @NL80211_RRF_PTP_ONLY: this is only for Point To Point links
551 * @NL80211_RRF_PTMP_ONLY: this is only for Point To Multi Point links
552 * @NL80211_RRF_PASSIVE_SCAN: passive scan is required
553 * @NL80211_RRF_NO_IBSS: no IBSS is allowed
554 */
555enum nl80211_reg_rule_flags {
556 NL80211_RRF_NO_OFDM = 1<<0,
557 NL80211_RRF_NO_CCK = 1<<1,
558 NL80211_RRF_NO_INDOOR = 1<<2,
559 NL80211_RRF_NO_OUTDOOR = 1<<3,
560 NL80211_RRF_DFS = 1<<4,
561 NL80211_RRF_PTP_ONLY = 1<<5,
562 NL80211_RRF_PTMP_ONLY = 1<<6,
563 NL80211_RRF_PASSIVE_SCAN = 1<<7,
564 NL80211_RRF_NO_IBSS = 1<<8,
565};
566
567/**
439 * enum nl80211_mntr_flags - monitor configuration flags 568 * enum nl80211_mntr_flags - monitor configuration flags
440 * 569 *
441 * Monitor configuration flags. 570 * Monitor configuration flags.
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f1624b396754..a65b082a888a 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1411,6 +1411,8 @@
1411#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013 1411#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013
1412#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014 1412#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014
1413 1413
1414#define PCI_VENDOR_ID_CISCO 0x1137
1415
1414#define PCI_VENDOR_ID_ZIATECH 0x1138 1416#define PCI_VENDOR_ID_ZIATECH 0x1138
1415#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 1417#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550
1416 1418
@@ -2213,6 +2215,7 @@
2213 2215
2214#define PCI_VENDOR_ID_ATTANSIC 0x1969 2216#define PCI_VENDOR_ID_ATTANSIC 0x1969
2215#define PCI_DEVICE_ID_ATTANSIC_L1 0x1048 2217#define PCI_DEVICE_ID_ATTANSIC_L1 0x1048
2218#define PCI_DEVICE_ID_ATTANSIC_L2 0x2048
2216 2219
2217#define PCI_VENDOR_ID_JMICRON 0x197B 2220#define PCI_VENDOR_ID_JMICRON 0x197B
2218#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 2221#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
@@ -2244,6 +2247,16 @@
2244#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 2247#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007
2245#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 2248#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009
2246 2249
2250#define PCI_VENDOR_ID_NETXEN 0x4040
2251#define PCI_DEVICE_ID_NX2031_10GXSR 0x0001
2252#define PCI_DEVICE_ID_NX2031_10GCX4 0x0002
2253#define PCI_DEVICE_ID_NX2031_4GCU 0x0003
2254#define PCI_DEVICE_ID_NX2031_IMEZ 0x0004
2255#define PCI_DEVICE_ID_NX2031_HMEZ 0x0005
2256#define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024
2257#define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025
2258#define PCI_DEVICE_ID_NX3031 0x0100
2259
2247#define PCI_VENDOR_ID_AKS 0x416c 2260#define PCI_VENDOR_ID_AKS 0x416c
2248#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 2261#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100
2249 2262
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
new file mode 100644
index 000000000000..3a027f588a4a
--- /dev/null
+++ b/include/linux/phonet.h
@@ -0,0 +1,160 @@
1/**
2 * file phonet.h
3 *
4 * Phonet sockets kernel interface
5 *
6 * Copyright (C) 2008 Nokia Corporation. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef LINUX_PHONET_H
24#define LINUX_PHONET_H
25
26/* Automatic protocol selection */
27#define PN_PROTO_TRANSPORT 0
28/* Phonet datagram socket */
29#define PN_PROTO_PHONET 1
30#define PHONET_NPROTO 2
31
32#define PNADDR_ANY 0
33#define PNPORT_RESOURCE_ROUTING 0
34
35/* ioctls */
36#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0)
37
38/* Phonet protocol header */
39struct phonethdr {
40 __u8 pn_rdev;
41 __u8 pn_sdev;
42 __u8 pn_res;
43 __be16 pn_length;
44 __u8 pn_robj;
45 __u8 pn_sobj;
46} __attribute__((packed));
47
48/* Common Phonet payload header */
49struct phonetmsg {
50 __u8 pn_trans_id; /* transaction ID */
51 __u8 pn_msg_id; /* message type */
52 union {
53 struct {
54 __u8 pn_submsg_id; /* message subtype */
55 __u8 pn_data[5];
56 } base;
57 struct {
58 __u16 pn_e_res_id; /* extended resource ID */
59 __u8 pn_e_submsg_id; /* message subtype */
60 __u8 pn_e_data[3];
61 } ext;
62 } pn_msg_u;
63};
64#define PN_COMMON_MESSAGE 0xF0
65#define PN_PREFIX 0xE0 /* resource for extended messages */
66#define pn_submsg_id pn_msg_u.base.pn_submsg_id
67#define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id
68#define pn_e_res_id pn_msg_u.ext.pn_e_res_id
69#define pn_data pn_msg_u.base.pn_data
70#define pn_e_data pn_msg_u.ext.pn_e_data
71
72/* data for unreachable errors */
73#define PN_COMM_SERVICE_NOT_IDENTIFIED_RESP 0x01
74#define PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP 0x14
75#define pn_orig_msg_id pn_data[0]
76#define pn_status pn_data[1]
77#define pn_e_orig_msg_id pn_e_data[0]
78#define pn_e_status pn_e_data[1]
79
80/* Phonet socket address structure */
81struct sockaddr_pn {
82 sa_family_t spn_family;
83 __u8 spn_obj;
84 __u8 spn_dev;
85 __u8 spn_resource;
86 __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
87} __attribute__ ((packed));
88
89static inline __u16 pn_object(__u8 addr, __u16 port)
90{
91 return (addr << 8) | (port & 0x3ff);
92}
93
94static inline __u8 pn_obj(__u16 handle)
95{
96 return handle & 0xff;
97}
98
99static inline __u8 pn_dev(__u16 handle)
100{
101 return handle >> 8;
102}
103
104static inline __u16 pn_port(__u16 handle)
105{
106 return handle & 0x3ff;
107}
108
109static inline __u8 pn_addr(__u16 handle)
110{
111 return (handle >> 8) & 0xfc;
112}
113
114static inline void pn_sockaddr_set_addr(struct sockaddr_pn *spn, __u8 addr)
115{
116 spn->spn_dev &= 0x03;
117 spn->spn_dev |= addr & 0xfc;
118}
119
120static inline void pn_sockaddr_set_port(struct sockaddr_pn *spn, __u16 port)
121{
122 spn->spn_dev &= 0xfc;
123 spn->spn_dev |= (port >> 8) & 0x03;
124 spn->spn_obj = port & 0xff;
125}
126
127static inline void pn_sockaddr_set_object(struct sockaddr_pn *spn,
128 __u16 handle)
129{
130 spn->spn_dev = pn_dev(handle);
131 spn->spn_obj = pn_obj(handle);
132}
133
134static inline void pn_sockaddr_set_resource(struct sockaddr_pn *spn,
135 __u8 resource)
136{
137 spn->spn_resource = resource;
138}
139
140static inline __u8 pn_sockaddr_get_addr(const struct sockaddr_pn *spn)
141{
142 return spn->spn_dev & 0xfc;
143}
144
145static inline __u16 pn_sockaddr_get_port(const struct sockaddr_pn *spn)
146{
147 return ((spn->spn_dev & 0x03) << 8) | spn->spn_obj;
148}
149
150static inline __u16 pn_sockaddr_get_object(const struct sockaddr_pn *spn)
151{
152 return pn_object(spn->spn_dev, spn->spn_obj);
153}
154
155static inline __u8 pn_sockaddr_get_resource(const struct sockaddr_pn *spn)
156{
157 return spn->spn_resource;
158}
159
160#endif
diff --git a/include/linux/pim.h b/include/linux/pim.h
index 236ffd317394..1ba0661561a4 100644
--- a/include/linux/pim.h
+++ b/include/linux/pim.h
@@ -3,22 +3,6 @@
3 3
4#include <asm/byteorder.h> 4#include <asm/byteorder.h>
5 5
6#ifndef __KERNEL__
7struct pim {
8#if defined(__LITTLE_ENDIAN_BITFIELD)
9 __u8 pim_type:4, /* PIM message type */
10 pim_ver:4; /* PIM version */
11#elif defined(__BIG_ENDIAN_BITFIELD)
12 __u8 pim_ver:4; /* PIM version */
13 pim_type:4; /* PIM message type */
14#endif
15 __u8 pim_rsv; /* Reserved */
16 __be16 pim_cksum; /* Checksum */
17};
18
19#define PIM_MINLEN 8
20#endif
21
22/* Message types - V1 */ 6/* Message types - V1 */
23#define PIM_V1_VERSION __constant_htonl(0x10000000) 7#define PIM_V1_VERSION __constant_htonl(0x10000000)
24#define PIM_V1_REGISTER 1 8#define PIM_V1_REGISTER 1
@@ -27,7 +11,6 @@ struct pim {
27#define PIM_VERSION 2 11#define PIM_VERSION 2
28#define PIM_REGISTER 1 12#define PIM_REGISTER 1
29 13
30#if defined(__KERNEL__)
31#define PIM_NULL_REGISTER __constant_htonl(0x40000000) 14#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
32 15
33/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ 16/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
@@ -42,4 +25,3 @@ struct pimreghdr
42struct sk_buff; 25struct sk_buff;
43extern int pim_rcv_v1(struct sk_buff *); 26extern int pim_rcv_v1(struct sk_buff *);
44#endif 27#endif
45#endif
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index e5de421ac7b4..5d921fa91a5b 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -123,6 +123,13 @@ struct tc_prio_qopt
123 __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ 123 __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
124}; 124};
125 125
126/* MULTIQ section */
127
128struct tc_multiq_qopt {
129 __u16 bands; /* Number of bands */
130 __u16 max_bands; /* Maximum number of queues */
131};
132
126/* TBF section */ 133/* TBF section */
127 134
128struct tc_tbf_qopt 135struct tc_tbf_qopt
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
index 39b66713a0bb..bd466439c588 100644
--- a/include/linux/quicklist.h
+++ b/include/linux/quicklist.h
@@ -80,6 +80,13 @@ void quicklist_trim(int nr, void (*dtor)(void *),
80 80
81unsigned long quicklist_total_size(void); 81unsigned long quicklist_total_size(void);
82 82
83#else
84
85static inline unsigned long quicklist_total_size(void)
86{
87 return 0;
88}
89
83#endif 90#endif
84 91
85#endif /* LINUX_QUICKLIST_H */ 92#endif /* LINUX_QUICKLIST_H */
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fdeadd9740dc..271c1c2c9f6f 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -166,7 +166,7 @@ static inline int res_counter_set_limit(struct res_counter *cnt,
166 int ret = -EBUSY; 166 int ret = -EBUSY;
167 167
168 spin_lock_irqsave(&cnt->lock, flags); 168 spin_lock_irqsave(&cnt->lock, flags);
169 if (cnt->usage < limit) { 169 if (cnt->usage <= limit) {
170 cnt->limit = limit; 170 cnt->limit = limit;
171 ret = 0; 171 ret = 0;
172 } 172 }
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 741d1a62cc3f..4cd64b0d9825 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -49,6 +49,7 @@ enum rfkill_state {
49 RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */ 49 RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */
50 RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */ 50 RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */
51 RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */ 51 RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */
52 RFKILL_STATE_MAX, /* marker for last valid state */
52}; 53};
53 54
54/* 55/*
@@ -110,12 +111,14 @@ struct rfkill {
110}; 111};
111#define to_rfkill(d) container_of(d, struct rfkill, dev) 112#define to_rfkill(d) container_of(d, struct rfkill, dev)
112 113
113struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type); 114struct rfkill * __must_check rfkill_allocate(struct device *parent,
115 enum rfkill_type type);
114void rfkill_free(struct rfkill *rfkill); 116void rfkill_free(struct rfkill *rfkill);
115int rfkill_register(struct rfkill *rfkill); 117int __must_check rfkill_register(struct rfkill *rfkill);
116void rfkill_unregister(struct rfkill *rfkill); 118void rfkill_unregister(struct rfkill *rfkill);
117 119
118int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state); 120int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state);
121int rfkill_set_default(enum rfkill_type type, enum rfkill_state state);
119 122
120/** 123/**
 121 * rfkill_state_complement - return complementary state 124
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index ca643b13b026..2b3d51c6ec9c 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -582,6 +582,10 @@ enum rtnetlink_groups {
582#define RTNLGRP_IPV6_RULE RTNLGRP_IPV6_RULE 582#define RTNLGRP_IPV6_RULE RTNLGRP_IPV6_RULE
583 RTNLGRP_ND_USEROPT, 583 RTNLGRP_ND_USEROPT,
584#define RTNLGRP_ND_USEROPT RTNLGRP_ND_USEROPT 584#define RTNLGRP_ND_USEROPT RTNLGRP_ND_USEROPT
585 RTNLGRP_PHONET_IFADDR,
586#define RTNLGRP_PHONET_IFADDR RTNLGRP_PHONET_IFADDR
587 RTNLGRP_PHONET_ROUTE,
588#define RTNLGRP_PHONET_ROUTE RTNLGRP_PHONET_ROUTE
585 __RTNLGRP_MAX 589 __RTNLGRP_MAX
586}; 590};
587#define RTNLGRP_MAX (__RTNLGRP_MAX - 1) 591#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cfb0d87b99fc..3d9120c5ad15 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1475,6 +1475,10 @@ static inline void put_task_struct(struct task_struct *t)
1475 __put_task_struct(t); 1475 __put_task_struct(t);
1476} 1476}
1477 1477
1478extern cputime_t task_utime(struct task_struct *p);
1479extern cputime_t task_stime(struct task_struct *p);
1480extern cputime_t task_gtime(struct task_struct *p);
1481
1478/* 1482/*
1479 * Per process flags 1483 * Per process flags
1480 */ 1484 */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 909923717830..a19ea43fea02 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -146,8 +146,14 @@ struct skb_shared_info {
146 unsigned short gso_segs; 146 unsigned short gso_segs;
147 unsigned short gso_type; 147 unsigned short gso_type;
148 __be32 ip6_frag_id; 148 __be32 ip6_frag_id;
149#ifdef CONFIG_HAS_DMA
150 unsigned int num_dma_maps;
151#endif
149 struct sk_buff *frag_list; 152 struct sk_buff *frag_list;
150 skb_frag_t frags[MAX_SKB_FRAGS]; 153 skb_frag_t frags[MAX_SKB_FRAGS];
154#ifdef CONFIG_HAS_DMA
155 dma_addr_t dma_maps[MAX_SKB_FRAGS + 1];
156#endif
151}; 157};
152 158
153/* We divide dataref into two halves. The higher 16 bits hold references 159/* We divide dataref into two halves. The higher 16 bits hold references
@@ -353,6 +359,14 @@ struct sk_buff {
353 359
354#include <asm/system.h> 360#include <asm/system.h>
355 361
362#ifdef CONFIG_HAS_DMA
363#include <linux/dma-mapping.h>
364extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
365 enum dma_data_direction dir);
366extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
367 enum dma_data_direction dir);
368#endif
369
356extern void kfree_skb(struct sk_buff *skb); 370extern void kfree_skb(struct sk_buff *skb);
357extern void __kfree_skb(struct sk_buff *skb); 371extern void __kfree_skb(struct sk_buff *skb);
358extern struct sk_buff *__alloc_skb(unsigned int size, 372extern struct sk_buff *__alloc_skb(unsigned int size,
@@ -459,6 +473,37 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
459} 473}
460 474
461/** 475/**
476 * skb_queue_is_last - check if skb is the last entry in the queue
477 * @list: queue head
478 * @skb: buffer
479 *
480 * Returns true if @skb is the last buffer on the list.
481 */
482static inline bool skb_queue_is_last(const struct sk_buff_head *list,
483 const struct sk_buff *skb)
484{
485 return (skb->next == (struct sk_buff *) list);
486}
487
488/**
489 * skb_queue_next - return the next packet in the queue
490 * @list: queue head
491 * @skb: current buffer
492 *
493 * Return the next packet in @list after @skb. It is only valid to
494 * call this if skb_queue_is_last() evaluates to false.
495 */
496static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
497 const struct sk_buff *skb)
498{
499 /* This BUG_ON may seem severe, but if we just return then we
500 * are going to dereference garbage.
501 */
502 BUG_ON(skb_queue_is_last(list, skb));
503 return skb->next;
504}
505
506/**
462 * skb_get - reference buffer 507 * skb_get - reference buffer
463 * @skb: buffer to reference 508 * @skb: buffer to reference
464 * 509 *
@@ -646,6 +691,22 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
646 return list_->qlen; 691 return list_->qlen;
647} 692}
648 693
694/**
695 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
696 * @list: queue to initialize
697 *
698 * This initializes only the list and queue length aspects of
699 * an sk_buff_head object. This allows to initialize the list
700 * aspects of an sk_buff_head without reinitializing things like
701 * the spinlock. It can also be used for on-stack sk_buff_head
702 * objects where the spinlock is known to not be used.
703 */
704static inline void __skb_queue_head_init(struct sk_buff_head *list)
705{
706 list->prev = list->next = (struct sk_buff *)list;
707 list->qlen = 0;
708}
709
649/* 710/*
650 * This function creates a split out lock class for each invocation; 711 * This function creates a split out lock class for each invocation;
651 * this is needed for now since a whole lot of users of the skb-queue 712 * this is needed for now since a whole lot of users of the skb-queue
@@ -657,8 +718,7 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
657static inline void skb_queue_head_init(struct sk_buff_head *list) 718static inline void skb_queue_head_init(struct sk_buff_head *list)
658{ 719{
659 spin_lock_init(&list->lock); 720 spin_lock_init(&list->lock);
660 list->prev = list->next = (struct sk_buff *)list; 721 __skb_queue_head_init(list);
661 list->qlen = 0;
662} 722}
663 723
664static inline void skb_queue_head_init_class(struct sk_buff_head *list, 724static inline void skb_queue_head_init_class(struct sk_buff_head *list,
@@ -685,6 +745,83 @@ static inline void __skb_insert(struct sk_buff *newsk,
685 list->qlen++; 745 list->qlen++;
686} 746}
687 747
748static inline void __skb_queue_splice(const struct sk_buff_head *list,
749 struct sk_buff *prev,
750 struct sk_buff *next)
751{
752 struct sk_buff *first = list->next;
753 struct sk_buff *last = list->prev;
754
755 first->prev = prev;
756 prev->next = first;
757
758 last->next = next;
759 next->prev = last;
760}
761
762/**
763 * skb_queue_splice - join two skb lists, this is designed for stacks
764 * @list: the new list to add
765 * @head: the place to add it in the first list
766 */
767static inline void skb_queue_splice(const struct sk_buff_head *list,
768 struct sk_buff_head *head)
769{
770 if (!skb_queue_empty(list)) {
771 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
772 head->qlen += list->qlen;
773 }
774}
775
776/**
 777 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
778 * @list: the new list to add
779 * @head: the place to add it in the first list
780 *
781 * The list at @list is reinitialised
782 */
783static inline void skb_queue_splice_init(struct sk_buff_head *list,
784 struct sk_buff_head *head)
785{
786 if (!skb_queue_empty(list)) {
787 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
788 head->qlen += list->qlen;
789 __skb_queue_head_init(list);
790 }
791}
792
793/**
794 * skb_queue_splice_tail - join two skb lists, each list being a queue
795 * @list: the new list to add
796 * @head: the place to add it in the first list
797 */
798static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
799 struct sk_buff_head *head)
800{
801 if (!skb_queue_empty(list)) {
802 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
803 head->qlen += list->qlen;
804 }
805}
806
807/**
 808 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
809 * @list: the new list to add
810 * @head: the place to add it in the first list
811 *
812 * Each of the lists is a queue.
813 * The list at @list is reinitialised
814 */
815static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
816 struct sk_buff_head *head)
817{
818 if (!skb_queue_empty(list)) {
819 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
820 head->qlen += list->qlen;
821 __skb_queue_head_init(list);
822 }
823}
824
688/** 825/**
689 * __skb_queue_after - queue a buffer at the list head 826 * __skb_queue_after - queue a buffer at the list head
690 * @list: list to use 827 * @list: list to use
@@ -1434,6 +1571,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1434 skb != (struct sk_buff *)(queue); \ 1571 skb != (struct sk_buff *)(queue); \
1435 skb = tmp, tmp = skb->next) 1572 skb = tmp, tmp = skb->next)
1436 1573
1574#define skb_queue_walk_from(queue, skb) \
1575 for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
1576 skb = skb->next)
1577
1578#define skb_queue_walk_from_safe(queue, skb, tmp) \
1579 for (tmp = skb->next; \
1580 skb != (struct sk_buff *)(queue); \
1581 skb = tmp, tmp = skb->next)
1582
1437#define skb_queue_reverse_walk(queue, skb) \ 1583#define skb_queue_reverse_walk(queue, skb) \
1438 for (skb = (queue)->prev; \ 1584 for (skb = (queue)->prev; \
1439 prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \ 1585 prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
diff --git a/include/linux/socket.h b/include/linux/socket.h
index dc5086fe7736..818ca33bf79f 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -190,7 +190,8 @@ struct ucred {
190#define AF_IUCV 32 /* IUCV sockets */ 190#define AF_IUCV 32 /* IUCV sockets */
191#define AF_RXRPC 33 /* RxRPC sockets */ 191#define AF_RXRPC 33 /* RxRPC sockets */
192#define AF_ISDN 34 /* mISDN sockets */ 192#define AF_ISDN 34 /* mISDN sockets */
193#define AF_MAX 35 /* For now.. */ 193#define AF_PHONET 35 /* Phonet sockets */
194#define AF_MAX 36 /* For now.. */
194 195
195/* Protocol families, same as address families. */ 196/* Protocol families, same as address families. */
196#define PF_UNSPEC AF_UNSPEC 197#define PF_UNSPEC AF_UNSPEC
@@ -227,6 +228,7 @@ struct ucred {
227#define PF_IUCV AF_IUCV 228#define PF_IUCV AF_IUCV
228#define PF_RXRPC AF_RXRPC 229#define PF_RXRPC AF_RXRPC
229#define PF_ISDN AF_ISDN 230#define PF_ISDN AF_ISDN
231#define PF_PHONET AF_PHONET
230#define PF_MAX AF_MAX 232#define PF_MAX AF_MAX
231 233
232/* Maximum queue length specifiable by listen. */ 234/* Maximum queue length specifiable by listen. */
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index ebad0bac9801..99a0f991e850 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -245,8 +245,6 @@
245 245
246/* SPROM Revision 3 (inherits most data from rev 2) */ 246/* SPROM Revision 3 (inherits most data from rev 2) */
247#define SSB_SPROM3_IL0MAC 0x104A /* 6 bytes MAC address for 802.11b/g */ 247#define SSB_SPROM3_IL0MAC 0x104A /* 6 bytes MAC address for 802.11b/g */
248#define SSB_SPROM3_ET0MAC 0x1050 /* 6 bytes MAC address for Ethernet ?? */
249#define SSB_SPROM3_ET1MAC 0x1050 /* 6 bytes MAC address for 802.11a ?? */
250#define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ 248#define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */
251#define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ 249#define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */
252#define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ 250#define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */
@@ -267,8 +265,6 @@
267 265
268/* SPROM Revision 4 */ 266/* SPROM Revision 4 */
269#define SSB_SPROM4_IL0MAC 0x104C /* 6 byte MAC address for a/b/g/n */ 267#define SSB_SPROM4_IL0MAC 0x104C /* 6 byte MAC address for a/b/g/n */
270#define SSB_SPROM4_ET0MAC 0x1018 /* 6 bytes MAC address for Ethernet ?? */
271#define SSB_SPROM4_ET1MAC 0x1018 /* 6 bytes MAC address for 802.11a ?? */
272#define SSB_SPROM4_ETHPHY 0x105A /* Ethernet PHY settings ?? */ 268#define SSB_SPROM4_ETHPHY 0x105A /* Ethernet PHY settings ?? */
273#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ 269#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */
274#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ 270#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */
@@ -316,6 +312,21 @@
316#define SSB_SPROM4_PA1B1 0x1090 312#define SSB_SPROM4_PA1B1 0x1090
317#define SSB_SPROM4_PA1B2 0x1092 313#define SSB_SPROM4_PA1B2 0x1092
318 314
315/* SPROM Revision 5 (inherits most data from rev 4) */
316#define SSB_SPROM5_BFLLO 0x104A /* Boardflags (low 16 bits) */
317#define SSB_SPROM5_BFLHI 0x104C /* Board Flags Hi */
318#define SSB_SPROM5_IL0MAC 0x1052 /* 6 byte MAC address for a/b/g/n */
319#define SSB_SPROM5_CCODE 0x1044 /* Country Code (2 bytes) */
320#define SSB_SPROM5_GPIOA 0x1076 /* Gen. Purpose IO # 0 and 1 */
321#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */
322#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */
323#define SSB_SPROM5_GPIOA_P1_SHIFT 8
324#define SSB_SPROM5_GPIOB 0x1078 /* Gen. Purpose IO # 2 and 3 */
325#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */
326#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */
327#define SSB_SPROM5_GPIOB_P3_SHIFT 8
328
329
319/* Values for SSB_SPROM1_BINF_CCODE */ 330/* Values for SSB_SPROM1_BINF_CCODE */
320enum { 331enum {
321 SSB_SPROM1CCODE_WORLD = 0, 332 SSB_SPROM1CCODE_WORLD = 0,
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index ef2e3a20bf3b..dc05b54bd3a3 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -143,7 +143,6 @@ struct svcxprt_rdma {
143 unsigned long sc_flags; 143 unsigned long sc_flags;
144 struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */ 144 struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */
145 struct list_head sc_read_complete_q; 145 struct list_head sc_read_complete_q;
146 spinlock_t sc_read_complete_lock;
147 struct work_struct sc_work; 146 struct work_struct sc_work;
148}; 147};
149/* sc_flags */ 148/* sc_flags */
diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild
index 6dac0d7365cc..76990937f4c9 100644
--- a/include/linux/tc_act/Kbuild
+++ b/include/linux/tc_act/Kbuild
@@ -3,3 +3,4 @@ header-y += tc_ipt.h
3header-y += tc_mirred.h 3header-y += tc_mirred.h
4header-y += tc_pedit.h 4header-y += tc_pedit.h
5header-y += tc_nat.h 5header-y += tc_nat.h
6header-y += tc_skbedit.h
diff --git a/include/linux/tc_act/tc_skbedit.h b/include/linux/tc_act/tc_skbedit.h
new file mode 100644
index 000000000000..a14e461a7af7
--- /dev/null
+++ b/include/linux/tc_act/tc_skbedit.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
18 */
19
20#ifndef __LINUX_TC_SKBEDIT_H
21#define __LINUX_TC_SKBEDIT_H
22
23#include <linux/pkt_cls.h>
24
25#define TCA_ACT_SKBEDIT 11
26
27#define SKBEDIT_F_PRIORITY 0x1
28#define SKBEDIT_F_QUEUE_MAPPING 0x2
29
30struct tc_skbedit {
31 tc_gen;
32};
33
34enum {
35 TCA_SKBEDIT_UNSPEC,
36 TCA_SKBEDIT_TM,
37 TCA_SKBEDIT_PARMS,
38 TCA_SKBEDIT_PRIORITY,
39 TCA_SKBEDIT_QUEUE_MAPPING,
40 __TCA_SKBEDIT_MAX
41};
42#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
43
44#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 2e2557388e36..767290628292 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -342,7 +342,6 @@ struct tcp_sock {
342 struct sk_buff* lost_skb_hint; 342 struct sk_buff* lost_skb_hint;
343 struct sk_buff *scoreboard_skb_hint; 343 struct sk_buff *scoreboard_skb_hint;
344 struct sk_buff *retransmit_skb_hint; 344 struct sk_buff *retransmit_skb_hint;
345 struct sk_buff *forward_skb_hint;
346 345
347 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ 346 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
348 347
@@ -358,7 +357,7 @@ struct tcp_sock {
358 */ 357 */
359 358
360 int lost_cnt_hint; 359 int lost_cnt_hint;
361 int retransmit_cnt_hint; 360 u32 retransmit_high; /* L-bits may be on up to this seqno */
362 361
363 u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ 362 u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */
364 363
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index b48d81969574..6186a789d6c7 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -272,7 +272,7 @@ static inline void tracehook_finish_clone(struct task_struct *child,
272 * tracehook_report_clone_complete(). This must prevent the child from 272 * tracehook_report_clone_complete(). This must prevent the child from
273 * self-reaping if tracehook_report_clone_complete() uses the @child 273 * self-reaping if tracehook_report_clone_complete() uses the @child
274 * pointer; otherwise it might have died and been released by the time 274 * pointer; otherwise it might have died and been released by the time
275 * tracehook_report_report_clone_complete() is called. 275 * tracehook_report_clone_complete() is called.
276 * 276 *
277 * Called with no locks held, but the child cannot run until this returns. 277 * Called with no locks held, but the child cannot run until this returns.
278 */ 278 */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index e65a6bed4e3e..303d93ffd6b2 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -334,6 +334,8 @@ struct v4l2_pix_format {
334#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ 334#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */
335#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ 335#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */
336#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ 336#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */
337#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
338#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
337 339
338/* 340/*
339 * F O R M A T E N U M E R A T I O N 341 * F O R M A T E N U M E R A T I O N
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index cbf751094688..46a43b721dd6 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -325,7 +325,8 @@ int hci_conn_del(struct hci_conn *conn);
325void hci_conn_hash_flush(struct hci_dev *hdev); 325void hci_conn_hash_flush(struct hci_dev *hdev);
326void hci_conn_check_pending(struct hci_dev *hdev); 326void hci_conn_check_pending(struct hci_dev *hdev);
327 327
328struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src); 328struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type);
329int hci_conn_check_link_mode(struct hci_conn *conn);
329int hci_conn_auth(struct hci_conn *conn); 330int hci_conn_auth(struct hci_conn *conn);
330int hci_conn_encrypt(struct hci_conn *conn); 331int hci_conn_encrypt(struct hci_conn *conn);
331int hci_conn_change_link_key(struct hci_conn *conn); 332int hci_conn_change_link_key(struct hci_conn *conn);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index e00750836ba5..9f40c4d417d7 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -152,6 +152,7 @@ struct station_parameters {
152 u16 aid; 152 u16 aid;
153 u8 supported_rates_len; 153 u8 supported_rates_len;
154 u8 plink_action; 154 u8 plink_action;
155 struct ieee80211_ht_cap *ht_capa;
155}; 156};
156 157
157/** 158/**
@@ -268,6 +269,83 @@ struct mpath_info {
268 u8 flags; 269 u8 flags;
269}; 270};
270 271
272/**
273 * struct bss_parameters - BSS parameters
274 *
275 * Used to change BSS parameters (mainly for AP mode).
276 *
277 * @use_cts_prot: Whether to use CTS protection
278 * (0 = no, 1 = yes, -1 = do not change)
279 * @use_short_preamble: Whether the use of short preambles is allowed
280 * (0 = no, 1 = yes, -1 = do not change)
281 * @use_short_slot_time: Whether the use of short slot time is allowed
282 * (0 = no, 1 = yes, -1 = do not change)
283 */
284struct bss_parameters {
285 int use_cts_prot;
286 int use_short_preamble;
287 int use_short_slot_time;
288};
289
290/**
291 * enum reg_set_by - Indicates who is trying to set the regulatory domain
292 * @REGDOM_SET_BY_INIT: regulatory domain was set by initialization. We will be
293 * using a static world regulatory domain by default.
294 * @REGDOM_SET_BY_CORE: Core queried CRDA for a dynamic world regulatory domain.
295 * @REGDOM_SET_BY_USER: User asked the wireless core to set the
296 * regulatory domain.
 297 * @REGDOM_SET_BY_DRIVER: a wireless driver has hinted to the wireless core
 298 * it thinks it knows the regulatory domain we should be in.
299 * @REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an 802.11 country
300 * information element with regulatory information it thinks we
301 * should consider.
302 */
303enum reg_set_by {
304 REGDOM_SET_BY_INIT,
305 REGDOM_SET_BY_CORE,
306 REGDOM_SET_BY_USER,
307 REGDOM_SET_BY_DRIVER,
308 REGDOM_SET_BY_COUNTRY_IE,
309};
310
311struct ieee80211_freq_range {
312 u32 start_freq_khz;
313 u32 end_freq_khz;
314 u32 max_bandwidth_khz;
315};
316
317struct ieee80211_power_rule {
318 u32 max_antenna_gain;
319 u32 max_eirp;
320};
321
322struct ieee80211_reg_rule {
323 struct ieee80211_freq_range freq_range;
324 struct ieee80211_power_rule power_rule;
325 u32 flags;
326};
327
328struct ieee80211_regdomain {
329 u32 n_reg_rules;
330 char alpha2[2];
331 struct ieee80211_reg_rule reg_rules[];
332};
333
334#define MHZ_TO_KHZ(freq) (freq * 1000)
335#define KHZ_TO_MHZ(freq) (freq / 1000)
336#define DBI_TO_MBI(gain) (gain * 100)
337#define MBI_TO_DBI(gain) (gain / 100)
338#define DBM_TO_MBM(gain) (gain * 100)
339#define MBM_TO_DBM(gain) (gain / 100)
340
341#define REG_RULE(start, end, bw, gain, eirp, reg_flags) { \
342 .freq_range.start_freq_khz = (start) * 1000, \
343 .freq_range.end_freq_khz = (end) * 1000, \
344 .freq_range.max_bandwidth_khz = (bw) * 1000, \
345 .power_rule.max_antenna_gain = (gain) * 100, \
346 .power_rule.max_eirp = (eirp) * 100, \
347 .flags = reg_flags, \
348 }
271 349
272/* from net/wireless.h */ 350/* from net/wireless.h */
273struct wiphy; 351struct wiphy;
@@ -318,6 +396,8 @@ struct wiphy;
318 * @change_station: Modify a given station. 396 * @change_station: Modify a given station.
319 * 397 *
320 * @set_mesh_cfg: set mesh parameters (by now, just mesh id) 398 * @set_mesh_cfg: set mesh parameters (by now, just mesh id)
399 *
400 * @change_bss: Modify parameters for a given BSS.
321 */ 401 */
322struct cfg80211_ops { 402struct cfg80211_ops {
323 int (*add_virtual_intf)(struct wiphy *wiphy, char *name, 403 int (*add_virtual_intf)(struct wiphy *wiphy, char *name,
@@ -370,6 +450,9 @@ struct cfg80211_ops {
370 int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, 450 int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev,
371 int idx, u8 *dst, u8 *next_hop, 451 int idx, u8 *dst, u8 *next_hop,
372 struct mpath_info *pinfo); 452 struct mpath_info *pinfo);
453
454 int (*change_bss)(struct wiphy *wiphy, struct net_device *dev,
455 struct bss_parameters *params);
373}; 456};
374 457
375#endif /* __NET_CFG80211_H */ 458#endif /* __NET_CFG80211_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 2ff545a56fb5..03cffd9f64e3 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -51,12 +51,14 @@ struct inet_connection_sock_af_ops {
51 char __user *optval, int optlen); 51 char __user *optval, int optlen);
52 int (*getsockopt)(struct sock *sk, int level, int optname, 52 int (*getsockopt)(struct sock *sk, int level, int optname,
53 char __user *optval, int __user *optlen); 53 char __user *optval, int __user *optlen);
54#ifdef CONFIG_COMPAT
54 int (*compat_setsockopt)(struct sock *sk, 55 int (*compat_setsockopt)(struct sock *sk,
55 int level, int optname, 56 int level, int optname,
56 char __user *optval, int optlen); 57 char __user *optval, int optlen);
57 int (*compat_getsockopt)(struct sock *sk, 58 int (*compat_getsockopt)(struct sock *sk,
58 int level, int optname, 59 int level, int optname,
59 char __user *optval, int __user *optlen); 60 char __user *optval, int __user *optlen);
61#endif
60 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); 62 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
61 int (*bind_conflict)(const struct sock *sk, 63 int (*bind_conflict)(const struct sock *sk,
62 const struct inet_bind_bucket *tb); 64 const struct inet_bind_bucket *tb);
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 95c660c9719b..91324908fccd 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -208,6 +208,9 @@ extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
208extern void inet_twsk_deschedule(struct inet_timewait_sock *tw, 208extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
209 struct inet_timewait_death_row *twdr); 209 struct inet_timewait_death_row *twdr);
210 210
211extern void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
212 struct inet_timewait_death_row *twdr, int family);
213
211static inline 214static inline
212struct net *twsk_net(const struct inet_timewait_sock *twsk) 215struct net *twsk_net(const struct inet_timewait_sock *twsk)
213{ 216{
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7312c3dd309f..33e2ac6ceb3e 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -21,11 +21,103 @@
21#include <linux/timer.h> 21#include <linux/timer.h>
22 22
23#include <net/checksum.h> 23#include <net/checksum.h>
24#include <linux/netfilter.h> /* for union nf_inet_addr */
25#include <linux/ipv6.h> /* for struct ipv6hdr */
26#include <net/ipv6.h> /* for ipv6_addr_copy */
27
28struct ip_vs_iphdr {
29 int len;
30 __u8 protocol;
31 union nf_inet_addr saddr;
32 union nf_inet_addr daddr;
33};
34
35static inline void
36ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
37{
38#ifdef CONFIG_IP_VS_IPV6
39 if (af == AF_INET6) {
40 const struct ipv6hdr *iph = nh;
41 iphdr->len = sizeof(struct ipv6hdr);
42 iphdr->protocol = iph->nexthdr;
43 ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr);
44 ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr);
45 } else
46#endif
47 {
48 const struct iphdr *iph = nh;
49 iphdr->len = iph->ihl * 4;
50 iphdr->protocol = iph->protocol;
51 iphdr->saddr.ip = iph->saddr;
52 iphdr->daddr.ip = iph->daddr;
53 }
54}
55
56static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
57 const union nf_inet_addr *src)
58{
59#ifdef CONFIG_IP_VS_IPV6
60 if (af == AF_INET6)
61 ipv6_addr_copy(&dst->in6, &src->in6);
62 else
63#endif
64 dst->ip = src->ip;
65}
66
67static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
68 const union nf_inet_addr *b)
69{
70#ifdef CONFIG_IP_VS_IPV6
71 if (af == AF_INET6)
72 return ipv6_addr_equal(&a->in6, &b->in6);
73#endif
74 return a->ip == b->ip;
75}
24 76
25#ifdef CONFIG_IP_VS_DEBUG 77#ifdef CONFIG_IP_VS_DEBUG
26#include <linux/net.h> 78#include <linux/net.h>
27 79
28extern int ip_vs_get_debug_level(void); 80extern int ip_vs_get_debug_level(void);
81
82static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
83 const union nf_inet_addr *addr,
84 int *idx)
85{
86 int len;
87#ifdef CONFIG_IP_VS_IPV6
88 if (af == AF_INET6)
89 len = snprintf(&buf[*idx], buf_len - *idx, "[" NIP6_FMT "]",
90 NIP6(addr->in6)) + 1;
91 else
92#endif
93 len = snprintf(&buf[*idx], buf_len - *idx, NIPQUAD_FMT,
94 NIPQUAD(addr->ip)) + 1;
95
96 *idx += len;
97 BUG_ON(*idx > buf_len + 1);
98 return &buf[*idx - len];
99}
100
101#define IP_VS_DBG_BUF(level, msg...) \
102 do { \
103 char ip_vs_dbg_buf[160]; \
104 int ip_vs_dbg_idx = 0; \
105 if (level <= ip_vs_get_debug_level()) \
106 printk(KERN_DEBUG "IPVS: " msg); \
107 } while (0)
108#define IP_VS_ERR_BUF(msg...) \
109 do { \
110 char ip_vs_dbg_buf[160]; \
111 int ip_vs_dbg_idx = 0; \
112 printk(KERN_ERR "IPVS: " msg); \
113 } while (0)
114
115/* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */
116#define IP_VS_DBG_ADDR(af, addr) \
117 ip_vs_dbg_addr(af, ip_vs_dbg_buf, \
118 sizeof(ip_vs_dbg_buf), addr, \
119 &ip_vs_dbg_idx)
120
29#define IP_VS_DBG(level, msg...) \ 121#define IP_VS_DBG(level, msg...) \
30 do { \ 122 do { \
31 if (level <= ip_vs_get_debug_level()) \ 123 if (level <= ip_vs_get_debug_level()) \
@@ -48,6 +140,8 @@ extern int ip_vs_get_debug_level(void);
48 pp->debug_packet(pp, skb, ofs, msg); \ 140 pp->debug_packet(pp, skb, ofs, msg); \
49 } while (0) 141 } while (0)
50#else /* NO DEBUGGING at ALL */ 142#else /* NO DEBUGGING at ALL */
143#define IP_VS_DBG_BUF(level, msg...) do {} while (0)
144#define IP_VS_ERR_BUF(msg...) do {} while (0)
51#define IP_VS_DBG(level, msg...) do {} while (0) 145#define IP_VS_DBG(level, msg...) do {} while (0)
52#define IP_VS_DBG_RL(msg...) do {} while (0) 146#define IP_VS_DBG_RL(msg...) do {} while (0)
53#define IP_VS_DBG_PKT(level, pp, skb, ofs, msg) do {} while (0) 147#define IP_VS_DBG_PKT(level, pp, skb, ofs, msg) do {} while (0)
@@ -160,27 +254,10 @@ struct ip_vs_estimator {
160 254
161struct ip_vs_stats 255struct ip_vs_stats
162{ 256{
163 __u32 conns; /* connections scheduled */ 257 struct ip_vs_stats_user ustats; /* statistics */
164 __u32 inpkts; /* incoming packets */ 258 struct ip_vs_estimator est; /* estimator */
165 __u32 outpkts; /* outgoing packets */
166 __u64 inbytes; /* incoming bytes */
167 __u64 outbytes; /* outgoing bytes */
168
169 __u32 cps; /* current connection rate */
170 __u32 inpps; /* current in packet rate */
171 __u32 outpps; /* current out packet rate */
172 __u32 inbps; /* current in byte rate */
173 __u32 outbps; /* current out byte rate */
174
175 /*
176 * Don't add anything before the lock, because we use memcpy() to copy
177 * the members before the lock to struct ip_vs_stats_user in
178 * ip_vs_ctl.c.
179 */
180 259
181 spinlock_t lock; /* spin lock */ 260 spinlock_t lock; /* spin lock */
182
183 struct ip_vs_estimator est; /* estimator */
184}; 261};
185 262
186struct dst_entry; 263struct dst_entry;
@@ -202,21 +279,23 @@ struct ip_vs_protocol {
202 279
203 void (*exit)(struct ip_vs_protocol *pp); 280 void (*exit)(struct ip_vs_protocol *pp);
204 281
205 int (*conn_schedule)(struct sk_buff *skb, 282 int (*conn_schedule)(int af, struct sk_buff *skb,
206 struct ip_vs_protocol *pp, 283 struct ip_vs_protocol *pp,
207 int *verdict, struct ip_vs_conn **cpp); 284 int *verdict, struct ip_vs_conn **cpp);
208 285
209 struct ip_vs_conn * 286 struct ip_vs_conn *
210 (*conn_in_get)(const struct sk_buff *skb, 287 (*conn_in_get)(int af,
288 const struct sk_buff *skb,
211 struct ip_vs_protocol *pp, 289 struct ip_vs_protocol *pp,
212 const struct iphdr *iph, 290 const struct ip_vs_iphdr *iph,
213 unsigned int proto_off, 291 unsigned int proto_off,
214 int inverse); 292 int inverse);
215 293
216 struct ip_vs_conn * 294 struct ip_vs_conn *
217 (*conn_out_get)(const struct sk_buff *skb, 295 (*conn_out_get)(int af,
296 const struct sk_buff *skb,
218 struct ip_vs_protocol *pp, 297 struct ip_vs_protocol *pp,
219 const struct iphdr *iph, 298 const struct ip_vs_iphdr *iph,
220 unsigned int proto_off, 299 unsigned int proto_off,
221 int inverse); 300 int inverse);
222 301
@@ -226,7 +305,8 @@ struct ip_vs_protocol {
226 int (*dnat_handler)(struct sk_buff *skb, 305 int (*dnat_handler)(struct sk_buff *skb,
227 struct ip_vs_protocol *pp, struct ip_vs_conn *cp); 306 struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
228 307
229 int (*csum_check)(struct sk_buff *skb, struct ip_vs_protocol *pp); 308 int (*csum_check)(int af, struct sk_buff *skb,
309 struct ip_vs_protocol *pp);
230 310
231 const char *(*state_name)(int state); 311 const char *(*state_name)(int state);
232 312
@@ -259,9 +339,10 @@ struct ip_vs_conn {
259 struct list_head c_list; /* hashed list heads */ 339 struct list_head c_list; /* hashed list heads */
260 340
261 /* Protocol, addresses and port numbers */ 341 /* Protocol, addresses and port numbers */
262 __be32 caddr; /* client address */ 342 u16 af; /* address family */
263 __be32 vaddr; /* virtual address */ 343 union nf_inet_addr caddr; /* client address */
264 __be32 daddr; /* destination address */ 344 union nf_inet_addr vaddr; /* virtual address */
345 union nf_inet_addr daddr; /* destination address */
265 __be16 cport; 346 __be16 cport;
266 __be16 vport; 347 __be16 vport;
267 __be16 dport; 348 __be16 dport;
@@ -305,6 +386,45 @@ struct ip_vs_conn {
305 386
306 387
307/* 388/*
389 * Extended internal versions of struct ip_vs_service_user and
390 * ip_vs_dest_user for IPv6 support.
391 *
392 * We need these to conveniently pass around service and destination
393 * options, but unfortunately, we also need to keep the old definitions to
394 * maintain userspace backwards compatibility for the setsockopt interface.
395 */
396struct ip_vs_service_user_kern {
397 /* virtual service addresses */
398 u16 af;
399 u16 protocol;
400 union nf_inet_addr addr; /* virtual ip address */
401 u16 port;
 402 u32 fwmark; /* firewall mark of service */
403
404 /* virtual service options */
405 char *sched_name;
406 unsigned flags; /* virtual service flags */
407 unsigned timeout; /* persistent timeout in sec */
408 u32 netmask; /* persistent netmask */
409};
410
411
412struct ip_vs_dest_user_kern {
413 /* destination server address */
414 union nf_inet_addr addr;
415 u16 port;
416
417 /* real server options */
418 unsigned conn_flags; /* connection flags */
419 int weight; /* destination weight */
420
421 /* thresholds for active connections */
422 u32 u_threshold; /* upper threshold */
423 u32 l_threshold; /* lower threshold */
424};
425
426
427/*
308 * The information about the virtual service offered to the net 428 * The information about the virtual service offered to the net
309 * and the forwarding entries 429 * and the forwarding entries
310 */ 430 */
@@ -314,8 +434,9 @@ struct ip_vs_service {
314 atomic_t refcnt; /* reference counter */ 434 atomic_t refcnt; /* reference counter */
315 atomic_t usecnt; /* use counter */ 435 atomic_t usecnt; /* use counter */
316 436
437 u16 af; /* address family */
317 __u16 protocol; /* which protocol (TCP/UDP) */ 438 __u16 protocol; /* which protocol (TCP/UDP) */
318 __be32 addr; /* IP address for virtual service */ 439 union nf_inet_addr addr; /* IP address for virtual service */
319 __be16 port; /* port number for the service */ 440 __be16 port; /* port number for the service */
320 __u32 fwmark; /* firewall mark of the service */ 441 __u32 fwmark; /* firewall mark of the service */
321 unsigned flags; /* service status flags */ 442 unsigned flags; /* service status flags */
@@ -342,7 +463,8 @@ struct ip_vs_dest {
342 struct list_head n_list; /* for the dests in the service */ 463 struct list_head n_list; /* for the dests in the service */
343 struct list_head d_list; /* for table with all the dests */ 464 struct list_head d_list; /* for table with all the dests */
344 465
345 __be32 addr; /* IP address of the server */ 466 u16 af; /* address family */
467 union nf_inet_addr addr; /* IP address of the server */
346 __be16 port; /* port number of the server */ 468 __be16 port; /* port number of the server */
347 volatile unsigned flags; /* dest status flags */ 469 volatile unsigned flags; /* dest status flags */
348 atomic_t conn_flags; /* flags to copy to conn */ 470 atomic_t conn_flags; /* flags to copy to conn */
@@ -366,7 +488,7 @@ struct ip_vs_dest {
366 /* for virtual service */ 488 /* for virtual service */
367 struct ip_vs_service *svc; /* service it belongs to */ 489 struct ip_vs_service *svc; /* service it belongs to */
368 __u16 protocol; /* which protocol (TCP/UDP) */ 490 __u16 protocol; /* which protocol (TCP/UDP) */
369 __be32 vaddr; /* virtual IP address */ 491 union nf_inet_addr vaddr; /* virtual IP address */
370 __be16 vport; /* virtual port number */ 492 __be16 vport; /* virtual port number */
371 __u32 vfwmark; /* firewall mark of service */ 493 __u32 vfwmark; /* firewall mark of service */
372}; 494};
@@ -380,6 +502,9 @@ struct ip_vs_scheduler {
380 char *name; /* scheduler name */ 502 char *name; /* scheduler name */
381 atomic_t refcnt; /* reference counter */ 503 atomic_t refcnt; /* reference counter */
382 struct module *module; /* THIS_MODULE/NULL */ 504 struct module *module; /* THIS_MODULE/NULL */
505#ifdef CONFIG_IP_VS_IPV6
506 int supports_ipv6; /* scheduler has IPv6 support */
507#endif
383 508
384 /* scheduler initializing service */ 509 /* scheduler initializing service */
385 int (*init_service)(struct ip_vs_service *svc); 510 int (*init_service)(struct ip_vs_service *svc);
@@ -479,16 +604,8 @@ extern void ip_vs_init_hash_table(struct list_head *table, int rows);
479#ifndef CONFIG_IP_VS_TAB_BITS 604#ifndef CONFIG_IP_VS_TAB_BITS
480#define CONFIG_IP_VS_TAB_BITS 12 605#define CONFIG_IP_VS_TAB_BITS 12
481#endif 606#endif
482/* make sure that IP_VS_CONN_TAB_BITS is located in [8, 20] */ 607
483#if CONFIG_IP_VS_TAB_BITS < 8
484#define IP_VS_CONN_TAB_BITS 8
485#endif
486#if CONFIG_IP_VS_TAB_BITS > 20
487#define IP_VS_CONN_TAB_BITS 20
488#endif
489#if 8 <= CONFIG_IP_VS_TAB_BITS && CONFIG_IP_VS_TAB_BITS <= 20
490#define IP_VS_CONN_TAB_BITS CONFIG_IP_VS_TAB_BITS 608#define IP_VS_CONN_TAB_BITS CONFIG_IP_VS_TAB_BITS
491#endif
492#define IP_VS_CONN_TAB_SIZE (1 << IP_VS_CONN_TAB_BITS) 609#define IP_VS_CONN_TAB_SIZE (1 << IP_VS_CONN_TAB_BITS)
493#define IP_VS_CONN_TAB_MASK (IP_VS_CONN_TAB_SIZE - 1) 610#define IP_VS_CONN_TAB_MASK (IP_VS_CONN_TAB_SIZE - 1)
494 611
@@ -500,11 +617,16 @@ enum {
500}; 617};
501 618
502extern struct ip_vs_conn *ip_vs_conn_in_get 619extern struct ip_vs_conn *ip_vs_conn_in_get
503(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port); 620(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
621 const union nf_inet_addr *d_addr, __be16 d_port);
622
504extern struct ip_vs_conn *ip_vs_ct_in_get 623extern struct ip_vs_conn *ip_vs_ct_in_get
505(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port); 624(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
625 const union nf_inet_addr *d_addr, __be16 d_port);
626
506extern struct ip_vs_conn *ip_vs_conn_out_get 627extern struct ip_vs_conn *ip_vs_conn_out_get
507(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port); 628(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
629 const union nf_inet_addr *d_addr, __be16 d_port);
508 630
509/* put back the conn without restarting its timer */ 631/* put back the conn without restarting its timer */
510static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) 632static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
@@ -515,8 +637,9 @@ extern void ip_vs_conn_put(struct ip_vs_conn *cp);
515extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); 637extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
516 638
517extern struct ip_vs_conn * 639extern struct ip_vs_conn *
518ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport, 640ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
519 __be32 daddr, __be16 dport, unsigned flags, 641 const union nf_inet_addr *vaddr, __be16 vport,
642 const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
520 struct ip_vs_dest *dest); 643 struct ip_vs_dest *dest);
521extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp); 644extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
522 645
@@ -532,24 +655,32 @@ static inline void ip_vs_control_del(struct ip_vs_conn *cp)
532{ 655{
533 struct ip_vs_conn *ctl_cp = cp->control; 656 struct ip_vs_conn *ctl_cp = cp->control;
534 if (!ctl_cp) { 657 if (!ctl_cp) {
535 IP_VS_ERR("request control DEL for uncontrolled: " 658 IP_VS_ERR_BUF("request control DEL for uncontrolled: "
536 "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n", 659 "%s:%d to %s:%d\n",
537 NIPQUAD(cp->caddr),ntohs(cp->cport), 660 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
538 NIPQUAD(cp->vaddr),ntohs(cp->vport)); 661 ntohs(cp->cport),
662 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
663 ntohs(cp->vport));
664
539 return; 665 return;
540 } 666 }
541 667
542 IP_VS_DBG(7, "DELeting control for: " 668 IP_VS_DBG_BUF(7, "DELeting control for: "
543 "cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n", 669 "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
544 NIPQUAD(cp->caddr),ntohs(cp->cport), 670 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
545 NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport)); 671 ntohs(cp->cport),
672 IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
673 ntohs(ctl_cp->cport));
546 674
547 cp->control = NULL; 675 cp->control = NULL;
548 if (atomic_read(&ctl_cp->n_control) == 0) { 676 if (atomic_read(&ctl_cp->n_control) == 0) {
549 IP_VS_ERR("BUG control DEL with n=0 : " 677 IP_VS_ERR_BUF("BUG control DEL with n=0 : "
550 "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n", 678 "%s:%d to %s:%d\n",
551 NIPQUAD(cp->caddr),ntohs(cp->cport), 679 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
552 NIPQUAD(cp->vaddr),ntohs(cp->vport)); 680 ntohs(cp->cport),
681 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
682 ntohs(cp->vport));
683
553 return; 684 return;
554 } 685 }
555 atomic_dec(&ctl_cp->n_control); 686 atomic_dec(&ctl_cp->n_control);
@@ -559,17 +690,22 @@ static inline void
559ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) 690ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
560{ 691{
561 if (cp->control) { 692 if (cp->control) {
562 IP_VS_ERR("request control ADD for already controlled: " 693 IP_VS_ERR_BUF("request control ADD for already controlled: "
563 "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n", 694 "%s:%d to %s:%d\n",
564 NIPQUAD(cp->caddr),ntohs(cp->cport), 695 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
565 NIPQUAD(cp->vaddr),ntohs(cp->vport)); 696 ntohs(cp->cport),
697 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
698 ntohs(cp->vport));
699
566 ip_vs_control_del(cp); 700 ip_vs_control_del(cp);
567 } 701 }
568 702
569 IP_VS_DBG(7, "ADDing control for: " 703 IP_VS_DBG_BUF(7, "ADDing control for: "
570 "cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n", 704 "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
571 NIPQUAD(cp->caddr),ntohs(cp->cport), 705 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
572 NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport)); 706 ntohs(cp->cport),
707 IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
708 ntohs(ctl_cp->cport));
573 709
574 cp->control = ctl_cp; 710 cp->control = ctl_cp;
575 atomic_inc(&ctl_cp->n_control); 711 atomic_inc(&ctl_cp->n_control);
@@ -647,7 +783,8 @@ extern struct ip_vs_stats ip_vs_stats;
647extern const struct ctl_path net_vs_ctl_path[]; 783extern const struct ctl_path net_vs_ctl_path[];
648 784
649extern struct ip_vs_service * 785extern struct ip_vs_service *
650ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport); 786ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
787 const union nf_inet_addr *vaddr, __be16 vport);
651 788
652static inline void ip_vs_service_put(struct ip_vs_service *svc) 789static inline void ip_vs_service_put(struct ip_vs_service *svc)
653{ 790{
@@ -655,14 +792,16 @@ static inline void ip_vs_service_put(struct ip_vs_service *svc)
655} 792}
656 793
657extern struct ip_vs_dest * 794extern struct ip_vs_dest *
658ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport); 795ip_vs_lookup_real_service(int af, __u16 protocol,
796 const union nf_inet_addr *daddr, __be16 dport);
797
659extern int ip_vs_use_count_inc(void); 798extern int ip_vs_use_count_inc(void);
660extern void ip_vs_use_count_dec(void); 799extern void ip_vs_use_count_dec(void);
661extern int ip_vs_control_init(void); 800extern int ip_vs_control_init(void);
662extern void ip_vs_control_cleanup(void); 801extern void ip_vs_control_cleanup(void);
663extern struct ip_vs_dest * 802extern struct ip_vs_dest *
664ip_vs_find_dest(__be32 daddr, __be16 dport, 803ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport,
665 __be32 vaddr, __be16 vport, __u16 protocol); 804 const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol);
666extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp); 805extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
667 806
668 807
@@ -683,6 +822,8 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
683/* 822/*
684 * IPVS rate estimator prototypes (from ip_vs_est.c) 823 * IPVS rate estimator prototypes (from ip_vs_est.c)
685 */ 824 */
825extern int ip_vs_estimator_init(void);
826extern void ip_vs_estimator_cleanup(void);
686extern void ip_vs_new_estimator(struct ip_vs_stats *stats); 827extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
687extern void ip_vs_kill_estimator(struct ip_vs_stats *stats); 828extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
688extern void ip_vs_zero_estimator(struct ip_vs_stats *stats); 829extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
@@ -704,6 +845,19 @@ extern int ip_vs_icmp_xmit
704(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset); 845(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset);
705extern void ip_vs_dst_reset(struct ip_vs_dest *dest); 846extern void ip_vs_dst_reset(struct ip_vs_dest *dest);
706 847
848#ifdef CONFIG_IP_VS_IPV6
849extern int ip_vs_bypass_xmit_v6
850(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
851extern int ip_vs_nat_xmit_v6
852(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
853extern int ip_vs_tunnel_xmit_v6
854(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
855extern int ip_vs_dr_xmit_v6
856(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
857extern int ip_vs_icmp_xmit_v6
858(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
859 int offset);
860#endif
707 861
708/* 862/*
709 * This is a simple mechanism to ignore packets when 863 * This is a simple mechanism to ignore packets when
@@ -748,7 +902,12 @@ static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
748} 902}
749 903
750extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, 904extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
751 struct ip_vs_conn *cp, int dir); 905 struct ip_vs_conn *cp, int dir);
906
907#ifdef CONFIG_IP_VS_IPV6
908extern void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
909 struct ip_vs_conn *cp, int dir);
910#endif
752 911
753extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset); 912extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
754 913
@@ -759,6 +918,17 @@ static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
759 return csum_partial((char *) diff, sizeof(diff), oldsum); 918 return csum_partial((char *) diff, sizeof(diff), oldsum);
760} 919}
761 920
921#ifdef CONFIG_IP_VS_IPV6
922static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
923 __wsum oldsum)
924{
925 __be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
926 new[3], new[2], new[1], new[0] };
927
928 return csum_partial((char *) diff, sizeof(diff), oldsum);
929}
930#endif
931
762static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) 932static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
763{ 933{
764 __be16 diff[2] = { ~old, new }; 934 __be16 diff[2] = { ~old, new };
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index ff137fd7714f..003e4a03874e 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -158,13 +158,17 @@ struct ieee80211_low_level_stats {
158 * also implies a change in the AID. 158 * also implies a change in the AID.
159 * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed 159 * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed
160 * @BSS_CHANGED_ERP_PREAMBLE: preamble changed 160 * @BSS_CHANGED_ERP_PREAMBLE: preamble changed
161 * @BSS_CHANGED_ERP_SLOT: slot timing changed
161 * @BSS_CHANGED_HT: 802.11n parameters changed 162 * @BSS_CHANGED_HT: 802.11n parameters changed
163 * @BSS_CHANGED_BASIC_RATES: Basic rateset changed
162 */ 164 */
163enum ieee80211_bss_change { 165enum ieee80211_bss_change {
164 BSS_CHANGED_ASSOC = 1<<0, 166 BSS_CHANGED_ASSOC = 1<<0,
165 BSS_CHANGED_ERP_CTS_PROT = 1<<1, 167 BSS_CHANGED_ERP_CTS_PROT = 1<<1,
166 BSS_CHANGED_ERP_PREAMBLE = 1<<2, 168 BSS_CHANGED_ERP_PREAMBLE = 1<<2,
169 BSS_CHANGED_ERP_SLOT = 1<<3,
167 BSS_CHANGED_HT = 1<<4, 170 BSS_CHANGED_HT = 1<<4,
171 BSS_CHANGED_BASIC_RATES = 1<<5,
168}; 172};
169 173
170/** 174/**
@@ -177,6 +181,7 @@ enum ieee80211_bss_change {
177 * @aid: association ID number, valid only when @assoc is true 181 * @aid: association ID number, valid only when @assoc is true
178 * @use_cts_prot: use CTS protection 182 * @use_cts_prot: use CTS protection
179 * @use_short_preamble: use 802.11b short preamble 183 * @use_short_preamble: use 802.11b short preamble
184 * @use_short_slot: use short slot time (only relevant for ERP)
180 * @dtim_period: num of beacons before the next DTIM, for PSM 185 * @dtim_period: num of beacons before the next DTIM, for PSM
181 * @timestamp: beacon timestamp 186 * @timestamp: beacon timestamp
182 * @beacon_int: beacon interval 187 * @beacon_int: beacon interval
@@ -184,6 +189,9 @@ enum ieee80211_bss_change {
184 * @assoc_ht: association in HT mode 189 * @assoc_ht: association in HT mode
185 * @ht_conf: ht capabilities 190 * @ht_conf: ht capabilities
186 * @ht_bss_conf: ht extended capabilities 191 * @ht_bss_conf: ht extended capabilities
192 * @basic_rates: bitmap of basic rates, each bit stands for an
193 * index into the rate table configured by the driver in
194 * the current band.
187 */ 195 */
188struct ieee80211_bss_conf { 196struct ieee80211_bss_conf {
189 /* association related data */ 197 /* association related data */
@@ -192,10 +200,12 @@ struct ieee80211_bss_conf {
192 /* erp related data */ 200 /* erp related data */
193 bool use_cts_prot; 201 bool use_cts_prot;
194 bool use_short_preamble; 202 bool use_short_preamble;
203 bool use_short_slot;
195 u8 dtim_period; 204 u8 dtim_period;
196 u16 beacon_int; 205 u16 beacon_int;
197 u16 assoc_capability; 206 u16 assoc_capability;
198 u64 timestamp; 207 u64 timestamp;
208 u64 basic_rates;
199 /* ht related data */ 209 /* ht related data */
200 bool assoc_ht; 210 bool assoc_ht;
201 struct ieee80211_ht_info *ht_conf; 211 struct ieee80211_ht_info *ht_conf;
@@ -290,6 +300,9 @@ enum mac80211_tx_control_flags {
290 * (2) driver internal use (if applicable) 300 * (2) driver internal use (if applicable)
291 * (3) TX status information - driver tells mac80211 what happened 301 * (3) TX status information - driver tells mac80211 what happened
292 * 302 *
303 * The TX control's sta pointer is only valid during the ->tx call,
304 * it may be NULL.
305 *
293 * @flags: transmit info flags, defined above 306 * @flags: transmit info flags, defined above
294 * @band: TBD 307 * @band: TBD
295 * @tx_rate_idx: TBD 308 * @tx_rate_idx: TBD
@@ -317,10 +330,11 @@ struct ieee80211_tx_info {
317 330
318 union { 331 union {
319 struct { 332 struct {
333 /* NB: vif can be NULL for injected frames */
320 struct ieee80211_vif *vif; 334 struct ieee80211_vif *vif;
321 struct ieee80211_key_conf *hw_key; 335 struct ieee80211_key_conf *hw_key;
336 struct ieee80211_sta *sta;
322 unsigned long jiffies; 337 unsigned long jiffies;
323 u16 aid;
324 s8 rts_cts_rate_idx, alt_retry_rate_idx; 338 s8 rts_cts_rate_idx, alt_retry_rate_idx;
325 u8 retry_limit; 339 u8 retry_limit;
326 u8 icv_len; 340 u8 icv_len;
@@ -363,6 +377,7 @@ static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
363 * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field) 377 * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field)
364 * is valid. This is useful in monitor mode and necessary for beacon frames 378 * is valid. This is useful in monitor mode and necessary for beacon frames
365 * to enable IBSS merging. 379 * to enable IBSS merging.
380 * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
366 */ 381 */
367enum mac80211_rx_flags { 382enum mac80211_rx_flags {
368 RX_FLAG_MMIC_ERROR = 1<<0, 383 RX_FLAG_MMIC_ERROR = 1<<0,
@@ -373,6 +388,7 @@ enum mac80211_rx_flags {
373 RX_FLAG_FAILED_FCS_CRC = 1<<5, 388 RX_FLAG_FAILED_FCS_CRC = 1<<5,
374 RX_FLAG_FAILED_PLCP_CRC = 1<<6, 389 RX_FLAG_FAILED_PLCP_CRC = 1<<6,
375 RX_FLAG_TSFT = 1<<7, 390 RX_FLAG_TSFT = 1<<7,
391 RX_FLAG_SHORTPRE = 1<<8
376}; 392};
377 393
378/** 394/**
@@ -418,6 +434,11 @@ struct ieee80211_rx_status {
418 * @IEEE80211_CONF_PS: Enable 802.11 power save mode 434 * @IEEE80211_CONF_PS: Enable 802.11 power save mode
419 */ 435 */
420enum ieee80211_conf_flags { 436enum ieee80211_conf_flags {
437 /*
438 * TODO: IEEE80211_CONF_SHORT_SLOT_TIME will be removed once drivers
439 * have been converted to use bss_info_changed() for slot time
440 * configuration
441 */
421 IEEE80211_CONF_SHORT_SLOT_TIME = (1<<0), 442 IEEE80211_CONF_SHORT_SLOT_TIME = (1<<0),
422 IEEE80211_CONF_RADIOTAP = (1<<1), 443 IEEE80211_CONF_RADIOTAP = (1<<1),
423 IEEE80211_CONF_SUPPORT_HT_MODE = (1<<2), 444 IEEE80211_CONF_SUPPORT_HT_MODE = (1<<2),
@@ -461,33 +482,6 @@ struct ieee80211_conf {
461}; 482};
462 483
463/** 484/**
464 * enum ieee80211_if_types - types of 802.11 network interfaces
465 *
466 * @IEEE80211_IF_TYPE_INVALID: invalid interface type, not used
467 * by mac80211 itself
468 * @IEEE80211_IF_TYPE_AP: interface in AP mode.
469 * @IEEE80211_IF_TYPE_MGMT: special interface for communication with hostap
470 * daemon. Drivers should never see this type.
471 * @IEEE80211_IF_TYPE_STA: interface in STA (client) mode.
472 * @IEEE80211_IF_TYPE_IBSS: interface in IBSS (ad-hoc) mode.
473 * @IEEE80211_IF_TYPE_MNTR: interface in monitor (rfmon) mode.
474 * @IEEE80211_IF_TYPE_WDS: interface in WDS mode.
475 * @IEEE80211_IF_TYPE_VLAN: VLAN interface bound to an AP, drivers
476 * will never see this type.
477 * @IEEE80211_IF_TYPE_MESH_POINT: 802.11s mesh point
478 */
479enum ieee80211_if_types {
480 IEEE80211_IF_TYPE_INVALID,
481 IEEE80211_IF_TYPE_AP,
482 IEEE80211_IF_TYPE_STA,
483 IEEE80211_IF_TYPE_IBSS,
484 IEEE80211_IF_TYPE_MESH_POINT,
485 IEEE80211_IF_TYPE_MNTR,
486 IEEE80211_IF_TYPE_WDS,
487 IEEE80211_IF_TYPE_VLAN,
488};
489
490/**
491 * struct ieee80211_vif - per-interface data 485 * struct ieee80211_vif - per-interface data
492 * 486 *
493 * Data in this structure is continually present for driver 487 * Data in this structure is continually present for driver
@@ -498,7 +492,7 @@ enum ieee80211_if_types {
498 * sizeof(void *). 492 * sizeof(void *).
499 */ 493 */
500struct ieee80211_vif { 494struct ieee80211_vif {
501 enum ieee80211_if_types type; 495 enum nl80211_iftype type;
502 /* must be last */ 496 /* must be last */
503 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); 497 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
504}; 498};
@@ -506,7 +500,7 @@ struct ieee80211_vif {
506static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) 500static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
507{ 501{
508#ifdef CONFIG_MAC80211_MESH 502#ifdef CONFIG_MAC80211_MESH
509 return vif->type == IEEE80211_IF_TYPE_MESH_POINT; 503 return vif->type == NL80211_IFTYPE_MESH_POINT;
510#endif 504#endif
511 return false; 505 return false;
512} 506}
@@ -517,7 +511,7 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
517 * @vif: pointer to a driver-use per-interface structure. The pointer 511 * @vif: pointer to a driver-use per-interface structure. The pointer
518 * itself is also used for various functions including 512 * itself is also used for various functions including
519 * ieee80211_beacon_get() and ieee80211_get_buffered_bc(). 513 * ieee80211_beacon_get() and ieee80211_get_buffered_bc().
520 * @type: one of &enum ieee80211_if_types constants. Determines the type of 514 * @type: one of &enum nl80211_iftype constants. Determines the type of
521 * added/removed interface. 515 * added/removed interface.
522 * @mac_addr: pointer to MAC address of the interface. This pointer is valid 516 * @mac_addr: pointer to MAC address of the interface. This pointer is valid
523 * until the interface is removed (i.e. it cannot be used after 517 * until the interface is removed (i.e. it cannot be used after
@@ -533,7 +527,7 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
533 * in pure monitor mode. 527 * in pure monitor mode.
534 */ 528 */
535struct ieee80211_if_init_conf { 529struct ieee80211_if_init_conf {
536 enum ieee80211_if_types type; 530 enum nl80211_iftype type;
537 struct ieee80211_vif *vif; 531 struct ieee80211_vif *vif;
538 void *mac_addr; 532 void *mac_addr;
539}; 533};
@@ -662,6 +656,33 @@ enum set_key_cmd {
662}; 656};
663 657
664/** 658/**
659 * struct ieee80211_sta - station table entry
660 *
661 * A station table entry represents a station we are possibly
662 * communicating with. Since stations are RCU-managed in
663 * mac80211, any ieee80211_sta pointer you get access to must
664 * either be protected by rcu_read_lock() explicitly or implicitly,
665 * or you must take good care to not use such a pointer after a
666 * call to your sta_notify callback that removed it.
667 *
668 * @addr: MAC address
669 * @aid: AID we assigned to the station if we're an AP
670 * @supp_rates: Bitmap of supported rates (per band)
671 * @ht_info: HT capabilities of this STA
672 * @drv_priv: data area for driver use, will always be aligned to
673 * sizeof(void *), size is determined in hw information.
674 */
675struct ieee80211_sta {
676 u64 supp_rates[IEEE80211_NUM_BANDS];
677 u8 addr[ETH_ALEN];
678 u16 aid;
679 struct ieee80211_ht_info ht_info;
680
681 /* must be last */
682 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
683};
684
685/**
665 * enum sta_notify_cmd - sta notify command 686 * enum sta_notify_cmd - sta notify command
666 * 687 *
667 * Used with the sta_notify() callback in &struct ieee80211_ops, this 688 * Used with the sta_notify() callback in &struct ieee80211_ops, this
@@ -805,6 +826,8 @@ enum ieee80211_hw_flags {
805 * 826 *
806 * @vif_data_size: size (in bytes) of the drv_priv data area 827 * @vif_data_size: size (in bytes) of the drv_priv data area
807 * within &struct ieee80211_vif. 828 * within &struct ieee80211_vif.
829 * @sta_data_size: size (in bytes) of the drv_priv data area
830 * within &struct ieee80211_sta.
808 */ 831 */
809struct ieee80211_hw { 832struct ieee80211_hw {
810 struct ieee80211_conf conf; 833 struct ieee80211_conf conf;
@@ -816,12 +839,15 @@ struct ieee80211_hw {
816 unsigned int extra_tx_headroom; 839 unsigned int extra_tx_headroom;
817 int channel_change_time; 840 int channel_change_time;
818 int vif_data_size; 841 int vif_data_size;
842 int sta_data_size;
819 u16 queues; 843 u16 queues;
820 u16 ampdu_queues; 844 u16 ampdu_queues;
821 u16 max_listen_interval; 845 u16 max_listen_interval;
822 s8 max_signal; 846 s8 max_signal;
823}; 847};
824 848
849struct ieee80211_hw *wiphy_to_hw(struct wiphy *wiphy);
850
825/** 851/**
826 * SET_IEEE80211_DEV - set device for 802.11 hardware 852 * SET_IEEE80211_DEV - set device for 802.11 hardware
827 * 853 *
@@ -1097,7 +1123,7 @@ enum ieee80211_ampdu_mlme_action {
1097 * This callback must be implemented and atomic. 1123 * This callback must be implemented and atomic.
1098 * 1124 *
1099 * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit 1125 * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
1100 * must be set or cleared for a given AID. Must be atomic. 1126 * must be set or cleared for a given STA. Must be atomic.
1101 * 1127 *
1102 * @set_key: See the section "Hardware crypto acceleration" 1128 * @set_key: See the section "Hardware crypto acceleration"
1103 * This callback can sleep, and is only called between add_interface 1129 * This callback can sleep, and is only called between add_interface
@@ -1111,7 +1137,9 @@ enum ieee80211_ampdu_mlme_action {
1111 * @hw_scan: Ask the hardware to service the scan request, no need to start 1137 * @hw_scan: Ask the hardware to service the scan request, no need to start
1112 * the scan state machine in stack. The scan must honour the channel 1138 * the scan state machine in stack. The scan must honour the channel
1113 * configuration done by the regulatory agent in the wiphy's registered 1139 * configuration done by the regulatory agent in the wiphy's registered
1114 * bands. 1140 * bands. When the scan finishes, ieee80211_scan_completed() must be
1141 * called; note that it also must be called when the scan cannot finish
1142 * because the hardware is turned off! Anything else is a bug!
1115 * 1143 *
1116 * @get_stats: return low-level statistics 1144 * @get_stats: return low-level statistics
1117 * 1145 *
@@ -1131,7 +1159,7 @@ enum ieee80211_ampdu_mlme_action {
1131 * of assocaited station or AP. 1159 * of assocaited station or AP.
1132 * 1160 *
1133 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), 1161 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
1134 * bursting) for a hardware TX queue. Must be atomic. 1162 * bursting) for a hardware TX queue.
1135 * 1163 *
1136 * @get_tx_stats: Get statistics of the current TX queue status. This is used 1164 * @get_tx_stats: Get statistics of the current TX queue status. This is used
1137 * to get number of currently queued packets (queue length), maximum queue 1165 * to get number of currently queued packets (queue length), maximum queue
@@ -1181,7 +1209,8 @@ struct ieee80211_ops {
1181 unsigned int changed_flags, 1209 unsigned int changed_flags,
1182 unsigned int *total_flags, 1210 unsigned int *total_flags,
1183 int mc_count, struct dev_addr_list *mc_list); 1211 int mc_count, struct dev_addr_list *mc_list);
1184 int (*set_tim)(struct ieee80211_hw *hw, int aid, int set); 1212 int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
1213 bool set);
1185 int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, 1214 int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1186 const u8 *local_address, const u8 *address, 1215 const u8 *local_address, const u8 *address,
1187 struct ieee80211_key_conf *key); 1216 struct ieee80211_key_conf *key);
@@ -1198,7 +1227,7 @@ struct ieee80211_ops {
1198 int (*set_retry_limit)(struct ieee80211_hw *hw, 1227 int (*set_retry_limit)(struct ieee80211_hw *hw,
1199 u32 short_retry, u32 long_retr); 1228 u32 short_retry, u32 long_retr);
1200 void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1229 void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1201 enum sta_notify_cmd, const u8 *addr); 1230 enum sta_notify_cmd, struct ieee80211_sta *sta);
1202 int (*conf_tx)(struct ieee80211_hw *hw, u16 queue, 1231 int (*conf_tx)(struct ieee80211_hw *hw, u16 queue,
1203 const struct ieee80211_tx_queue_params *params); 1232 const struct ieee80211_tx_queue_params *params);
1204 int (*get_tx_stats)(struct ieee80211_hw *hw, 1233 int (*get_tx_stats)(struct ieee80211_hw *hw,
@@ -1208,7 +1237,7 @@ struct ieee80211_ops {
1208 int (*tx_last_beacon)(struct ieee80211_hw *hw); 1237 int (*tx_last_beacon)(struct ieee80211_hw *hw);
1209 int (*ampdu_action)(struct ieee80211_hw *hw, 1238 int (*ampdu_action)(struct ieee80211_hw *hw,
1210 enum ieee80211_ampdu_mlme_action action, 1239 enum ieee80211_ampdu_mlme_action action,
1211 const u8 *addr, u16 tid, u16 *ssn); 1240 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
1212}; 1241};
1213 1242
1214/** 1243/**
@@ -1557,16 +1586,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
1557unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); 1586unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
1558 1587
1559/** 1588/**
1560 * ieee80211_get_hdrlen - get header length from frame control
1561 *
1562 * This function returns the 802.11 header length in bytes (not including
1563 * encryption headers.)
1564 *
1565 * @fc: the frame control field (in CPU endianness)
1566 */
1567int ieee80211_get_hdrlen(u16 fc);
1568
1569/**
1570 * ieee80211_hdrlen - get header length in bytes from frame control 1589 * ieee80211_hdrlen - get header length in bytes from frame control
1571 * @fc: frame control field in little-endian format 1590 * @fc: frame control field in little-endian format
1572 */ 1591 */
@@ -1608,6 +1627,16 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue);
1608void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue); 1627void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue);
1609 1628
1610/** 1629/**
1630 * ieee80211_queue_stopped - test status of the queue
1631 * @hw: pointer as obtained from ieee80211_alloc_hw().
1632 * @queue: queue number (counted from zero).
1633 *
1634 * Drivers should use this function instead of netif_stop_queue.
1635 */
1636
1637int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue);
1638
1639/**
1611 * ieee80211_stop_queues - stop all queues 1640 * ieee80211_stop_queues - stop all queues
1612 * @hw: pointer as obtained from ieee80211_alloc_hw(). 1641 * @hw: pointer as obtained from ieee80211_alloc_hw().
1613 * 1642 *
@@ -1758,4 +1787,17 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra,
1758 */ 1787 */
1759void ieee80211_notify_mac(struct ieee80211_hw *hw, 1788void ieee80211_notify_mac(struct ieee80211_hw *hw,
1760 enum ieee80211_notification_types notif_type); 1789 enum ieee80211_notification_types notif_type);
1790
1791/**
1792 * ieee80211_find_sta - find a station
1793 *
1794 * @hw: pointer as obtained from ieee80211_alloc_hw()
1795 * @addr: station's address
1796 *
1797 * This function must be called under RCU lock and the
1798 * resulting pointer is only valid under RCU lock as well.
1799 */
1800struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
1801 const u8 *addr);
1802
1761#endif /* MAC80211_H */ 1803#endif /* MAC80211_H */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 18024b8cecb8..76c43ff38f64 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -119,9 +119,6 @@
119 * Nested Attributes Construction: 119 * Nested Attributes Construction:
120 * nla_nest_start(skb, type) start a nested attribute 120 * nla_nest_start(skb, type) start a nested attribute
121 * nla_nest_end(skb, nla) finalize a nested attribute 121 * nla_nest_end(skb, nla) finalize a nested attribute
122 * nla_nest_compat_start(skb, type, start a nested compat attribute
123 * len, data)
124 * nla_nest_compat_end(skb, type) finalize a nested compat attribute
125 * nla_nest_cancel(skb, nla) cancel nested attribute construction 122 * nla_nest_cancel(skb, nla) cancel nested attribute construction
126 * 123 *
127 * Attribute Length Calculations: 124 * Attribute Length Calculations:
@@ -156,7 +153,6 @@
156 * nla_find_nested() find attribute in nested attributes 153 * nla_find_nested() find attribute in nested attributes
157 * nla_parse() parse and validate stream of attrs 154 * nla_parse() parse and validate stream of attrs
158 * nla_parse_nested() parse nested attribuets 155 * nla_parse_nested() parse nested attribuets
159 * nla_parse_nested_compat() parse nested compat attributes
160 * nla_for_each_attr() loop over all attributes 156 * nla_for_each_attr() loop over all attributes
161 * nla_for_each_nested() loop over the nested attributes 157 * nla_for_each_nested() loop over the nested attributes
162 *========================================================================= 158 *=========================================================================
@@ -752,39 +748,6 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
752} 748}
753 749
754/** 750/**
755 * nla_parse_nested_compat - parse nested compat attributes
756 * @tb: destination array with maxtype+1 elements
757 * @maxtype: maximum attribute type to be expected
758 * @nla: attribute containing the nested attributes
759 * @data: pointer to point to contained structure
760 * @len: length of contained structure
761 * @policy: validation policy
762 *
763 * Parse a nested compat attribute. The compat attribute contains a structure
764 * and optionally a set of nested attributes. On success the data pointer
765 * points to the nested data and tb contains the parsed attributes
766 * (see nla_parse).
767 */
768static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype,
769 struct nlattr *nla,
770 const struct nla_policy *policy,
771 int len)
772{
773 int nested_len = nla_len(nla) - NLA_ALIGN(len);
774
775 if (nested_len < 0)
776 return -EINVAL;
777 if (nested_len >= nla_attr_size(0))
778 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
779 nested_len, policy);
780 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
781 return 0;
782}
783
784#define nla_parse_nested_compat(tb, maxtype, nla, policy, data, len) \
785({ data = nla_len(nla) >= len ? nla_data(nla) : NULL; \
786 __nla_parse_nested_compat(tb, maxtype, nla, policy, len); })
787/**
788 * nla_put_u8 - Add a u8 netlink attribute to a socket buffer 751 * nla_put_u8 - Add a u8 netlink attribute to a socket buffer
789 * @skb: socket buffer to add attribute to 752 * @skb: socket buffer to add attribute to
790 * @attrtype: attribute type 753 * @attrtype: attribute type
@@ -1031,51 +994,6 @@ static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
1031} 994}
1032 995
1033/** 996/**
1034 * nla_nest_compat_start - Start a new level of nested compat attributes
1035 * @skb: socket buffer to add attributes to
1036 * @attrtype: attribute type of container
1037 * @attrlen: length of structure
1038 * @data: pointer to structure
1039 *
1040 * Start a nested compat attribute that contains both a structure and
1041 * a set of nested attributes.
1042 *
1043 * Returns the container attribute
1044 */
1045static inline struct nlattr *nla_nest_compat_start(struct sk_buff *skb,
1046 int attrtype, int attrlen,
1047 const void *data)
1048{
1049 struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
1050
1051 if (nla_put(skb, attrtype, attrlen, data) < 0)
1052 return NULL;
1053 if (nla_nest_start(skb, attrtype) == NULL) {
1054 nlmsg_trim(skb, start);
1055 return NULL;
1056 }
1057 return start;
1058}
1059
1060/**
1061 * nla_nest_compat_end - Finalize nesting of compat attributes
1062 * @skb: socket buffer the attributes are stored in
1063 * @start: container attribute
1064 *
1065 * Corrects the container attribute header to include the all
1066 * appeneded attributes.
1067 *
1068 * Returns the total data length of the skb.
1069 */
1070static inline int nla_nest_compat_end(struct sk_buff *skb, struct nlattr *start)
1071{
1072 struct nlattr *nest = (void *)start + NLMSG_ALIGN(start->nla_len);
1073
1074 start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
1075 return nla_nest_end(skb, nest);
1076}
1077
1078/**
1079 * nla_nest_cancel - Cancel nesting of attributes 997 * nla_nest_cancel - Cancel nesting of attributes
1080 * @skb: socket buffer the message is stored in 998 * @skb: socket buffer the message is stored in
1081 * @start: container attribute 999 * @start: container attribute
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
new file mode 100644
index 000000000000..d4e72508e145
--- /dev/null
+++ b/include/net/phonet/phonet.h
@@ -0,0 +1,112 @@
1/*
2 * File: af_phonet.h
3 *
4 * Phonet sockets kernel definitions
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef AF_PHONET_H
24#define AF_PHONET_H
25
26/*
27 * The lower layers may not require more space, ever. Make sure it's
28 * enough.
29 */
30#define MAX_PHONET_HEADER 8
31
32/*
33 * Every Phonet* socket has this structure first in its
34 * protocol-specific structure under name c.
35 */
36struct pn_sock {
37 struct sock sk;
38 u16 sobject;
39 u8 resource;
40};
41
42static inline struct pn_sock *pn_sk(struct sock *sk)
43{
44 return (struct pn_sock *)sk;
45}
46
47extern const struct proto_ops phonet_dgram_ops;
48
49struct sock *pn_find_sock_by_sa(const struct sockaddr_pn *sa);
50void phonet_get_local_port_range(int *min, int *max);
51void pn_sock_hash(struct sock *sk);
52void pn_sock_unhash(struct sock *sk);
53int pn_sock_get_port(struct sock *sk, unsigned short sport);
54
55int pn_skb_send(struct sock *sk, struct sk_buff *skb,
56 const struct sockaddr_pn *target);
57
58static inline struct phonethdr *pn_hdr(struct sk_buff *skb)
59{
60 return (struct phonethdr *)skb_network_header(skb);
61}
62
63static inline struct phonetmsg *pn_msg(struct sk_buff *skb)
64{
65 return (struct phonetmsg *)skb_transport_header(skb);
66}
67
68/*
69 * Get the other party's sockaddr from received skb. The skb begins
70 * with a Phonet header.
71 */
72static inline
73void pn_skb_get_src_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
74{
75 struct phonethdr *ph = pn_hdr(skb);
76 u16 obj = pn_object(ph->pn_sdev, ph->pn_sobj);
77
78 sa->spn_family = AF_PHONET;
79 pn_sockaddr_set_object(sa, obj);
80 pn_sockaddr_set_resource(sa, ph->pn_res);
81 memset(sa->spn_zero, 0, sizeof(sa->spn_zero));
82}
83
84static inline
85void pn_skb_get_dst_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
86{
87 struct phonethdr *ph = pn_hdr(skb);
88 u16 obj = pn_object(ph->pn_rdev, ph->pn_robj);
89
90 sa->spn_family = AF_PHONET;
91 pn_sockaddr_set_object(sa, obj);
92 pn_sockaddr_set_resource(sa, ph->pn_res);
93 memset(sa->spn_zero, 0, sizeof(sa->spn_zero));
94}
95
96/* Protocols in Phonet protocol family. */
97struct phonet_protocol {
98 const struct proto_ops *ops;
99 struct proto *prot;
100 int sock_type;
101};
102
103int phonet_proto_register(int protocol, struct phonet_protocol *pp);
104void phonet_proto_unregister(int protocol, struct phonet_protocol *pp);
105
106int phonet_sysctl_init(void);
107void phonet_sysctl_exit(void);
108void phonet_netlink_register(void);
109int isi_register(void);
110void isi_unregister(void);
111
112#endif
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
new file mode 100644
index 000000000000..bbd2a836e04c
--- /dev/null
+++ b/include/net/phonet/pn_dev.h
@@ -0,0 +1,50 @@
1/*
2 * File: pn_dev.h
3 *
4 * Phonet network device
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef PN_DEV_H
24#define PN_DEV_H
25
26struct phonet_device_list {
27 struct list_head list;
28 spinlock_t lock;
29};
30
31extern struct phonet_device_list pndevs;
32
33struct phonet_device {
34 struct list_head list;
35 struct net_device *netdev;
36 DECLARE_BITMAP(addrs, 64);
37};
38
39void phonet_device_init(void);
40void phonet_device_exit(void);
41struct net_device *phonet_device_get(struct net *net);
42
43int phonet_address_add(struct net_device *dev, u8 addr);
44int phonet_address_del(struct net_device *dev, u8 addr);
45u8 phonet_address_get(struct net_device *dev, u8 addr);
46int phonet_address_lookup(u8 addr);
47
48#define PN_NO_ADDR 0xff
49
50#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index b786a5b09253..4082f39f5079 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -90,10 +90,7 @@ extern void __qdisc_run(struct Qdisc *q);
90 90
91static inline void qdisc_run(struct Qdisc *q) 91static inline void qdisc_run(struct Qdisc *q)
92{ 92{
93 struct netdev_queue *txq = q->dev_queue; 93 if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
94
95 if (!netif_tx_queue_stopped(txq) &&
96 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
97 __qdisc_run(q); 94 __qdisc_run(q);
98} 95}
99 96
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e5569625d2a5..3b983e8a0555 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -52,7 +52,7 @@ struct Qdisc
52 u32 parent; 52 u32 parent;
53 atomic_t refcnt; 53 atomic_t refcnt;
54 unsigned long state; 54 unsigned long state;
55 struct sk_buff *gso_skb; 55 struct sk_buff_head requeue;
56 struct sk_buff_head q; 56 struct sk_buff_head q;
57 struct netdev_queue *dev_queue; 57 struct netdev_queue *dev_queue;
58 struct Qdisc *next_sched; 58 struct Qdisc *next_sched;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 17b932b8a55a..703305d00365 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -406,10 +406,7 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
406 406
407/* A macro to walk a list of skbs. */ 407/* A macro to walk a list of skbs. */
408#define sctp_skb_for_each(pos, head, tmp) \ 408#define sctp_skb_for_each(pos, head, tmp) \
409for (pos = (head)->next;\ 409 skb_queue_walk_safe(head, pos, tmp)
410 tmp = (pos)->next, pos != ((struct sk_buff *)(head));\
411 pos = tmp)
412
413 410
414/* A helper to append an entire skb list (list) to another (head). */ 411/* A helper to append an entire skb list (list) to another (head). */
415static inline void sctp_skb_list_tail(struct sk_buff_head *list, 412static inline void sctp_skb_list_tail(struct sk_buff_head *list,
@@ -420,10 +417,7 @@ static inline void sctp_skb_list_tail(struct sk_buff_head *list,
420 sctp_spin_lock_irqsave(&head->lock, flags); 417 sctp_spin_lock_irqsave(&head->lock, flags);
421 sctp_spin_lock(&list->lock); 418 sctp_spin_lock(&list->lock);
422 419
423 list_splice((struct list_head *)list, (struct list_head *)head->prev); 420 skb_queue_splice_tail_init(list, head);
424
425 head->qlen += list->qlen;
426 list->qlen = 0;
427 421
428 sctp_spin_unlock(&list->lock); 422 sctp_spin_unlock(&list->lock);
429 sctp_spin_unlock_irqrestore(&head->lock, flags); 423 sctp_spin_unlock_irqrestore(&head->lock, flags);
diff --git a/include/net/sock.h b/include/net/sock.h
index 06c5259aff30..75a312d3888a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -532,6 +532,7 @@ struct proto {
532 int (*getsockopt)(struct sock *sk, int level, 532 int (*getsockopt)(struct sock *sk, int level,
533 int optname, char __user *optval, 533 int optname, char __user *optval,
534 int __user *option); 534 int __user *option);
535#ifdef CONFIG_COMPAT
535 int (*compat_setsockopt)(struct sock *sk, 536 int (*compat_setsockopt)(struct sock *sk,
536 int level, 537 int level,
537 int optname, char __user *optval, 538 int optname, char __user *optval,
@@ -540,6 +541,7 @@ struct proto {
540 int level, 541 int level,
541 int optname, char __user *optval, 542 int optname, char __user *optval,
542 int __user *option); 543 int __user *option);
544#endif
543 int (*sendmsg)(struct kiocb *iocb, struct sock *sk, 545 int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
544 struct msghdr *msg, size_t len); 546 struct msghdr *msg, size_t len);
545 int (*recvmsg)(struct kiocb *iocb, struct sock *sk, 547 int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
new file mode 100644
index 000000000000..6abb3ed3ebf7
--- /dev/null
+++ b/include/net/tc_act/tc_skbedit.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
18 */
19
20#ifndef __NET_TC_SKBEDIT_H
21#define __NET_TC_SKBEDIT_H
22
23#include <net/act_api.h>
24
25struct tcf_skbedit {
26 struct tcf_common common;
27 u32 flags;
28 u32 priority;
29 u16 queue_mapping;
30};
31#define to_skbedit(pc) \
32 container_of(pc, struct tcf_skbedit, common)
33
34#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8983386356a5..12c9b4fec040 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -472,6 +472,8 @@ extern void tcp_send_delayed_ack(struct sock *sk);
472 472
473/* tcp_input.c */ 473/* tcp_input.c */
474extern void tcp_cwnd_application_limited(struct sock *sk); 474extern void tcp_cwnd_application_limited(struct sock *sk);
475extern void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
476 struct sk_buff *skb);
475 477
476/* tcp_timer.c */ 478/* tcp_timer.c */
477extern void tcp_init_xmit_timers(struct sock *); 479extern void tcp_init_xmit_timers(struct sock *);
@@ -1039,13 +1041,12 @@ static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1039{ 1041{
1040 tp->lost_skb_hint = NULL; 1042 tp->lost_skb_hint = NULL;
1041 tp->scoreboard_skb_hint = NULL; 1043 tp->scoreboard_skb_hint = NULL;
1042 tp->retransmit_skb_hint = NULL;
1043 tp->forward_skb_hint = NULL;
1044} 1044}
1045 1045
1046static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) 1046static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1047{ 1047{
1048 tcp_clear_retrans_hints_partial(tp); 1048 tcp_clear_retrans_hints_partial(tp);
1049 tp->retransmit_skb_hint = NULL;
1049} 1050}
1050 1051
1051/* MD5 Signature */ 1052/* MD5 Signature */
@@ -1180,49 +1181,45 @@ static inline void tcp_write_queue_purge(struct sock *sk)
1180 1181
1181static inline struct sk_buff *tcp_write_queue_head(struct sock *sk) 1182static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
1182{ 1183{
1183 struct sk_buff *skb = sk->sk_write_queue.next; 1184 return skb_peek(&sk->sk_write_queue);
1184 if (skb == (struct sk_buff *) &sk->sk_write_queue)
1185 return NULL;
1186 return skb;
1187} 1185}
1188 1186
1189static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk) 1187static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
1190{ 1188{
1191 struct sk_buff *skb = sk->sk_write_queue.prev; 1189 return skb_peek_tail(&sk->sk_write_queue);
1192 if (skb == (struct sk_buff *) &sk->sk_write_queue)
1193 return NULL;
1194 return skb;
1195} 1190}
1196 1191
1197static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb) 1192static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
1198{ 1193{
1199 return skb->next; 1194 return skb_queue_next(&sk->sk_write_queue, skb);
1200} 1195}
1201 1196
1202#define tcp_for_write_queue(skb, sk) \ 1197#define tcp_for_write_queue(skb, sk) \
1203 for (skb = (sk)->sk_write_queue.next; \ 1198 skb_queue_walk(&(sk)->sk_write_queue, skb)
1204 (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
1205 skb = skb->next)
1206 1199
1207#define tcp_for_write_queue_from(skb, sk) \ 1200#define tcp_for_write_queue_from(skb, sk) \
1208 for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\ 1201 skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1209 skb = skb->next)
1210 1202
1211#define tcp_for_write_queue_from_safe(skb, tmp, sk) \ 1203#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
1212 for (tmp = skb->next; \ 1204 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1213 (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
1214 skb = tmp, tmp = skb->next)
1215 1205
1216static inline struct sk_buff *tcp_send_head(struct sock *sk) 1206static inline struct sk_buff *tcp_send_head(struct sock *sk)
1217{ 1207{
1218 return sk->sk_send_head; 1208 return sk->sk_send_head;
1219} 1209}
1220 1210
1211static inline bool tcp_skb_is_last(const struct sock *sk,
1212 const struct sk_buff *skb)
1213{
1214 return skb_queue_is_last(&sk->sk_write_queue, skb);
1215}
1216
1221static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb) 1217static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
1222{ 1218{
1223 sk->sk_send_head = skb->next; 1219 if (tcp_skb_is_last(sk, skb))
1224 if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
1225 sk->sk_send_head = NULL; 1220 sk->sk_send_head = NULL;
1221 else
1222 sk->sk_send_head = tcp_write_queue_next(sk, skb);
1226} 1223}
1227 1224
1228static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) 1225static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
@@ -1267,12 +1264,12 @@ static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1267 __skb_queue_after(&sk->sk_write_queue, skb, buff); 1264 __skb_queue_after(&sk->sk_write_queue, skb, buff);
1268} 1265}
1269 1266
1270/* Insert skb between prev and next on the write queue of sk. */ 1267/* Insert new before skb on the write queue of sk. */
1271static inline void tcp_insert_write_queue_before(struct sk_buff *new, 1268static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1272 struct sk_buff *skb, 1269 struct sk_buff *skb,
1273 struct sock *sk) 1270 struct sock *sk)
1274{ 1271{
1275 __skb_insert(new, skb->prev, skb, &sk->sk_write_queue); 1272 __skb_queue_before(&sk->sk_write_queue, skb, new);
1276 1273
1277 if (sk->sk_send_head == skb) 1274 if (sk->sk_send_head == skb)
1278 sk->sk_send_head = new; 1275 sk->sk_send_head = new;
@@ -1283,12 +1280,6 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1283 __skb_unlink(skb, &sk->sk_write_queue); 1280 __skb_unlink(skb, &sk->sk_write_queue);
1284} 1281}
1285 1282
1286static inline int tcp_skb_is_last(const struct sock *sk,
1287 const struct sk_buff *skb)
1288{
1289 return skb->next == (struct sk_buff *)&sk->sk_write_queue;
1290}
1291
1292static inline int tcp_write_queue_empty(struct sock *sk) 1283static inline int tcp_write_queue_empty(struct sock *sk)
1293{ 1284{
1294 return skb_queue_empty(&sk->sk_write_queue); 1285 return skb_queue_empty(&sk->sk_write_queue);
diff --git a/include/net/wireless.h b/include/net/wireless.h
index 9324f8dd183e..e4378cc6bf8e 100644
--- a/include/net/wireless.h
+++ b/include/net/wireless.h
@@ -60,6 +60,7 @@ enum ieee80211_channel_flags {
60 * with cfg80211. 60 * with cfg80211.
61 * 61 *
62 * @center_freq: center frequency in MHz 62 * @center_freq: center frequency in MHz
63 * @max_bandwidth: maximum allowed bandwidth for this channel, in MHz
63 * @hw_value: hardware-specific value for the channel 64 * @hw_value: hardware-specific value for the channel
64 * @flags: channel flags from &enum ieee80211_channel_flags. 65 * @flags: channel flags from &enum ieee80211_channel_flags.
65 * @orig_flags: channel flags at registration time, used by regulatory 66 * @orig_flags: channel flags at registration time, used by regulatory
@@ -73,6 +74,7 @@ enum ieee80211_channel_flags {
73struct ieee80211_channel { 74struct ieee80211_channel {
74 enum ieee80211_band band; 75 enum ieee80211_band band;
75 u16 center_freq; 76 u16 center_freq;
77 u8 max_bandwidth;
76 u16 hw_value; 78 u16 hw_value;
77 u32 flags; 79 u32 flags;
78 int max_antenna_gain; 80 int max_antenna_gain;
@@ -178,6 +180,7 @@ struct ieee80211_supported_band {
178 * struct wiphy - wireless hardware description 180 * struct wiphy - wireless hardware description
179 * @idx: the wiphy index assigned to this item 181 * @idx: the wiphy index assigned to this item
180 * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name> 182 * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name>
183 * @reg_notifier: the driver's regulatory notification callback
181 */ 184 */
182struct wiphy { 185struct wiphy {
183 /* assign these fields before you register the wiphy */ 186 /* assign these fields before you register the wiphy */
@@ -185,6 +188,9 @@ struct wiphy {
185 /* permanent MAC address */ 188 /* permanent MAC address */
186 u8 perm_addr[ETH_ALEN]; 189 u8 perm_addr[ETH_ALEN];
187 190
191 /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
192 u16 interface_modes;
193
188 /* If multiple wiphys are registered and you're handed e.g. 194 /* If multiple wiphys are registered and you're handed e.g.
189 * a regular netdev with assigned ieee80211_ptr, you won't 195 * a regular netdev with assigned ieee80211_ptr, you won't
190 * know whether it points to a wiphy your driver has registered 196 * know whether it points to a wiphy your driver has registered
@@ -194,6 +200,9 @@ struct wiphy {
194 200
195 struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS]; 201 struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
196 202
203 /* Lets us get back the wiphy on the callback */
204 int (*reg_notifier)(struct wiphy *wiphy, enum reg_set_by setby);
205
197 /* fields below are read-only, assigned by cfg80211 */ 206 /* fields below are read-only, assigned by cfg80211 */
198 207
199 /* the item in /sys/class/ieee80211/ points to this, 208 /* the item in /sys/class/ieee80211/ points to this,
@@ -319,6 +328,58 @@ extern int ieee80211_frequency_to_channel(int freq);
319 */ 328 */
320extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, 329extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
321 int freq); 330 int freq);
331/**
332 * __regulatory_hint - hint to the wireless core a regulatory domain
333 * @wiphy: if a driver is providing the hint this is the driver's very
334 * own &struct wiphy
335 * @alpha2: the ISO/IEC 3166 alpha2 being claimed the regulatory domain
336 * should be in. If @rd is set this should be NULL
337 * @rd: a complete regulatory domain, if passed the caller need not worry
338 * about freeing it
339 *
340 * The Wireless subsystem can use this function to hint to the wireless core
341 * what it believes should be the current regulatory domain by
342 * giving it an ISO/IEC 3166 alpha2 country code it knows its regulatory
343 * domain should be in or by providing a completely build regulatory domain.
344 *
345 * Returns -EALREADY if *a regulatory domain* has already been set. Note that
346 * this could be by another driver. It is safe for drivers to continue if
347 * -EALREADY is returned, if drivers are not capable of world roaming they
348 * should not register more channels than they support. Right now we only
349 * support listening to the first driver hint. If the driver is capable
350 * of world roaming but wants to respect its own EEPROM mappings for
351 * specific regulatory domains it should register the @reg_notifier callback
352 * on the &struct wiphy. Returns 0 if the hint went through fine or through an
353 * intersection operation. Otherwise a standard error code is returned.
354 *
355 */
356extern int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by,
357 const char *alpha2, struct ieee80211_regdomain *rd);
358/**
359 * regulatory_hint - driver hint to the wireless core a regulatory domain
360 * @wiphy: the driver's very own &struct wiphy
361 * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain
362 * should be in. If @rd is set this should be NULL. Note that if you
363 * set this to NULL you should still set rd->alpha2 to some accepted
364 * alpha2.
365 * @rd: a complete regulatory domain provided by the driver. If passed
366 * the driver does not need to worry about freeing it.
367 *
368 * Wireless drivers can use this function to hint to the wireless core
369 * what it believes should be the current regulatory domain by
370 * giving it an ISO/IEC 3166 alpha2 country code it knows its regulatory
371 * domain should be in or by providing a completely build regulatory domain.
372 * If the driver provides an ISO/IEC 3166 alpha2 userspace will be queried
373 * for a regulatory domain structure for the respective country. If
374 * a regulatory domain is build and passed you should set the alpha2
375 * if possible, otherwise set it to the special value of "99" which tells
376 * the wireless core it is unknown. If you pass a built regulatory domain
377 * and we return non zero you are in charge of kfree()'ing the structure.
378 *
379 * See __regulatory_hint() documentation for possible return values.
380 */
381extern int regulatory_hint(struct wiphy *wiphy,
382 const char *alpha2, struct ieee80211_regdomain *rd);
322 383
323/** 384/**
324 * ieee80211_get_channel - get channel struct from wiphy for specified frequency 385 * ieee80211_get_channel - get channel struct from wiphy for specified frequency
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 2933d7474a79..48630b266593 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -120,9 +120,11 @@ extern struct mutex xfrm_cfg_mutex;
120/* Full description of state of transformer. */ 120/* Full description of state of transformer. */
121struct xfrm_state 121struct xfrm_state
122{ 122{
123 /* Note: bydst is re-used during gc */
124 struct list_head all; 123 struct list_head all;
125 struct hlist_node bydst; 124 union {
125 struct list_head gclist;
126 struct hlist_node bydst;
127 };
126 struct hlist_node bysrc; 128 struct hlist_node bysrc;
127 struct hlist_node byspi; 129 struct hlist_node byspi;
128 130
@@ -1244,6 +1246,8 @@ struct xfrm6_tunnel {
1244}; 1246};
1245 1247
1246struct xfrm_state_walk { 1248struct xfrm_state_walk {
1249 struct list_head list;
1250 unsigned long genid;
1247 struct xfrm_state *state; 1251 struct xfrm_state *state;
1248 int count; 1252 int count;
1249 u8 proto; 1253 u8 proto;
@@ -1279,23 +1283,10 @@ static inline void xfrm6_fini(void)
1279extern int xfrm_proc_init(void); 1283extern int xfrm_proc_init(void);
1280#endif 1284#endif
1281 1285
1282static inline void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto) 1286extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
1283{
1284 walk->proto = proto;
1285 walk->state = NULL;
1286 walk->count = 0;
1287}
1288
1289static inline void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1290{
1291 if (walk->state != NULL) {
1292 xfrm_state_put(walk->state);
1293 walk->state = NULL;
1294 }
1295}
1296
1297extern int xfrm_state_walk(struct xfrm_state_walk *walk, 1287extern int xfrm_state_walk(struct xfrm_state_walk *walk,
1298 int (*func)(struct xfrm_state *, int, void*), void *); 1288 int (*func)(struct xfrm_state *, int, void*), void *);
1289extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
1299extern struct xfrm_state *xfrm_state_alloc(void); 1290extern struct xfrm_state *xfrm_state_alloc(void);
1300extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, 1291extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1301 struct flowi *fl, struct xfrm_tmpl *tmpl, 1292 struct flowi *fl, struct xfrm_tmpl *tmpl,
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 972f8e61d36a..59cedfb040e7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -243,10 +243,11 @@ static inline int open_arg(int flags, int mask)
243 243
244static int audit_match_perm(struct audit_context *ctx, int mask) 244static int audit_match_perm(struct audit_context *ctx, int mask)
245{ 245{
246 unsigned n;
246 if (unlikely(!ctx)) 247 if (unlikely(!ctx))
247 return 0; 248 return 0;
248 249
249 unsigned n = ctx->major; 250 n = ctx->major;
250 switch (audit_classify_syscall(ctx->arch, n)) { 251 switch (audit_classify_syscall(ctx->arch, n)) {
251 case 0: /* native */ 252 case 0: /* native */
252 if ((mask & AUDIT_PERM_WRITE) && 253 if ((mask & AUDIT_PERM_WRITE) &&
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d5ab79cf516d..f227bc172690 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -14,6 +14,8 @@
14 * 2003-10-22 Updates by Stephen Hemminger. 14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson. 15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups 16 * 2006 Rework by Paul Menage to use generic cgroups
17 * 2008 Rework of the scheduler domains and CPU hotplug handling
18 * by Max Krasnyansky
17 * 19 *
18 * This file is subject to the terms and conditions of the GNU General Public 20 * This file is subject to the terms and conditions of the GNU General Public
19 * License. See the file COPYING in the main directory of the Linux 21 * License. See the file COPYING in the main directory of the Linux
@@ -236,9 +238,11 @@ static struct cpuset top_cpuset = {
236 238
237static DEFINE_MUTEX(callback_mutex); 239static DEFINE_MUTEX(callback_mutex);
238 240
239/* This is ugly, but preserves the userspace API for existing cpuset 241/*
242 * This is ugly, but preserves the userspace API for existing cpuset
240 * users. If someone tries to mount the "cpuset" filesystem, we 243 * users. If someone tries to mount the "cpuset" filesystem, we
241 * silently switch it to mount "cgroup" instead */ 244 * silently switch it to mount "cgroup" instead
245 */
242static int cpuset_get_sb(struct file_system_type *fs_type, 246static int cpuset_get_sb(struct file_system_type *fs_type,
243 int flags, const char *unused_dev_name, 247 int flags, const char *unused_dev_name,
244 void *data, struct vfsmount *mnt) 248 void *data, struct vfsmount *mnt)
@@ -473,10 +477,9 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
473} 477}
474 478
475/* 479/*
476 * Helper routine for rebuild_sched_domains(). 480 * Helper routine for generate_sched_domains().
477 * Do cpusets a, b have overlapping cpus_allowed masks? 481 * Do cpusets a, b have overlapping cpus_allowed masks?
478 */ 482 */
479
480static int cpusets_overlap(struct cpuset *a, struct cpuset *b) 483static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
481{ 484{
482 return cpus_intersects(a->cpus_allowed, b->cpus_allowed); 485 return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
@@ -518,26 +521,15 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
518} 521}
519 522
520/* 523/*
521 * rebuild_sched_domains() 524 * generate_sched_domains()
522 * 525 *
523 * This routine will be called to rebuild the scheduler's dynamic 526 * This function builds a partial partition of the systems CPUs
524 * sched domains: 527 * A 'partial partition' is a set of non-overlapping subsets whose
525 * - if the flag 'sched_load_balance' of any cpuset with non-empty 528 * union is a subset of that set.
526 * 'cpus' changes, 529 * The output of this function needs to be passed to kernel/sched.c
527 * - or if the 'cpus' allowed changes in any cpuset which has that 530 * partition_sched_domains() routine, which will rebuild the scheduler's
528 * flag enabled, 531 * load balancing domains (sched domains) as specified by that partial
529 * - or if the 'sched_relax_domain_level' of any cpuset which has 532 * partition.
530 * that flag enabled and with non-empty 'cpus' changes,
531 * - or if any cpuset with non-empty 'cpus' is removed,
532 * - or if a cpu gets offlined.
533 *
534 * This routine builds a partial partition of the systems CPUs
535 * (the set of non-overlappping cpumask_t's in the array 'part'
536 * below), and passes that partial partition to the kernel/sched.c
537 * partition_sched_domains() routine, which will rebuild the
538 * schedulers load balancing domains (sched domains) as specified
539 * by that partial partition. A 'partial partition' is a set of
540 * non-overlapping subsets whose union is a subset of that set.
541 * 533 *
542 * See "What is sched_load_balance" in Documentation/cpusets.txt 534 * See "What is sched_load_balance" in Documentation/cpusets.txt
543 * for a background explanation of this. 535 * for a background explanation of this.
@@ -547,13 +539,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
547 * domains when operating in the severe memory shortage situations 539 * domains when operating in the severe memory shortage situations
548 * that could cause allocation failures below. 540 * that could cause allocation failures below.
549 * 541 *
550 * Call with cgroup_mutex held. May take callback_mutex during 542 * Must be called with cgroup_lock held.
551 * call due to the kfifo_alloc() and kmalloc() calls. May nest
552 * a call to the get_online_cpus()/put_online_cpus() pair.
553 * Must not be called holding callback_mutex, because we must not
554 * call get_online_cpus() while holding callback_mutex. Elsewhere
555 * the kernel nests callback_mutex inside get_online_cpus() calls.
556 * So the reverse nesting would risk an ABBA deadlock.
557 * 543 *
558 * The three key local variables below are: 544 * The three key local variables below are:
559 * q - a linked-list queue of cpuset pointers, used to implement a 545 * q - a linked-list queue of cpuset pointers, used to implement a
@@ -588,10 +574,10 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
588 * element of the partition (one sched domain) to be passed to 574 * element of the partition (one sched domain) to be passed to
589 * partition_sched_domains(). 575 * partition_sched_domains().
590 */ 576 */
591 577static int generate_sched_domains(cpumask_t **domains,
592void rebuild_sched_domains(void) 578 struct sched_domain_attr **attributes)
593{ 579{
594 LIST_HEAD(q); /* queue of cpusets to be scanned*/ 580 LIST_HEAD(q); /* queue of cpusets to be scanned */
595 struct cpuset *cp; /* scans q */ 581 struct cpuset *cp; /* scans q */
596 struct cpuset **csa; /* array of all cpuset ptrs */ 582 struct cpuset **csa; /* array of all cpuset ptrs */
597 int csn; /* how many cpuset ptrs in csa so far */ 583 int csn; /* how many cpuset ptrs in csa so far */
@@ -601,23 +587,26 @@ void rebuild_sched_domains(void)
601 int ndoms; /* number of sched domains in result */ 587 int ndoms; /* number of sched domains in result */
602 int nslot; /* next empty doms[] cpumask_t slot */ 588 int nslot; /* next empty doms[] cpumask_t slot */
603 589
604 csa = NULL; 590 ndoms = 0;
605 doms = NULL; 591 doms = NULL;
606 dattr = NULL; 592 dattr = NULL;
593 csa = NULL;
607 594
608 /* Special case for the 99% of systems with one, full, sched domain */ 595 /* Special case for the 99% of systems with one, full, sched domain */
609 if (is_sched_load_balance(&top_cpuset)) { 596 if (is_sched_load_balance(&top_cpuset)) {
610 ndoms = 1;
611 doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); 597 doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
612 if (!doms) 598 if (!doms)
613 goto rebuild; 599 goto done;
600
614 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); 601 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
615 if (dattr) { 602 if (dattr) {
616 *dattr = SD_ATTR_INIT; 603 *dattr = SD_ATTR_INIT;
617 update_domain_attr_tree(dattr, &top_cpuset); 604 update_domain_attr_tree(dattr, &top_cpuset);
618 } 605 }
619 *doms = top_cpuset.cpus_allowed; 606 *doms = top_cpuset.cpus_allowed;
620 goto rebuild; 607
608 ndoms = 1;
609 goto done;
621 } 610 }
622 611
623 csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); 612 csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
@@ -680,61 +669,141 @@ restart:
680 } 669 }
681 } 670 }
682 671
683 /* Convert <csn, csa> to <ndoms, doms> */ 672 /*
673 * Now we know how many domains to create.
674 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
675 */
684 doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); 676 doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
685 if (!doms) 677 if (!doms) {
686 goto rebuild; 678 ndoms = 0;
679 goto done;
680 }
681
682 /*
683 * The rest of the code, including the scheduler, can deal with
684 * dattr==NULL case. No need to abort if alloc fails.
685 */
687 dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL); 686 dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
688 687
689 for (nslot = 0, i = 0; i < csn; i++) { 688 for (nslot = 0, i = 0; i < csn; i++) {
690 struct cpuset *a = csa[i]; 689 struct cpuset *a = csa[i];
690 cpumask_t *dp;
691 int apn = a->pn; 691 int apn = a->pn;
692 692
693 if (apn >= 0) { 693 if (apn < 0) {
694 cpumask_t *dp = doms + nslot; 694 /* Skip completed partitions */
695 695 continue;
696 if (nslot == ndoms) { 696 }
697 static int warnings = 10; 697
698 if (warnings) { 698 dp = doms + nslot;
699 printk(KERN_WARNING 699
700 "rebuild_sched_domains confused:" 700 if (nslot == ndoms) {
701 " nslot %d, ndoms %d, csn %d, i %d," 701 static int warnings = 10;
702 " apn %d\n", 702 if (warnings) {
703 nslot, ndoms, csn, i, apn); 703 printk(KERN_WARNING
704 warnings--; 704 "rebuild_sched_domains confused:"
705 } 705 " nslot %d, ndoms %d, csn %d, i %d,"
706 continue; 706 " apn %d\n",
707 nslot, ndoms, csn, i, apn);
708 warnings--;
707 } 709 }
710 continue;
711 }
708 712
709 cpus_clear(*dp); 713 cpus_clear(*dp);
710 if (dattr) 714 if (dattr)
711 *(dattr + nslot) = SD_ATTR_INIT; 715 *(dattr + nslot) = SD_ATTR_INIT;
712 for (j = i; j < csn; j++) { 716 for (j = i; j < csn; j++) {
713 struct cpuset *b = csa[j]; 717 struct cpuset *b = csa[j];
714 718
715 if (apn == b->pn) { 719 if (apn == b->pn) {
716 cpus_or(*dp, *dp, b->cpus_allowed); 720 cpus_or(*dp, *dp, b->cpus_allowed);
717 b->pn = -1; 721 if (dattr)
718 if (dattr) 722 update_domain_attr_tree(dattr + nslot, b);
719 update_domain_attr_tree(dattr 723
720 + nslot, b); 724 /* Done with this partition */
721 } 725 b->pn = -1;
722 } 726 }
723 nslot++;
724 } 727 }
728 nslot++;
725 } 729 }
726 BUG_ON(nslot != ndoms); 730 BUG_ON(nslot != ndoms);
727 731
728rebuild: 732done:
729 /* Have scheduler rebuild sched domains */ 733 kfree(csa);
734
735 *domains = doms;
736 *attributes = dattr;
737 return ndoms;
738}
739
740/*
741 * Rebuild scheduler domains.
742 *
743 * Call with neither cgroup_mutex held nor within get_online_cpus().
744 * Takes both cgroup_mutex and get_online_cpus().
745 *
746 * Cannot be directly called from cpuset code handling changes
747 * to the cpuset pseudo-filesystem, because it cannot be called
748 * from code that already holds cgroup_mutex.
749 */
750static void do_rebuild_sched_domains(struct work_struct *unused)
751{
752 struct sched_domain_attr *attr;
753 cpumask_t *doms;
754 int ndoms;
755
730 get_online_cpus(); 756 get_online_cpus();
731 partition_sched_domains(ndoms, doms, dattr); 757
758 /* Generate domain masks and attrs */
759 cgroup_lock();
760 ndoms = generate_sched_domains(&doms, &attr);
761 cgroup_unlock();
762
763 /* Have scheduler rebuild the domains */
764 partition_sched_domains(ndoms, doms, attr);
765
732 put_online_cpus(); 766 put_online_cpus();
767}
733 768
734done: 769static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
735 kfree(csa); 770
736 /* Don't kfree(doms) -- partition_sched_domains() does that. */ 771/*
737 /* Don't kfree(dattr) -- partition_sched_domains() does that. */ 772 * Rebuild scheduler domains, asynchronously via workqueue.
773 *
774 * If the flag 'sched_load_balance' of any cpuset with non-empty
775 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
776 * which has that flag enabled, or if any cpuset with a non-empty
777 * 'cpus' is removed, then call this routine to rebuild the
778 * scheduler's dynamic sched domains.
779 *
780 * The rebuild_sched_domains() and partition_sched_domains()
781 * routines must nest cgroup_lock() inside get_online_cpus(),
782 * but such cpuset changes as these must nest that locking the
783 * other way, holding cgroup_lock() for much of the code.
784 *
785 * So in order to avoid an ABBA deadlock, the cpuset code handling
786 * these user changes delegates the actual sched domain rebuilding
787 * to a separate workqueue thread, which ends up processing the
788 * above do_rebuild_sched_domains() function.
789 */
790static void async_rebuild_sched_domains(void)
791{
792 schedule_work(&rebuild_sched_domains_work);
793}
794
795/*
796 * Accomplishes the same scheduler domain rebuild as the above
797 * async_rebuild_sched_domains(), however it directly calls the
798 * rebuild routine synchronously rather than calling it via an
799 * asynchronous work thread.
800 *
801 * This can only be called from code that is not holding
802 * cgroup_mutex (not nested in a cgroup_lock() call.)
803 */
804void rebuild_sched_domains(void)
805{
806 do_rebuild_sched_domains(NULL);
738} 807}
739 808
740/** 809/**
@@ -863,7 +932,7 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
863 return retval; 932 return retval;
864 933
865 if (is_load_balanced) 934 if (is_load_balanced)
866 rebuild_sched_domains(); 935 async_rebuild_sched_domains();
867 return 0; 936 return 0;
868} 937}
869 938
@@ -1090,7 +1159,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
1090 if (val != cs->relax_domain_level) { 1159 if (val != cs->relax_domain_level) {
1091 cs->relax_domain_level = val; 1160 cs->relax_domain_level = val;
1092 if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs)) 1161 if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs))
1093 rebuild_sched_domains(); 1162 async_rebuild_sched_domains();
1094 } 1163 }
1095 1164
1096 return 0; 1165 return 0;
@@ -1131,7 +1200,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1131 mutex_unlock(&callback_mutex); 1200 mutex_unlock(&callback_mutex);
1132 1201
1133 if (cpus_nonempty && balance_flag_changed) 1202 if (cpus_nonempty && balance_flag_changed)
1134 rebuild_sched_domains(); 1203 async_rebuild_sched_domains();
1135 1204
1136 return 0; 1205 return 0;
1137} 1206}
@@ -1492,6 +1561,9 @@ static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
1492 default: 1561 default:
1493 BUG(); 1562 BUG();
1494 } 1563 }
1564
1565 /* Unreachable but makes gcc happy */
1566 return 0;
1495} 1567}
1496 1568
1497static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) 1569static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
@@ -1504,6 +1576,9 @@ static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
1504 default: 1576 default:
1505 BUG(); 1577 BUG();
1506 } 1578 }
1579
1580 /* Unrechable but makes gcc happy */
1581 return 0;
1507} 1582}
1508 1583
1509 1584
@@ -1692,15 +1767,9 @@ static struct cgroup_subsys_state *cpuset_create(
1692} 1767}
1693 1768
1694/* 1769/*
1695 * Locking note on the strange update_flag() call below:
1696 *
1697 * If the cpuset being removed has its flag 'sched_load_balance' 1770 * If the cpuset being removed has its flag 'sched_load_balance'
1698 * enabled, then simulate turning sched_load_balance off, which 1771 * enabled, then simulate turning sched_load_balance off, which
1699 * will call rebuild_sched_domains(). The get_online_cpus() 1772 * will call async_rebuild_sched_domains().
1700 * call in rebuild_sched_domains() must not be made while holding
1701 * callback_mutex. Elsewhere the kernel nests callback_mutex inside
1702 * get_online_cpus() calls. So the reverse nesting would risk an
1703 * ABBA deadlock.
1704 */ 1773 */
1705 1774
1706static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) 1775static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
@@ -1719,7 +1788,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
1719struct cgroup_subsys cpuset_subsys = { 1788struct cgroup_subsys cpuset_subsys = {
1720 .name = "cpuset", 1789 .name = "cpuset",
1721 .create = cpuset_create, 1790 .create = cpuset_create,
1722 .destroy = cpuset_destroy, 1791 .destroy = cpuset_destroy,
1723 .can_attach = cpuset_can_attach, 1792 .can_attach = cpuset_can_attach,
1724 .attach = cpuset_attach, 1793 .attach = cpuset_attach,
1725 .populate = cpuset_populate, 1794 .populate = cpuset_populate,
@@ -1811,7 +1880,7 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
1811} 1880}
1812 1881
1813/* 1882/*
1814 * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs 1883 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
1815 * or memory nodes, we need to walk over the cpuset hierarchy, 1884 * or memory nodes, we need to walk over the cpuset hierarchy,
1816 * removing that CPU or node from all cpusets. If this removes the 1885 * removing that CPU or node from all cpusets. If this removes the
1817 * last CPU or node from a cpuset, then move the tasks in the empty 1886 * last CPU or node from a cpuset, then move the tasks in the empty
@@ -1903,35 +1972,6 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
1903} 1972}
1904 1973
1905/* 1974/*
1906 * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
1907 * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to
1908 * track what's online after any CPU or memory node hotplug or unplug event.
1909 *
1910 * Since there are two callers of this routine, one for CPU hotplug
1911 * events and one for memory node hotplug events, we could have coded
1912 * two separate routines here. We code it as a single common routine
1913 * in order to minimize text size.
1914 */
1915
1916static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
1917{
1918 cgroup_lock();
1919
1920 top_cpuset.cpus_allowed = cpu_online_map;
1921 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
1922 scan_for_empty_cpusets(&top_cpuset);
1923
1924 /*
1925 * Scheduler destroys domains on hotplug events.
1926 * Rebuild them based on the current settings.
1927 */
1928 if (rebuild_sd)
1929 rebuild_sched_domains();
1930
1931 cgroup_unlock();
1932}
1933
1934/*
1935 * The top_cpuset tracks what CPUs and Memory Nodes are online, 1975 * The top_cpuset tracks what CPUs and Memory Nodes are online,
1936 * period. This is necessary in order to make cpusets transparent 1976 * period. This is necessary in order to make cpusets transparent
1937 * (of no affect) on systems that are actively using CPU hotplug 1977 * (of no affect) on systems that are actively using CPU hotplug
@@ -1939,40 +1979,52 @@ static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
1939 * 1979 *
1940 * This routine ensures that top_cpuset.cpus_allowed tracks 1980 * This routine ensures that top_cpuset.cpus_allowed tracks
1941 * cpu_online_map on each CPU hotplug (cpuhp) event. 1981 * cpu_online_map on each CPU hotplug (cpuhp) event.
1982 *
1983 * Called within get_online_cpus(). Needs to call cgroup_lock()
1984 * before calling generate_sched_domains().
1942 */ 1985 */
1943 1986static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
1944static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
1945 unsigned long phase, void *unused_cpu) 1987 unsigned long phase, void *unused_cpu)
1946{ 1988{
1989 struct sched_domain_attr *attr;
1990 cpumask_t *doms;
1991 int ndoms;
1992
1947 switch (phase) { 1993 switch (phase) {
1948 case CPU_UP_CANCELED:
1949 case CPU_UP_CANCELED_FROZEN:
1950 case CPU_DOWN_FAILED:
1951 case CPU_DOWN_FAILED_FROZEN:
1952 case CPU_ONLINE: 1994 case CPU_ONLINE:
1953 case CPU_ONLINE_FROZEN: 1995 case CPU_ONLINE_FROZEN:
1954 case CPU_DEAD: 1996 case CPU_DEAD:
1955 case CPU_DEAD_FROZEN: 1997 case CPU_DEAD_FROZEN:
1956 common_cpu_mem_hotplug_unplug(1);
1957 break; 1998 break;
1999
1958 default: 2000 default:
1959 return NOTIFY_DONE; 2001 return NOTIFY_DONE;
1960 } 2002 }
1961 2003
2004 cgroup_lock();
2005 top_cpuset.cpus_allowed = cpu_online_map;
2006 scan_for_empty_cpusets(&top_cpuset);
2007 ndoms = generate_sched_domains(&doms, &attr);
2008 cgroup_unlock();
2009
2010 /* Have scheduler rebuild the domains */
2011 partition_sched_domains(ndoms, doms, attr);
2012
1962 return NOTIFY_OK; 2013 return NOTIFY_OK;
1963} 2014}
1964 2015
1965#ifdef CONFIG_MEMORY_HOTPLUG 2016#ifdef CONFIG_MEMORY_HOTPLUG
1966/* 2017/*
1967 * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY]. 2018 * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
1968 * Call this routine anytime after you change 2019 * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
1969 * node_states[N_HIGH_MEMORY]. 2020 * See also the previous routine cpuset_track_online_cpus().
1970 * See also the previous routine cpuset_handle_cpuhp().
1971 */ 2021 */
1972
1973void cpuset_track_online_nodes(void) 2022void cpuset_track_online_nodes(void)
1974{ 2023{
1975 common_cpu_mem_hotplug_unplug(0); 2024 cgroup_lock();
2025 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2026 scan_for_empty_cpusets(&top_cpuset);
2027 cgroup_unlock();
1976} 2028}
1977#endif 2029#endif
1978 2030
@@ -1987,7 +2039,7 @@ void __init cpuset_init_smp(void)
1987 top_cpuset.cpus_allowed = cpu_online_map; 2039 top_cpuset.cpus_allowed = cpu_online_map;
1988 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; 2040 top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
1989 2041
1990 hotcpu_notifier(cpuset_handle_cpuhp, 0); 2042 hotcpu_notifier(cpuset_track_online_cpus, 0);
1991} 2043}
1992 2044
1993/** 2045/**
diff --git a/kernel/exit.c b/kernel/exit.c
index 75c647387639..16395644a98f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -112,9 +112,9 @@ static void __exit_signal(struct task_struct *tsk)
112 * We won't ever get here for the group leader, since it 112 * We won't ever get here for the group leader, since it
113 * will have been the last reference on the signal_struct. 113 * will have been the last reference on the signal_struct.
114 */ 114 */
115 sig->utime = cputime_add(sig->utime, tsk->utime); 115 sig->utime = cputime_add(sig->utime, task_utime(tsk));
116 sig->stime = cputime_add(sig->stime, tsk->stime); 116 sig->stime = cputime_add(sig->stime, task_stime(tsk));
117 sig->gtime = cputime_add(sig->gtime, tsk->gtime); 117 sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
118 sig->min_flt += tsk->min_flt; 118 sig->min_flt += tsk->min_flt;
119 sig->maj_flt += tsk->maj_flt; 119 sig->maj_flt += tsk->maj_flt;
120 sig->nvcsw += tsk->nvcsw; 120 sig->nvcsw += tsk->nvcsw;
@@ -831,26 +831,50 @@ static void reparent_thread(struct task_struct *p, struct task_struct *father)
831 * the child reaper process (ie "init") in our pid 831 * the child reaper process (ie "init") in our pid
832 * space. 832 * space.
833 */ 833 */
834static struct task_struct *find_new_reaper(struct task_struct *father)
835{
836 struct pid_namespace *pid_ns = task_active_pid_ns(father);
837 struct task_struct *thread;
838
839 thread = father;
840 while_each_thread(father, thread) {
841 if (thread->flags & PF_EXITING)
842 continue;
843 if (unlikely(pid_ns->child_reaper == father))
844 pid_ns->child_reaper = thread;
845 return thread;
846 }
847
848 if (unlikely(pid_ns->child_reaper == father)) {
849 write_unlock_irq(&tasklist_lock);
850 if (unlikely(pid_ns == &init_pid_ns))
851 panic("Attempted to kill init!");
852
853 zap_pid_ns_processes(pid_ns);
854 write_lock_irq(&tasklist_lock);
855 /*
856 * We can not clear ->child_reaper or leave it alone.
857 * There may by stealth EXIT_DEAD tasks on ->children,
858 * forget_original_parent() must move them somewhere.
859 */
860 pid_ns->child_reaper = init_pid_ns.child_reaper;
861 }
862
863 return pid_ns->child_reaper;
864}
865
834static void forget_original_parent(struct task_struct *father) 866static void forget_original_parent(struct task_struct *father)
835{ 867{
836 struct task_struct *p, *n, *reaper = father; 868 struct task_struct *p, *n, *reaper;
837 LIST_HEAD(ptrace_dead); 869 LIST_HEAD(ptrace_dead);
838 870
839 write_lock_irq(&tasklist_lock); 871 write_lock_irq(&tasklist_lock);
840 872 reaper = find_new_reaper(father);
841 /* 873 /*
842 * First clean up ptrace if we were using it. 874 * First clean up ptrace if we were using it.
843 */ 875 */
844 ptrace_exit(father, &ptrace_dead); 876 ptrace_exit(father, &ptrace_dead);
845 877
846 do {
847 reaper = next_thread(reaper);
848 if (reaper == father) {
849 reaper = task_child_reaper(father);
850 break;
851 }
852 } while (reaper->flags & PF_EXITING);
853
854 list_for_each_entry_safe(p, n, &father->children, sibling) { 878 list_for_each_entry_safe(p, n, &father->children, sibling) {
855 p->real_parent = reaper; 879 p->real_parent = reaper;
856 if (p->parent == father) { 880 if (p->parent == father) {
@@ -959,39 +983,6 @@ static void check_stack_usage(void)
959static inline void check_stack_usage(void) {} 983static inline void check_stack_usage(void) {}
960#endif 984#endif
961 985
962static inline void exit_child_reaper(struct task_struct *tsk)
963{
964 if (likely(tsk->group_leader != task_child_reaper(tsk)))
965 return;
966
967 if (tsk->nsproxy->pid_ns == &init_pid_ns)
968 panic("Attempted to kill init!");
969
970 /*
971 * @tsk is the last thread in the 'cgroup-init' and is exiting.
972 * Terminate all remaining processes in the namespace and reap them
973 * before exiting @tsk.
974 *
975 * Note that @tsk (last thread of cgroup-init) may not necessarily
976 * be the child-reaper (i.e main thread of cgroup-init) of the
977 * namespace i.e the child_reaper may have already exited.
978 *
979 * Even after a child_reaper exits, we let it inherit orphaned children,
980 * because, pid_ns->child_reaper remains valid as long as there is
981 * at least one living sub-thread in the cgroup init.
982
983 * This living sub-thread of the cgroup-init will be notified when
984 * a child inherited by the 'child-reaper' exits (do_notify_parent()
985 * uses __group_send_sig_info()). Further, when reaping child processes,
986 * do_wait() iterates over children of all living sub threads.
987
988 * i.e even though 'child_reaper' thread is listed as the parent of the
989 * orphaned children, any living sub-thread in the cgroup-init can
990 * perform the role of the child_reaper.
991 */
992 zap_pid_ns_processes(tsk->nsproxy->pid_ns);
993}
994
995NORET_TYPE void do_exit(long code) 986NORET_TYPE void do_exit(long code)
996{ 987{
997 struct task_struct *tsk = current; 988 struct task_struct *tsk = current;
@@ -1051,7 +1042,6 @@ NORET_TYPE void do_exit(long code)
1051 } 1042 }
1052 group_dead = atomic_dec_and_test(&tsk->signal->live); 1043 group_dead = atomic_dec_and_test(&tsk->signal->live);
1053 if (group_dead) { 1044 if (group_dead) {
1054 exit_child_reaper(tsk);
1055 hrtimer_cancel(&tsk->signal->real_timer); 1045 hrtimer_cancel(&tsk->signal->real_timer);
1056 exit_itimers(tsk->signal); 1046 exit_itimers(tsk->signal);
1057 } 1047 }
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index ea567b78d1aa..fab8ea86fac3 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -179,9 +179,6 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
179 rc = sys_wait4(-1, NULL, __WALL, NULL); 179 rc = sys_wait4(-1, NULL, __WALL, NULL);
180 } while (rc != -ECHILD); 180 } while (rc != -ECHILD);
181 181
182
183 /* Child reaper for the pid namespace is going away */
184 pid_ns->child_reaper = NULL;
185 acct_exit_ns(pid_ns); 182 acct_exit_ns(pid_ns);
186 return; 183 return;
187} 184}
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index da9c2dda6a4e..dfdec524d1b7 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -43,7 +43,7 @@
43#include <linux/uaccess.h> 43#include <linux/uaccess.h>
44 44
45/* 45/*
46 * locking rule: all changes to target_value or requirements or notifiers lists 46 * locking rule: all changes to requirements or notifiers lists
47 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock 47 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
48 * held, taken with _irqsave. One lock to rule them all 48 * held, taken with _irqsave. One lock to rule them all
49 */ 49 */
@@ -66,7 +66,7 @@ struct pm_qos_object {
66 struct miscdevice pm_qos_power_miscdev; 66 struct miscdevice pm_qos_power_miscdev;
67 char *name; 67 char *name;
68 s32 default_value; 68 s32 default_value;
69 s32 target_value; 69 atomic_t target_value;
70 s32 (*comparitor)(s32, s32); 70 s32 (*comparitor)(s32, s32);
71}; 71};
72 72
@@ -77,7 +77,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {
77 .notifiers = &cpu_dma_lat_notifier, 77 .notifiers = &cpu_dma_lat_notifier,
78 .name = "cpu_dma_latency", 78 .name = "cpu_dma_latency",
79 .default_value = 2000 * USEC_PER_SEC, 79 .default_value = 2000 * USEC_PER_SEC,
80 .target_value = 2000 * USEC_PER_SEC, 80 .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
81 .comparitor = min_compare 81 .comparitor = min_compare
82}; 82};
83 83
@@ -87,7 +87,7 @@ static struct pm_qos_object network_lat_pm_qos = {
87 .notifiers = &network_lat_notifier, 87 .notifiers = &network_lat_notifier,
88 .name = "network_latency", 88 .name = "network_latency",
89 .default_value = 2000 * USEC_PER_SEC, 89 .default_value = 2000 * USEC_PER_SEC,
90 .target_value = 2000 * USEC_PER_SEC, 90 .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC),
91 .comparitor = min_compare 91 .comparitor = min_compare
92}; 92};
93 93
@@ -99,7 +99,7 @@ static struct pm_qos_object network_throughput_pm_qos = {
99 .notifiers = &network_throughput_notifier, 99 .notifiers = &network_throughput_notifier,
100 .name = "network_throughput", 100 .name = "network_throughput",
101 .default_value = 0, 101 .default_value = 0,
102 .target_value = 0, 102 .target_value = ATOMIC_INIT(0),
103 .comparitor = max_compare 103 .comparitor = max_compare
104}; 104};
105 105
@@ -150,11 +150,11 @@ static void update_target(int target)
150 extreme_value = pm_qos_array[target]->comparitor( 150 extreme_value = pm_qos_array[target]->comparitor(
151 extreme_value, node->value); 151 extreme_value, node->value);
152 } 152 }
153 if (pm_qos_array[target]->target_value != extreme_value) { 153 if (atomic_read(&pm_qos_array[target]->target_value) != extreme_value) {
154 call_notifier = 1; 154 call_notifier = 1;
155 pm_qos_array[target]->target_value = extreme_value; 155 atomic_set(&pm_qos_array[target]->target_value, extreme_value);
156 pr_debug(KERN_ERR "new target for qos %d is %d\n", target, 156 pr_debug(KERN_ERR "new target for qos %d is %d\n", target,
157 pm_qos_array[target]->target_value); 157 atomic_read(&pm_qos_array[target]->target_value));
158 } 158 }
159 spin_unlock_irqrestore(&pm_qos_lock, flags); 159 spin_unlock_irqrestore(&pm_qos_lock, flags);
160 160
@@ -193,14 +193,7 @@ static int find_pm_qos_object_by_minor(int minor)
193 */ 193 */
194int pm_qos_requirement(int pm_qos_class) 194int pm_qos_requirement(int pm_qos_class)
195{ 195{
196 int ret_val; 196 return atomic_read(&pm_qos_array[pm_qos_class]->target_value);
197 unsigned long flags;
198
199 spin_lock_irqsave(&pm_qos_lock, flags);
200 ret_val = pm_qos_array[pm_qos_class]->target_value;
201 spin_unlock_irqrestore(&pm_qos_lock, flags);
202
203 return ret_val;
204} 197}
205EXPORT_SYMBOL_GPL(pm_qos_requirement); 198EXPORT_SYMBOL_GPL(pm_qos_requirement);
206 199
diff --git a/kernel/resource.c b/kernel/resource.c
index f5b518eabefe..03d796c1b2e9 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -362,35 +362,21 @@ int allocate_resource(struct resource *root, struct resource *new,
362 362
363EXPORT_SYMBOL(allocate_resource); 363EXPORT_SYMBOL(allocate_resource);
364 364
365/** 365/*
366 * insert_resource - Inserts a resource in the resource tree 366 * Insert a resource into the resource tree. If successful, return NULL,
367 * @parent: parent of the new resource 367 * otherwise return the conflicting resource (compare to __request_resource())
368 * @new: new resource to insert
369 *
370 * Returns 0 on success, -EBUSY if the resource can't be inserted.
371 *
372 * This function is equivalent to request_resource when no conflict
373 * happens. If a conflict happens, and the conflicting resources
374 * entirely fit within the range of the new resource, then the new
375 * resource is inserted and the conflicting resources become children of
376 * the new resource.
377 */ 368 */
378int insert_resource(struct resource *parent, struct resource *new) 369static struct resource * __insert_resource(struct resource *parent, struct resource *new)
379{ 370{
380 int result;
381 struct resource *first, *next; 371 struct resource *first, *next;
382 372
383 write_lock(&resource_lock);
384
385 for (;; parent = first) { 373 for (;; parent = first) {
386 result = 0;
387 first = __request_resource(parent, new); 374 first = __request_resource(parent, new);
388 if (!first) 375 if (!first)
389 goto out; 376 return first;
390 377
391 result = -EBUSY;
392 if (first == parent) 378 if (first == parent)
393 goto out; 379 return first;
394 380
395 if ((first->start > new->start) || (first->end < new->end)) 381 if ((first->start > new->start) || (first->end < new->end))
396 break; 382 break;
@@ -401,15 +387,13 @@ int insert_resource(struct resource *parent, struct resource *new)
401 for (next = first; ; next = next->sibling) { 387 for (next = first; ; next = next->sibling) {
402 /* Partial overlap? Bad, and unfixable */ 388 /* Partial overlap? Bad, and unfixable */
403 if (next->start < new->start || next->end > new->end) 389 if (next->start < new->start || next->end > new->end)
404 goto out; 390 return next;
405 if (!next->sibling) 391 if (!next->sibling)
406 break; 392 break;
407 if (next->sibling->start > new->end) 393 if (next->sibling->start > new->end)
408 break; 394 break;
409 } 395 }
410 396
411 result = 0;
412
413 new->parent = parent; 397 new->parent = parent;
414 new->sibling = next->sibling; 398 new->sibling = next->sibling;
415 new->child = first; 399 new->child = first;
@@ -426,10 +410,64 @@ int insert_resource(struct resource *parent, struct resource *new)
426 next = next->sibling; 410 next = next->sibling;
427 next->sibling = new; 411 next->sibling = new;
428 } 412 }
413 return NULL;
414}
429 415
430 out: 416/**
417 * insert_resource - Inserts a resource in the resource tree
418 * @parent: parent of the new resource
419 * @new: new resource to insert
420 *
421 * Returns 0 on success, -EBUSY if the resource can't be inserted.
422 *
423 * This function is equivalent to request_resource when no conflict
424 * happens. If a conflict happens, and the conflicting resources
425 * entirely fit within the range of the new resource, then the new
426 * resource is inserted and the conflicting resources become children of
427 * the new resource.
428 */
429int insert_resource(struct resource *parent, struct resource *new)
430{
431 struct resource *conflict;
432
433 write_lock(&resource_lock);
434 conflict = __insert_resource(parent, new);
435 write_unlock(&resource_lock);
436 return conflict ? -EBUSY : 0;
437}
438
439/**
440 * insert_resource_expand_to_fit - Insert a resource into the resource tree
441 * @root: root resource descriptor
442 * @new: new resource to insert
443 *
444 * Insert a resource into the resource tree, possibly expanding it in order
445 * to make it encompass any conflicting resources.
446 */
447void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
448{
449 if (new->parent)
450 return;
451
452 write_lock(&resource_lock);
453 for (;;) {
454 struct resource *conflict;
455
456 conflict = __insert_resource(root, new);
457 if (!conflict)
458 break;
459 if (conflict == root)
460 break;
461
462 /* Ok, expand resource to cover the conflict, then try again .. */
463 if (conflict->start < new->start)
464 new->start = conflict->start;
465 if (conflict->end > new->end)
466 new->end = conflict->end;
467
468 printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
469 }
431 write_unlock(&resource_lock); 470 write_unlock(&resource_lock);
432 return result;
433} 471}
434 472
435/** 473/**
diff --git a/kernel/sched.c b/kernel/sched.c
index 9a1ddb84e26d..cc1f81b50b82 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4179,6 +4179,65 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
4179} 4179}
4180 4180
4181/* 4181/*
4182 * Use precise platform statistics if available:
4183 */
4184#ifdef CONFIG_VIRT_CPU_ACCOUNTING
4185cputime_t task_utime(struct task_struct *p)
4186{
4187 return p->utime;
4188}
4189
4190cputime_t task_stime(struct task_struct *p)
4191{
4192 return p->stime;
4193}
4194#else
4195cputime_t task_utime(struct task_struct *p)
4196{
4197 clock_t utime = cputime_to_clock_t(p->utime),
4198 total = utime + cputime_to_clock_t(p->stime);
4199 u64 temp;
4200
4201 /*
4202 * Use CFS's precise accounting:
4203 */
4204 temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
4205
4206 if (total) {
4207 temp *= utime;
4208 do_div(temp, total);
4209 }
4210 utime = (clock_t)temp;
4211
4212 p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
4213 return p->prev_utime;
4214}
4215
4216cputime_t task_stime(struct task_struct *p)
4217{
4218 clock_t stime;
4219
4220 /*
4221 * Use CFS's precise accounting. (we subtract utime from
4222 * the total, to make sure the total observed by userspace
4223 * grows monotonically - apps rely on that):
4224 */
4225 stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
4226 cputime_to_clock_t(task_utime(p));
4227
4228 if (stime >= 0)
4229 p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
4230
4231 return p->prev_stime;
4232}
4233#endif
4234
4235inline cputime_t task_gtime(struct task_struct *p)
4236{
4237 return p->gtime;
4238}
4239
4240/*
4182 * This function gets called by the timer code, with HZ frequency. 4241 * This function gets called by the timer code, with HZ frequency.
4183 * We call it with interrupts disabled. 4242 * We call it with interrupts disabled.
4184 * 4243 *
@@ -7637,24 +7696,27 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7637 * and partition_sched_domains() will fallback to the single partition 7696 * and partition_sched_domains() will fallback to the single partition
7638 * 'fallback_doms', it also forces the domains to be rebuilt. 7697 * 'fallback_doms', it also forces the domains to be rebuilt.
7639 * 7698 *
7699 * If doms_new==NULL it will be replaced with cpu_online_map.
7700 * ndoms_new==0 is a special case for destroying existing domains.
7701 * It will not create the default domain.
7702 *
7640 * Call with hotplug lock held 7703 * Call with hotplug lock held
7641 */ 7704 */
7642void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 7705void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
7643 struct sched_domain_attr *dattr_new) 7706 struct sched_domain_attr *dattr_new)
7644{ 7707{
7645 int i, j; 7708 int i, j, n;
7646 7709
7647 mutex_lock(&sched_domains_mutex); 7710 mutex_lock(&sched_domains_mutex);
7648 7711
7649 /* always unregister in case we don't destroy any domains */ 7712 /* always unregister in case we don't destroy any domains */
7650 unregister_sched_domain_sysctl(); 7713 unregister_sched_domain_sysctl();
7651 7714
7652 if (doms_new == NULL) 7715 n = doms_new ? ndoms_new : 0;
7653 ndoms_new = 0;
7654 7716
7655 /* Destroy deleted domains */ 7717 /* Destroy deleted domains */
7656 for (i = 0; i < ndoms_cur; i++) { 7718 for (i = 0; i < ndoms_cur; i++) {
7657 for (j = 0; j < ndoms_new; j++) { 7719 for (j = 0; j < n; j++) {
7658 if (cpus_equal(doms_cur[i], doms_new[j]) 7720 if (cpus_equal(doms_cur[i], doms_new[j])
7659 && dattrs_equal(dattr_cur, i, dattr_new, j)) 7721 && dattrs_equal(dattr_cur, i, dattr_new, j))
7660 goto match1; 7722 goto match1;
@@ -7667,7 +7729,6 @@ match1:
7667 7729
7668 if (doms_new == NULL) { 7730 if (doms_new == NULL) {
7669 ndoms_cur = 0; 7731 ndoms_cur = 0;
7670 ndoms_new = 1;
7671 doms_new = &fallback_doms; 7732 doms_new = &fallback_doms;
7672 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); 7733 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
7673 dattr_new = NULL; 7734 dattr_new = NULL;
@@ -7704,8 +7765,13 @@ match2:
7704int arch_reinit_sched_domains(void) 7765int arch_reinit_sched_domains(void)
7705{ 7766{
7706 get_online_cpus(); 7767 get_online_cpus();
7768
7769 /* Destroy domains first to force the rebuild */
7770 partition_sched_domains(0, NULL, NULL);
7771
7707 rebuild_sched_domains(); 7772 rebuild_sched_domains();
7708 put_online_cpus(); 7773 put_online_cpus();
7774
7709 return 0; 7775 return 0;
7710} 7776}
7711 7777
@@ -7789,7 +7855,7 @@ static int update_sched_domains(struct notifier_block *nfb,
7789 case CPU_ONLINE_FROZEN: 7855 case CPU_ONLINE_FROZEN:
7790 case CPU_DEAD: 7856 case CPU_DEAD:
7791 case CPU_DEAD_FROZEN: 7857 case CPU_DEAD_FROZEN:
7792 partition_sched_domains(0, NULL, NULL); 7858 partition_sched_domains(1, NULL, NULL);
7793 return NOTIFY_OK; 7859 return NOTIFY_OK;
7794 7860
7795 default: 7861 default:
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index b75b492fbfcf..cb838ee93a82 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -233,7 +233,8 @@ static void check_hung_uninterruptible_tasks(int this_cpu)
233 do_each_thread(g, t) { 233 do_each_thread(g, t) {
234 if (!--max_count) 234 if (!--max_count)
235 goto unlock; 235 goto unlock;
236 if (t->state & TASK_UNINTERRUPTIBLE) 236 /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
237 if (t->state == TASK_UNINTERRUPTIBLE)
237 check_hung_task(t, now); 238 check_hung_task(t, now);
238 } while_each_thread(g, t); 239 } while_each_thread(g, t);
239 unlock: 240 unlock:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index fe4713347275..50ec0886fa3d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -159,6 +159,7 @@ static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *
159static struct ctl_table root_table[]; 159static struct ctl_table root_table[];
160static struct ctl_table_root sysctl_table_root; 160static struct ctl_table_root sysctl_table_root;
161static struct ctl_table_header root_table_header = { 161static struct ctl_table_header root_table_header = {
162 .count = 1,
162 .ctl_table = root_table, 163 .ctl_table = root_table,
163 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list), 164 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),
164 .root = &sysctl_table_root, 165 .root = &sysctl_table_root,
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 3d1e3e1a1971..1876b526c778 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -177,7 +177,7 @@ void clockevents_register_device(struct clock_event_device *dev)
177/* 177/*
178 * Noop handler when we shut down an event device 178 * Noop handler when we shut down an event device
179 */ 179 */
180static void clockevents_handle_noop(struct clock_event_device *dev) 180void clockevents_handle_noop(struct clock_event_device *dev)
181{ 181{
182} 182}
183 183
@@ -199,7 +199,6 @@ void clockevents_exchange_device(struct clock_event_device *old,
199 * released list and do a notify add later. 199 * released list and do a notify add later.
200 */ 200 */
201 if (old) { 201 if (old) {
202 old->event_handler = clockevents_handle_noop;
203 clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); 202 clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
204 list_del(&old->list); 203 list_del(&old->list);
205 list_add(&old->list, &clockevents_released); 204 list_add(&old->list, &clockevents_released);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 5125ddd8196b..1ad46f3df6e7 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -245,7 +245,7 @@ static void sync_cmos_clock(unsigned long dummy)
245 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) 245 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
246 fail = update_persistent_clock(now); 246 fail = update_persistent_clock(now);
247 247
248 next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec; 248 next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
249 if (next.tv_nsec <= 0) 249 if (next.tv_nsec <= 0)
250 next.tv_nsec += NSEC_PER_SEC; 250 next.tv_nsec += NSEC_PER_SEC;
251 251
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 31463d370b94..2f5a38294bf9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -175,6 +175,8 @@ static void tick_do_periodic_broadcast(void)
175 */ 175 */
176static void tick_handle_periodic_broadcast(struct clock_event_device *dev) 176static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
177{ 177{
178 ktime_t next;
179
178 tick_do_periodic_broadcast(); 180 tick_do_periodic_broadcast();
179 181
180 /* 182 /*
@@ -185,10 +187,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
185 187
186 /* 188 /*
187 * Setup the next period for devices, which do not have 189 * Setup the next period for devices, which do not have
188 * periodic mode: 190 * periodic mode. We read dev->next_event first and add to it
191 * when the event alrady expired. clockevents_program_event()
192 * sets dev->next_event only when the event is really
193 * programmed to the device.
189 */ 194 */
190 for (;;) { 195 for (next = dev->next_event; ;) {
191 ktime_t next = ktime_add(dev->next_event, tick_period); 196 next = ktime_add(next, tick_period);
192 197
193 if (!clockevents_program_event(dev, next, ktime_get())) 198 if (!clockevents_program_event(dev, next, ktime_get()))
194 return; 199 return;
@@ -205,7 +210,7 @@ static void tick_do_broadcast_on_off(void *why)
205 struct clock_event_device *bc, *dev; 210 struct clock_event_device *bc, *dev;
206 struct tick_device *td; 211 struct tick_device *td;
207 unsigned long flags, *reason = why; 212 unsigned long flags, *reason = why;
208 int cpu; 213 int cpu, bc_stopped;
209 214
210 spin_lock_irqsave(&tick_broadcast_lock, flags); 215 spin_lock_irqsave(&tick_broadcast_lock, flags);
211 216
@@ -223,6 +228,8 @@ static void tick_do_broadcast_on_off(void *why)
223 if (!tick_device_is_functional(dev)) 228 if (!tick_device_is_functional(dev))
224 goto out; 229 goto out;
225 230
231 bc_stopped = cpus_empty(tick_broadcast_mask);
232
226 switch (*reason) { 233 switch (*reason) {
227 case CLOCK_EVT_NOTIFY_BROADCAST_ON: 234 case CLOCK_EVT_NOTIFY_BROADCAST_ON:
228 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: 235 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
@@ -245,9 +252,10 @@ static void tick_do_broadcast_on_off(void *why)
245 break; 252 break;
246 } 253 }
247 254
248 if (cpus_empty(tick_broadcast_mask)) 255 if (cpus_empty(tick_broadcast_mask)) {
249 clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); 256 if (!bc_stopped)
250 else { 257 clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
258 } else if (bc_stopped) {
251 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) 259 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
252 tick_broadcast_start_periodic(bc); 260 tick_broadcast_start_periodic(bc);
253 else 261 else
@@ -364,16 +372,8 @@ cpumask_t *tick_get_broadcast_oneshot_mask(void)
364static int tick_broadcast_set_event(ktime_t expires, int force) 372static int tick_broadcast_set_event(ktime_t expires, int force)
365{ 373{
366 struct clock_event_device *bc = tick_broadcast_device.evtdev; 374 struct clock_event_device *bc = tick_broadcast_device.evtdev;
367 ktime_t now = ktime_get(); 375
368 int res; 376 return tick_dev_program_event(bc, expires, force);
369
370 for(;;) {
371 res = clockevents_program_event(bc, expires, now);
372 if (!res || !force)
373 return res;
374 now = ktime_get();
375 expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
376 }
377} 377}
378 378
379int tick_resume_broadcast_oneshot(struct clock_event_device *bc) 379int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
@@ -491,14 +491,52 @@ static void tick_broadcast_clear_oneshot(int cpu)
491 cpu_clear(cpu, tick_broadcast_oneshot_mask); 491 cpu_clear(cpu, tick_broadcast_oneshot_mask);
492} 492}
493 493
494static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires)
495{
496 struct tick_device *td;
497 int cpu;
498
499 for_each_cpu_mask_nr(cpu, *mask) {
500 td = &per_cpu(tick_cpu_device, cpu);
501 if (td->evtdev)
502 td->evtdev->next_event = expires;
503 }
504}
505
494/** 506/**
495 * tick_broadcast_setup_oneshot - setup the broadcast device 507 * tick_broadcast_setup_oneshot - setup the broadcast device
496 */ 508 */
497void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 509void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
498{ 510{
499 bc->event_handler = tick_handle_oneshot_broadcast; 511 /* Set it up only once ! */
500 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); 512 if (bc->event_handler != tick_handle_oneshot_broadcast) {
501 bc->next_event.tv64 = KTIME_MAX; 513 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
514 int cpu = smp_processor_id();
515 cpumask_t mask;
516
517 bc->event_handler = tick_handle_oneshot_broadcast;
518 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
519
520 /* Take the do_timer update */
521 tick_do_timer_cpu = cpu;
522
523 /*
524 * We must be careful here. There might be other CPUs
525 * waiting for periodic broadcast. We need to set the
526 * oneshot_mask bits for those and program the
527 * broadcast device to fire.
528 */
529 mask = tick_broadcast_mask;
530 cpu_clear(cpu, mask);
531 cpus_or(tick_broadcast_oneshot_mask,
532 tick_broadcast_oneshot_mask, mask);
533
534 if (was_periodic && !cpus_empty(mask)) {
535 tick_broadcast_init_next_event(&mask, tick_next_period);
536 tick_broadcast_set_event(tick_next_period, 1);
537 } else
538 bc->next_event.tv64 = KTIME_MAX;
539 }
502} 540}
503 541
504/* 542/*
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 80c4336f4188..c4777193d567 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -161,6 +161,7 @@ static void tick_setup_device(struct tick_device *td,
161 } else { 161 } else {
162 handler = td->evtdev->event_handler; 162 handler = td->evtdev->event_handler;
163 next_event = td->evtdev->next_event; 163 next_event = td->evtdev->next_event;
164 td->evtdev->event_handler = clockevents_handle_noop;
164 } 165 }
165 166
166 td->evtdev = newdev; 167 td->evtdev = newdev;
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index f13f2b7f4fd4..0ffc2918ea6f 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -17,6 +17,8 @@ extern void tick_handle_periodic(struct clock_event_device *dev);
17extern void tick_setup_oneshot(struct clock_event_device *newdev, 17extern void tick_setup_oneshot(struct clock_event_device *newdev,
18 void (*handler)(struct clock_event_device *), 18 void (*handler)(struct clock_event_device *),
19 ktime_t nextevt); 19 ktime_t nextevt);
20extern int tick_dev_program_event(struct clock_event_device *dev,
21 ktime_t expires, int force);
20extern int tick_program_event(ktime_t expires, int force); 22extern int tick_program_event(ktime_t expires, int force);
21extern void tick_oneshot_notify(void); 23extern void tick_oneshot_notify(void);
22extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); 24extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 450c04935b66..2e35501e61dd 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -23,24 +23,58 @@
23#include "tick-internal.h" 23#include "tick-internal.h"
24 24
25/** 25/**
26 * tick_program_event 26 * tick_program_event internal worker function
27 */ 27 */
28int tick_program_event(ktime_t expires, int force) 28int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
29 int force)
29{ 30{
30 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
31 ktime_t now = ktime_get(); 31 ktime_t now = ktime_get();
32 int i;
32 33
33 while (1) { 34 for (i = 0;;) {
34 int ret = clockevents_program_event(dev, expires, now); 35 int ret = clockevents_program_event(dev, expires, now);
35 36
36 if (!ret || !force) 37 if (!ret || !force)
37 return ret; 38 return ret;
39
40 /*
41 * We tried 2 times to program the device with the given
42 * min_delta_ns. If that's not working then we double it
43 * and emit a warning.
44 */
45 if (++i > 2) {
46 printk(KERN_WARNING "CE: __tick_program_event of %s is "
47 "stuck %llx %llx\n", dev->name ? dev->name : "?",
48 now.tv64, expires.tv64);
49 printk(KERN_WARNING
50 "CE: increasing min_delta_ns %ld to %ld nsec\n",
51 dev->min_delta_ns, dev->min_delta_ns << 1);
52 WARN_ON(1);
53
54 /* Double the min. delta and try again */
55 if (!dev->min_delta_ns)
56 dev->min_delta_ns = 5000;
57 else
58 dev->min_delta_ns <<= 1;
59 i = 0;
60 }
61
38 now = ktime_get(); 62 now = ktime_get();
39 expires = ktime_add(now, ktime_set(0, dev->min_delta_ns)); 63 expires = ktime_add_ns(now, dev->min_delta_ns);
40 } 64 }
41} 65}
42 66
43/** 67/**
68 * tick_program_event
69 */
70int tick_program_event(ktime_t expires, int force)
71{
72 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
73
74 return tick_dev_program_event(dev, expires, force);
75}
76
77/**
44 * tick_resume_onshot - resume oneshot mode 78 * tick_resume_onshot - resume oneshot mode
45 */ 79 */
46void tick_resume_oneshot(void) 80void tick_resume_oneshot(void)
@@ -61,7 +95,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
61{ 95{
62 newdev->event_handler = handler; 96 newdev->event_handler = handler;
63 clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT); 97 clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT);
64 clockevents_program_event(newdev, next_event, ktime_get()); 98 tick_dev_program_event(newdev, next_event, 1);
65} 99}
66 100
67/** 101/**
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 7a46bde78c66..a87b0468568b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -162,6 +162,8 @@ void tick_nohz_stop_idle(int cpu)
162 ts->idle_lastupdate = now; 162 ts->idle_lastupdate = now;
163 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); 163 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
164 ts->idle_active = 0; 164 ts->idle_active = 0;
165
166 sched_clock_idle_wakeup_event(0);
165 } 167 }
166} 168}
167 169
@@ -177,6 +179,7 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
177 } 179 }
178 ts->idle_entrytime = now; 180 ts->idle_entrytime = now;
179 ts->idle_active = 1; 181 ts->idle_active = 1;
182 sched_clock_idle_sleep_event();
180 return now; 183 return now;
181} 184}
182 185
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8b5a7d304a5f..0b504814e378 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -394,7 +394,7 @@ config LOCKDEP
394 bool 394 bool
395 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 395 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
396 select STACKTRACE 396 select STACKTRACE
397 select FRAME_POINTER if !X86 && !MIPS 397 select FRAME_POINTER if !X86 && !MIPS && !PPC
398 select KALLSYMS 398 select KALLSYMS
399 select KALLSYMS_ALL 399 select KALLSYMS_ALL
400 400
@@ -676,13 +676,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
676 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 676 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
677 depends on !X86_64 677 depends on !X86_64
678 select STACKTRACE 678 select STACKTRACE
679 select FRAME_POINTER 679 select FRAME_POINTER if !PPC
680 help 680 help
681 Provide stacktrace filter for fault-injection capabilities 681 Provide stacktrace filter for fault-injection capabilities
682 682
683config LATENCYTOP 683config LATENCYTOP
684 bool "Latency measuring infrastructure" 684 bool "Latency measuring infrastructure"
685 select FRAME_POINTER if !MIPS 685 select FRAME_POINTER if !MIPS && !PPC
686 select KALLSYMS 686 select KALLSYMS
687 select KALLSYMS_ALL 687 select KALLSYMS_ALL
688 select STACKTRACE 688 select STACKTRACE
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 45a6bde762d1..e3ab374e1334 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
112 112
113/* 113/*
114 * Allocate a new object. If the pool is empty, switch off the debugger. 114 * Allocate a new object. If the pool is empty, switch off the debugger.
115 * Must be called with interrupts disabled.
115 */ 116 */
116static struct debug_obj * 117static struct debug_obj *
117alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) 118alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
148static void free_object(struct debug_obj *obj) 149static void free_object(struct debug_obj *obj)
149{ 150{
150 unsigned long idx = (unsigned long)(obj - obj_static_pool); 151 unsigned long idx = (unsigned long)(obj - obj_static_pool);
152 unsigned long flags;
151 153
152 if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) { 154 if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
153 spin_lock(&pool_lock); 155 spin_lock_irqsave(&pool_lock, flags);
154 hlist_add_head(&obj->node, &obj_pool); 156 hlist_add_head(&obj->node, &obj_pool);
155 obj_pool_free++; 157 obj_pool_free++;
156 obj_pool_used--; 158 obj_pool_used--;
157 spin_unlock(&pool_lock); 159 spin_unlock_irqrestore(&pool_lock, flags);
158 } else { 160 } else {
159 spin_lock(&pool_lock); 161 spin_lock_irqsave(&pool_lock, flags);
160 obj_pool_used--; 162 obj_pool_used--;
161 spin_unlock(&pool_lock); 163 spin_unlock_irqrestore(&pool_lock, flags);
162 kmem_cache_free(obj_cache, obj); 164 kmem_cache_free(obj_cache, obj);
163 } 165 }
164} 166}
@@ -171,6 +173,7 @@ static void debug_objects_oom(void)
171{ 173{
172 struct debug_bucket *db = obj_hash; 174 struct debug_bucket *db = obj_hash;
173 struct hlist_node *node, *tmp; 175 struct hlist_node *node, *tmp;
176 HLIST_HEAD(freelist);
174 struct debug_obj *obj; 177 struct debug_obj *obj;
175 unsigned long flags; 178 unsigned long flags;
176 int i; 179 int i;
@@ -179,11 +182,14 @@ static void debug_objects_oom(void)
179 182
180 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 183 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
181 spin_lock_irqsave(&db->lock, flags); 184 spin_lock_irqsave(&db->lock, flags);
182 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { 185 hlist_move_list(&db->list, &freelist);
186 spin_unlock_irqrestore(&db->lock, flags);
187
188 /* Now free them */
189 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
183 hlist_del(&obj->node); 190 hlist_del(&obj->node);
184 free_object(obj); 191 free_object(obj);
185 } 192 }
186 spin_unlock_irqrestore(&db->lock, flags);
187 } 193 }
188} 194}
189 195
@@ -498,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
498 return; 504 return;
499 default: 505 default:
500 hlist_del(&obj->node); 506 hlist_del(&obj->node);
507 spin_unlock_irqrestore(&db->lock, flags);
501 free_object(obj); 508 free_object(obj);
502 break; 509 return;
503 } 510 }
504out_unlock: 511out_unlock:
505 spin_unlock_irqrestore(&db->lock, flags); 512 spin_unlock_irqrestore(&db->lock, flags);
@@ -510,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
510{ 517{
511 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; 518 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
512 struct hlist_node *node, *tmp; 519 struct hlist_node *node, *tmp;
520 HLIST_HEAD(freelist);
513 struct debug_obj_descr *descr; 521 struct debug_obj_descr *descr;
514 enum debug_obj_state state; 522 enum debug_obj_state state;
515 struct debug_bucket *db; 523 struct debug_bucket *db;
@@ -545,11 +553,18 @@ repeat:
545 goto repeat; 553 goto repeat;
546 default: 554 default:
547 hlist_del(&obj->node); 555 hlist_del(&obj->node);
548 free_object(obj); 556 hlist_add_head(&obj->node, &freelist);
549 break; 557 break;
550 } 558 }
551 } 559 }
552 spin_unlock_irqrestore(&db->lock, flags); 560 spin_unlock_irqrestore(&db->lock, flags);
561
562 /* Now free them */
563 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
564 hlist_del(&obj->node);
565 free_object(obj);
566 }
567
553 if (cnt > debug_objects_maxchain) 568 if (cnt > debug_objects_maxchain)
554 debug_objects_maxchain = cnt; 569 debug_objects_maxchain = cnt;
555 } 570 }
diff --git a/mm/filemap.c b/mm/filemap.c
index 54e968650855..876bc595d0f8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2129,13 +2129,20 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
2129 * After a write we want buffered reads to be sure to go to disk to get 2129 * After a write we want buffered reads to be sure to go to disk to get
2130 * the new data. We invalidate clean cached page from the region we're 2130 * the new data. We invalidate clean cached page from the region we're
2131 * about to write. We do this *before* the write so that we can return 2131 * about to write. We do this *before* the write so that we can return
2132 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO(). 2132 * without clobbering -EIOCBQUEUED from ->direct_IO().
2133 */ 2133 */
2134 if (mapping->nrpages) { 2134 if (mapping->nrpages) {
2135 written = invalidate_inode_pages2_range(mapping, 2135 written = invalidate_inode_pages2_range(mapping,
2136 pos >> PAGE_CACHE_SHIFT, end); 2136 pos >> PAGE_CACHE_SHIFT, end);
2137 if (written) 2137 /*
2138 * If a page can not be invalidated, return 0 to fall back
2139 * to buffered write.
2140 */
2141 if (written) {
2142 if (written == -EBUSY)
2143 return 0;
2138 goto out; 2144 goto out;
2145 }
2139 } 2146 }
2140 2147
2141 written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs); 2148 written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
diff --git a/mm/mmap.c b/mm/mmap.c
index 339cf5c4d5d8..e7a5a68a9c2e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1030,6 +1030,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
1030 } else { 1030 } else {
1031 switch (flags & MAP_TYPE) { 1031 switch (flags & MAP_TYPE) {
1032 case MAP_SHARED: 1032 case MAP_SHARED:
1033 /*
1034 * Ignore pgoff.
1035 */
1036 pgoff = 0;
1033 vm_flags |= VM_SHARED | VM_MAYSHARE; 1037 vm_flags |= VM_SHARED | VM_MAYSHARE;
1034 break; 1038 break;
1035 case MAP_PRIVATE: 1039 case MAP_PRIVATE:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index af982f7cdb2a..e293c58bea58 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -694,6 +694,9 @@ static int move_freepages(struct zone *zone,
694#endif 694#endif
695 695
696 for (page = start_page; page <= end_page;) { 696 for (page = start_page; page <= end_page;) {
697 /* Make sure we are not inadvertently changing nodes */
698 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
699
697 if (!pfn_valid_within(page_to_pfn(page))) { 700 if (!pfn_valid_within(page_to_pfn(page))) {
698 page++; 701 page++;
699 continue; 702 continue;
@@ -2516,6 +2519,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
2516 continue; 2519 continue;
2517 page = pfn_to_page(pfn); 2520 page = pfn_to_page(pfn);
2518 2521
2522 /* Watch out for overlapping nodes */
2523 if (page_to_nid(page) != zone_to_nid(zone))
2524 continue;
2525
2519 /* Blocks with reserved pages will never free, skip them. */ 2526 /* Blocks with reserved pages will never free, skip them. */
2520 if (PageReserved(page)) 2527 if (PageReserved(page))
2521 continue; 2528 continue;
@@ -4064,7 +4071,7 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
4064} 4071}
4065 4072
4066#ifndef CONFIG_NEED_MULTIPLE_NODES 4073#ifndef CONFIG_NEED_MULTIPLE_NODES
4067struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] }; 4074struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4068EXPORT_SYMBOL(contig_page_data); 4075EXPORT_SYMBOL(contig_page_data);
4069#endif 4076#endif
4070 4077
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 3444b58033c8..c69f84fe038d 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -2,7 +2,6 @@
2 * linux/mm/page_isolation.c 2 * linux/mm/page_isolation.c
3 */ 3 */
4 4
5#include <stddef.h>
6#include <linux/mm.h> 5#include <linux/mm.h>
7#include <linux/page-isolation.h> 6#include <linux/page-isolation.h>
8#include <linux/pageblock-flags.h> 7#include <linux/pageblock-flags.h>
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 3f703f7cb398..8dbb6805ef35 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -26,7 +26,10 @@ DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
26static unsigned long max_pages(unsigned long min_pages) 26static unsigned long max_pages(unsigned long min_pages)
27{ 27{
28 unsigned long node_free_pages, max; 28 unsigned long node_free_pages, max;
29 struct zone *zones = NODE_DATA(numa_node_id())->node_zones; 29 int node = numa_node_id();
30 struct zone *zones = NODE_DATA(node)->node_zones;
31 int num_cpus_on_node;
32 node_to_cpumask_ptr(cpumask_on_node, node);
30 33
31 node_free_pages = 34 node_free_pages =
32#ifdef CONFIG_ZONE_DMA 35#ifdef CONFIG_ZONE_DMA
@@ -38,6 +41,10 @@ static unsigned long max_pages(unsigned long min_pages)
38 zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES); 41 zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
39 42
40 max = node_free_pages / FRACTION_OF_NODE_MEM; 43 max = node_free_pages / FRACTION_OF_NODE_MEM;
44
45 num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
46 max /= num_cpus_on_node;
47
41 return max(max, min_pages); 48 return max(max, min_pages);
42} 49}
43 50
diff --git a/mm/truncate.c b/mm/truncate.c
index 250505091d37..6650c1d878b4 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -380,7 +380,7 @@ static int do_launder_page(struct address_space *mapping, struct page *page)
380 * Any pages which are found to be mapped into pagetables are unmapped prior to 380 * Any pages which are found to be mapped into pagetables are unmapped prior to
381 * invalidation. 381 * invalidation.
382 * 382 *
383 * Returns -EIO if any pages could not be invalidated. 383 * Returns -EBUSY if any pages could not be invalidated.
384 */ 384 */
385int invalidate_inode_pages2_range(struct address_space *mapping, 385int invalidate_inode_pages2_range(struct address_space *mapping,
386 pgoff_t start, pgoff_t end) 386 pgoff_t start, pgoff_t end)
@@ -440,7 +440,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
440 ret2 = do_launder_page(mapping, page); 440 ret2 = do_launder_page(mapping, page);
441 if (ret2 == 0) { 441 if (ret2 == 0) {
442 if (!invalidate_complete_page2(mapping, page)) 442 if (!invalidate_complete_page2(mapping, page))
443 ret2 = -EIO; 443 ret2 = -EBUSY;
444 } 444 }
445 if (ret2 < 0) 445 if (ret2 < 0)
446 ret = ret2; 446 ret = ret2;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index b661f47bf10a..f0e335aa20df 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -394,6 +394,7 @@ static void vlan_transfer_features(struct net_device *dev,
394 394
395 vlandev->features &= ~dev->vlan_features; 395 vlandev->features &= ~dev->vlan_features;
396 vlandev->features |= dev->features & dev->vlan_features; 396 vlandev->features |= dev->features & dev->vlan_features;
397 vlandev->gso_max_size = dev->gso_max_size;
397 398
398 if (old_features != vlandev->features) 399 if (old_features != vlandev->features)
399 netdev_features_change(vlandev); 400 netdev_features_change(vlandev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4bf014e51f8c..8883e9c8a223 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -48,7 +48,7 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
48 48
49 switch (veth->h_vlan_encapsulated_proto) { 49 switch (veth->h_vlan_encapsulated_proto) {
50#ifdef CONFIG_INET 50#ifdef CONFIG_INET
51 case __constant_htons(ETH_P_IP): 51 case htons(ETH_P_IP):
52 52
53 /* TODO: Confirm this will work with VLAN headers... */ 53 /* TODO: Confirm this will work with VLAN headers... */
54 return arp_find(veth->h_dest, skb); 54 return arp_find(veth->h_dest, skb);
@@ -607,6 +607,7 @@ static int vlan_dev_init(struct net_device *dev)
607 (1<<__LINK_STATE_PRESENT); 607 (1<<__LINK_STATE_PRESENT);
608 608
609 dev->features |= real_dev->features & real_dev->vlan_features; 609 dev->features |= real_dev->features & real_dev->vlan_features;
610 dev->gso_max_size = real_dev->gso_max_size;
610 611
611 /* ipv6 shared card related stuff */ 612 /* ipv6 shared card related stuff */
612 dev->dev_id = real_dev->dev_id; 613 dev->dev_id = real_dev->dev_id;
diff --git a/net/Kconfig b/net/Kconfig
index 7612cc8c337c..9103a16a77be 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,18 +232,23 @@ source "net/can/Kconfig"
232source "net/irda/Kconfig" 232source "net/irda/Kconfig"
233source "net/bluetooth/Kconfig" 233source "net/bluetooth/Kconfig"
234source "net/rxrpc/Kconfig" 234source "net/rxrpc/Kconfig"
235source "net/phonet/Kconfig"
235 236
236config FIB_RULES 237config FIB_RULES
237 bool 238 bool
238 239
239menu "Wireless" 240menuconfig WIRELESS
241 bool "Wireless"
240 depends on !S390 242 depends on !S390
243 default y
244
245if WIRELESS
241 246
242source "net/wireless/Kconfig" 247source "net/wireless/Kconfig"
243source "net/mac80211/Kconfig" 248source "net/mac80211/Kconfig"
244source "net/ieee80211/Kconfig" 249source "net/ieee80211/Kconfig"
245 250
246endmenu 251endif # WIRELESS
247 252
248source "net/rfkill/Kconfig" 253source "net/rfkill/Kconfig"
249source "net/9p/Kconfig" 254source "net/9p/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index 4f43e7f874f3..acaf819f24aa 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/
42obj-$(CONFIG_ATM) += atm/ 42obj-$(CONFIG_ATM) += atm/
43obj-$(CONFIG_DECNET) += decnet/ 43obj-$(CONFIG_DECNET) += decnet/
44obj-$(CONFIG_ECONET) += econet/ 44obj-$(CONFIG_ECONET) += econet/
45obj-$(CONFIG_PHONET) += phonet/
45ifneq ($(CONFIG_VLAN_8021Q),) 46ifneq ($(CONFIG_VLAN_8021Q),)
46obj-y += 8021q/ 47obj-y += 8021q/
47endif 48endif
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 8d9a6f158880..280de481edc7 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -375,11 +375,11 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
375 if (memcmp 375 if (memcmp
376 (skb->data + 6, ethertype_ipv6, 376 (skb->data + 6, ethertype_ipv6,
377 sizeof(ethertype_ipv6)) == 0) 377 sizeof(ethertype_ipv6)) == 0)
378 skb->protocol = __constant_htons(ETH_P_IPV6); 378 skb->protocol = htons(ETH_P_IPV6);
379 else if (memcmp 379 else if (memcmp
380 (skb->data + 6, ethertype_ipv4, 380 (skb->data + 6, ethertype_ipv4,
381 sizeof(ethertype_ipv4)) == 0) 381 sizeof(ethertype_ipv4)) == 0)
382 skb->protocol = __constant_htons(ETH_P_IP); 382 skb->protocol = htons(ETH_P_IP);
383 else 383 else
384 goto error; 384 goto error;
385 skb_pull(skb, sizeof(llc_oui_ipv4)); 385 skb_pull(skb, sizeof(llc_oui_ipv4));
@@ -404,9 +404,9 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
404 skb_reset_network_header(skb); 404 skb_reset_network_header(skb);
405 iph = ip_hdr(skb); 405 iph = ip_hdr(skb);
406 if (iph->version == 4) 406 if (iph->version == 4)
407 skb->protocol = __constant_htons(ETH_P_IP); 407 skb->protocol = htons(ETH_P_IP);
408 else if (iph->version == 6) 408 else if (iph->version == 6)
409 skb->protocol = __constant_htons(ETH_P_IPV6); 409 skb->protocol = htons(ETH_P_IPV6);
410 else 410 else
411 goto error; 411 goto error;
412 skb->pkt_type = PACKET_HOST; 412 skb->pkt_type = PACKET_HOST;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5799fb52365a..8f701cde5945 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1931,7 +1931,6 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1931 switch (priv->lane_version) { 1931 switch (priv->lane_version) {
1932 case 1: 1932 case 1:
1933 return priv->mcast_vcc; 1933 return priv->mcast_vcc;
1934 break;
1935 case 2: /* LANE2 wants arp for multicast addresses */ 1934 case 2: /* LANE2 wants arp for multicast addresses */
1936 if (!compare_ether_addr(mac_to_find, bus_mac)) 1935 if (!compare_ether_addr(mac_to_find, bus_mac))
1937 return priv->mcast_vcc; 1936 return priv->mcast_vcc;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 1edfdf4c095b..f6348e078aa4 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -49,7 +49,7 @@
49#define BT_DBG(D...) 49#define BT_DBG(D...)
50#endif 50#endif
51 51
52#define VERSION "2.12" 52#define VERSION "2.13"
53 53
54/* Bluetooth sockets */ 54/* Bluetooth sockets */
55#define BT_MAX_PROTO 8 55#define BT_MAX_PROTO 8
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ca8d05245ca0..b7002429f152 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -330,7 +330,7 @@ EXPORT_SYMBOL(hci_get_route);
330 330
331/* Create SCO or ACL connection. 331/* Create SCO or ACL connection.
332 * Device _must_ be locked */ 332 * Device _must_ be locked */
333struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst) 333struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 auth_type)
334{ 334{
335 struct hci_conn *acl; 335 struct hci_conn *acl;
336 struct hci_conn *sco; 336 struct hci_conn *sco;
@@ -344,8 +344,10 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
344 344
345 hci_conn_hold(acl); 345 hci_conn_hold(acl);
346 346
347 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) 347 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
348 acl->auth_type = auth_type;
348 hci_acl_connect(acl); 349 hci_acl_connect(acl);
350 }
349 351
350 if (type == ACL_LINK) 352 if (type == ACL_LINK)
351 return acl; 353 return acl;
@@ -374,6 +376,19 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
374} 376}
375EXPORT_SYMBOL(hci_connect); 377EXPORT_SYMBOL(hci_connect);
376 378
379/* Check link security requirement */
380int hci_conn_check_link_mode(struct hci_conn *conn)
381{
382 BT_DBG("conn %p", conn);
383
384 if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
385 !(conn->link_mode & HCI_LM_ENCRYPT))
386 return 0;
387
388 return 1;
389}
390EXPORT_SYMBOL(hci_conn_check_link_mode);
391
377/* Authenticate remote device */ 392/* Authenticate remote device */
378int hci_conn_auth(struct hci_conn *conn) 393int hci_conn_auth(struct hci_conn *conn)
379{ 394{
@@ -381,7 +396,7 @@ int hci_conn_auth(struct hci_conn *conn)
381 396
382 if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) { 397 if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0) {
383 if (!(conn->auth_type & 0x01)) { 398 if (!(conn->auth_type & 0x01)) {
384 conn->auth_type = HCI_AT_GENERAL_BONDING_MITM; 399 conn->auth_type |= 0x01;
385 conn->link_mode &= ~HCI_LM_AUTH; 400 conn->link_mode &= ~HCI_LM_AUTH;
386 } 401 }
387 } 402 }
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 0e3db289f4be..ad7a553d7713 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1605,14 +1605,11 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
1605 1605
1606 if (conn->state == BT_CONFIG) { 1606 if (conn->state == BT_CONFIG) {
1607 if (!ev->status && hdev->ssp_mode > 0 && 1607 if (!ev->status && hdev->ssp_mode > 0 &&
1608 conn->ssp_mode > 0) { 1608 conn->ssp_mode > 0 && conn->out) {
1609 if (conn->out) { 1609 struct hci_cp_auth_requested cp;
1610 struct hci_cp_auth_requested cp; 1610 cp.handle = ev->handle;
1611 cp.handle = ev->handle; 1611 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1612 hci_send_cmd(hdev,
1613 HCI_OP_AUTH_REQUESTED,
1614 sizeof(cp), &cp); 1612 sizeof(cp), &cp);
1615 }
1616 } else { 1613 } else {
1617 conn->state = BT_CONNECTED; 1614 conn->state = BT_CONNECTED;
1618 hci_proto_connect_cfm(conn, ev->status); 1615 hci_proto_connect_cfm(conn, ev->status);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 3396d5bdef1c..9610a9c85b98 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -55,7 +55,7 @@
55#define BT_DBG(D...) 55#define BT_DBG(D...)
56#endif 56#endif
57 57
58#define VERSION "2.10" 58#define VERSION "2.11"
59 59
60static u32 l2cap_feat_mask = 0x0000; 60static u32 l2cap_feat_mask = 0x0000;
61 61
@@ -778,6 +778,7 @@ static int l2cap_do_connect(struct sock *sk)
778 struct l2cap_conn *conn; 778 struct l2cap_conn *conn;
779 struct hci_conn *hcon; 779 struct hci_conn *hcon;
780 struct hci_dev *hdev; 780 struct hci_dev *hdev;
781 __u8 auth_type;
781 int err = 0; 782 int err = 0;
782 783
783 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm); 784 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
@@ -789,7 +790,21 @@ static int l2cap_do_connect(struct sock *sk)
789 790
790 err = -ENOMEM; 791 err = -ENOMEM;
791 792
792 hcon = hci_connect(hdev, ACL_LINK, dst); 793 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH ||
794 l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT ||
795 l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
796 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
797 auth_type = HCI_AT_NO_BONDING_MITM;
798 else
799 auth_type = HCI_AT_GENERAL_BONDING_MITM;
800 } else {
801 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
802 auth_type = HCI_AT_NO_BONDING;
803 else
804 auth_type = HCI_AT_GENERAL_BONDING;
805 }
806
807 hcon = hci_connect(hdev, ACL_LINK, dst, auth_type);
793 if (!hcon) 808 if (!hcon)
794 goto done; 809 goto done;
795 810
@@ -1553,10 +1568,10 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
1553 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; 1568 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1554 struct l2cap_conn_rsp rsp; 1569 struct l2cap_conn_rsp rsp;
1555 struct sock *sk, *parent; 1570 struct sock *sk, *parent;
1556 int result, status = 0; 1571 int result, status = L2CAP_CS_NO_INFO;
1557 1572
1558 u16 dcid = 0, scid = __le16_to_cpu(req->scid); 1573 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1559 __le16 psm = req->psm; 1574 __le16 psm = req->psm;
1560 1575
1561 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid); 1576 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1562 1577
@@ -1567,6 +1582,13 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
1567 goto sendresp; 1582 goto sendresp;
1568 } 1583 }
1569 1584
1585 /* Check if the ACL is secure enough (if not SDP) */
1586 if (psm != cpu_to_le16(0x0001) &&
1587 !hci_conn_check_link_mode(conn->hcon)) {
1588 result = L2CAP_CR_SEC_BLOCK;
1589 goto response;
1590 }
1591
1570 result = L2CAP_CR_NO_MEM; 1592 result = L2CAP_CR_NO_MEM;
1571 1593
1572 /* Check for backlog size */ 1594 /* Check for backlog size */
@@ -2224,7 +2246,7 @@ static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2224 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 2246 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2225 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 2247 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2226 rsp.result = cpu_to_le16(result); 2248 rsp.result = cpu_to_le16(result);
2227 rsp.status = cpu_to_le16(0); 2249 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2228 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 2250 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2229 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 2251 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2230 } 2252 }
@@ -2296,7 +2318,7 @@ static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2296 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 2318 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2297 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 2319 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2298 rsp.result = cpu_to_le16(result); 2320 rsp.result = cpu_to_le16(result);
2299 rsp.status = cpu_to_le16(0); 2321 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2300 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 2322 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2301 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 2323 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2302 } 2324 }
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index a16011fedc1d..0cc91e6da76d 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -200,7 +200,7 @@ static int sco_connect(struct sock *sk)
200 else 200 else
201 type = SCO_LINK; 201 type = SCO_LINK;
202 202
203 hcon = hci_connect(hdev, type, dst); 203 hcon = hci_connect(hdev, type, dst, HCI_AT_NO_BONDING);
204 if (!hcon) 204 if (!hcon)
205 goto done; 205 goto done;
206 206
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 573acdf6f9ff..4d2c1f1cb524 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -28,6 +28,10 @@ static const struct stp_proto br_stp_proto = {
28 .rcv = br_stp_rcv, 28 .rcv = br_stp_rcv,
29}; 29};
30 30
31static struct pernet_operations br_net_ops = {
32 .exit = br_net_exit,
33};
34
31static int __init br_init(void) 35static int __init br_init(void)
32{ 36{
33 int err; 37 int err;
@@ -42,18 +46,22 @@ static int __init br_init(void)
42 if (err) 46 if (err)
43 goto err_out; 47 goto err_out;
44 48
45 err = br_netfilter_init(); 49 err = register_pernet_subsys(&br_net_ops);
46 if (err) 50 if (err)
47 goto err_out1; 51 goto err_out1;
48 52
49 err = register_netdevice_notifier(&br_device_notifier); 53 err = br_netfilter_init();
50 if (err) 54 if (err)
51 goto err_out2; 55 goto err_out2;
52 56
53 err = br_netlink_init(); 57 err = register_netdevice_notifier(&br_device_notifier);
54 if (err) 58 if (err)
55 goto err_out3; 59 goto err_out3;
56 60
61 err = br_netlink_init();
62 if (err)
63 goto err_out4;
64
57 brioctl_set(br_ioctl_deviceless_stub); 65 brioctl_set(br_ioctl_deviceless_stub);
58 br_handle_frame_hook = br_handle_frame; 66 br_handle_frame_hook = br_handle_frame;
59 67
@@ -61,10 +69,12 @@ static int __init br_init(void)
61 br_fdb_put_hook = br_fdb_put; 69 br_fdb_put_hook = br_fdb_put;
62 70
63 return 0; 71 return 0;
64err_out3: 72err_out4:
65 unregister_netdevice_notifier(&br_device_notifier); 73 unregister_netdevice_notifier(&br_device_notifier);
66err_out2: 74err_out3:
67 br_netfilter_fini(); 75 br_netfilter_fini();
76err_out2:
77 unregister_pernet_subsys(&br_net_ops);
68err_out1: 78err_out1:
69 br_fdb_fini(); 79 br_fdb_fini();
70err_out: 80err_out:
@@ -80,7 +90,7 @@ static void __exit br_deinit(void)
80 unregister_netdevice_notifier(&br_device_notifier); 90 unregister_netdevice_notifier(&br_device_notifier);
81 brioctl_set(NULL); 91 brioctl_set(NULL);
82 92
83 br_cleanup_bridges(); 93 unregister_pernet_subsys(&br_net_ops);
84 94
85 synchronize_net(); 95 synchronize_net();
86 96
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 4f52c3d50ebe..22ba8632196f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -178,5 +178,6 @@ void br_dev_setup(struct net_device *dev)
178 dev->priv_flags = IFF_EBRIDGE; 178 dev->priv_flags = IFF_EBRIDGE;
179 179
180 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 180 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
181 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX; 181 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
182 NETIF_F_NETNS_LOCAL;
182} 183}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 63c18aacde8c..573e20f7dba4 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -168,7 +168,7 @@ static void del_br(struct net_bridge *br)
168 unregister_netdevice(br->dev); 168 unregister_netdevice(br->dev);
169} 169}
170 170
171static struct net_device *new_bridge_dev(const char *name) 171static struct net_device *new_bridge_dev(struct net *net, const char *name)
172{ 172{
173 struct net_bridge *br; 173 struct net_bridge *br;
174 struct net_device *dev; 174 struct net_device *dev;
@@ -178,6 +178,7 @@ static struct net_device *new_bridge_dev(const char *name)
178 178
179 if (!dev) 179 if (!dev)
180 return NULL; 180 return NULL;
181 dev_net_set(dev, net);
181 182
182 br = netdev_priv(dev); 183 br = netdev_priv(dev);
183 br->dev = dev; 184 br->dev = dev;
@@ -262,12 +263,12 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
262 return p; 263 return p;
263} 264}
264 265
265int br_add_bridge(const char *name) 266int br_add_bridge(struct net *net, const char *name)
266{ 267{
267 struct net_device *dev; 268 struct net_device *dev;
268 int ret; 269 int ret;
269 270
270 dev = new_bridge_dev(name); 271 dev = new_bridge_dev(net, name);
271 if (!dev) 272 if (!dev)
272 return -ENOMEM; 273 return -ENOMEM;
273 274
@@ -294,13 +295,13 @@ out_free:
294 goto out; 295 goto out;
295} 296}
296 297
297int br_del_bridge(const char *name) 298int br_del_bridge(struct net *net, const char *name)
298{ 299{
299 struct net_device *dev; 300 struct net_device *dev;
300 int ret = 0; 301 int ret = 0;
301 302
302 rtnl_lock(); 303 rtnl_lock();
303 dev = __dev_get_by_name(&init_net, name); 304 dev = __dev_get_by_name(net, name);
304 if (dev == NULL) 305 if (dev == NULL)
305 ret = -ENXIO; /* Could not find device */ 306 ret = -ENXIO; /* Could not find device */
306 307
@@ -445,13 +446,13 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
445 return 0; 446 return 0;
446} 447}
447 448
448void __exit br_cleanup_bridges(void) 449void br_net_exit(struct net *net)
449{ 450{
450 struct net_device *dev; 451 struct net_device *dev;
451 452
452 rtnl_lock(); 453 rtnl_lock();
453restart: 454restart:
454 for_each_netdev(&init_net, dev) { 455 for_each_netdev(net, dev) {
455 if (dev->priv_flags & IFF_EBRIDGE) { 456 if (dev->priv_flags & IFF_EBRIDGE) {
456 del_br(dev->priv); 457 del_br(dev->priv);
457 goto restart; 458 goto restart;
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index eeee218eed80..6a6433daaf27 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -21,12 +21,12 @@
21#include "br_private.h" 21#include "br_private.h"
22 22
23/* called with RTNL */ 23/* called with RTNL */
24static int get_bridge_ifindices(int *indices, int num) 24static int get_bridge_ifindices(struct net *net, int *indices, int num)
25{ 25{
26 struct net_device *dev; 26 struct net_device *dev;
27 int i = 0; 27 int i = 0;
28 28
29 for_each_netdev(&init_net, dev) { 29 for_each_netdev(net, dev) {
30 if (i >= num) 30 if (i >= num)
31 break; 31 break;
32 if (dev->priv_flags & IFF_EBRIDGE) 32 if (dev->priv_flags & IFF_EBRIDGE)
@@ -89,7 +89,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
89 if (!capable(CAP_NET_ADMIN)) 89 if (!capable(CAP_NET_ADMIN))
90 return -EPERM; 90 return -EPERM;
91 91
92 dev = dev_get_by_index(&init_net, ifindex); 92 dev = dev_get_by_index(dev_net(br->dev), ifindex);
93 if (dev == NULL) 93 if (dev == NULL)
94 return -EINVAL; 94 return -EINVAL;
95 95
@@ -188,15 +188,21 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
188 return 0; 188 return 0;
189 189
190 case BRCTL_SET_BRIDGE_HELLO_TIME: 190 case BRCTL_SET_BRIDGE_HELLO_TIME:
191 {
192 unsigned long t = clock_t_to_jiffies(args[1]);
191 if (!capable(CAP_NET_ADMIN)) 193 if (!capable(CAP_NET_ADMIN))
192 return -EPERM; 194 return -EPERM;
193 195
196 if (t < HZ)
197 return -EINVAL;
198
194 spin_lock_bh(&br->lock); 199 spin_lock_bh(&br->lock);
195 br->bridge_hello_time = clock_t_to_jiffies(args[1]); 200 br->bridge_hello_time = t;
196 if (br_is_root_bridge(br)) 201 if (br_is_root_bridge(br))
197 br->hello_time = br->bridge_hello_time; 202 br->hello_time = br->bridge_hello_time;
198 spin_unlock_bh(&br->lock); 203 spin_unlock_bh(&br->lock);
199 return 0; 204 return 0;
205 }
200 206
201 case BRCTL_SET_BRIDGE_MAX_AGE: 207 case BRCTL_SET_BRIDGE_MAX_AGE:
202 if (!capable(CAP_NET_ADMIN)) 208 if (!capable(CAP_NET_ADMIN))
@@ -309,7 +315,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
309 return -EOPNOTSUPP; 315 return -EOPNOTSUPP;
310} 316}
311 317
312static int old_deviceless(void __user *uarg) 318static int old_deviceless(struct net *net, void __user *uarg)
313{ 319{
314 unsigned long args[3]; 320 unsigned long args[3];
315 321
@@ -331,7 +337,7 @@ static int old_deviceless(void __user *uarg)
331 if (indices == NULL) 337 if (indices == NULL)
332 return -ENOMEM; 338 return -ENOMEM;
333 339
334 args[2] = get_bridge_ifindices(indices, args[2]); 340 args[2] = get_bridge_ifindices(net, indices, args[2]);
335 341
336 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) 342 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
337 ? -EFAULT : args[2]; 343 ? -EFAULT : args[2];
@@ -354,9 +360,9 @@ static int old_deviceless(void __user *uarg)
354 buf[IFNAMSIZ-1] = 0; 360 buf[IFNAMSIZ-1] = 0;
355 361
356 if (args[0] == BRCTL_ADD_BRIDGE) 362 if (args[0] == BRCTL_ADD_BRIDGE)
357 return br_add_bridge(buf); 363 return br_add_bridge(net, buf);
358 364
359 return br_del_bridge(buf); 365 return br_del_bridge(net, buf);
360 } 366 }
361 } 367 }
362 368
@@ -368,7 +374,7 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar
368 switch (cmd) { 374 switch (cmd) {
369 case SIOCGIFBR: 375 case SIOCGIFBR:
370 case SIOCSIFBR: 376 case SIOCSIFBR:
371 return old_deviceless(uarg); 377 return old_deviceless(net, uarg);
372 378
373 case SIOCBRADDBR: 379 case SIOCBRADDBR:
374 case SIOCBRDELBR: 380 case SIOCBRDELBR:
@@ -383,9 +389,9 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar
383 389
384 buf[IFNAMSIZ-1] = 0; 390 buf[IFNAMSIZ-1] = 0;
385 if (cmd == SIOCBRADDBR) 391 if (cmd == SIOCBRADDBR)
386 return br_add_bridge(buf); 392 return br_add_bridge(net, buf);
387 393
388 return br_del_bridge(buf); 394 return br_del_bridge(net, buf);
389 } 395 }
390 } 396 }
391 return -EOPNOTSUPP; 397 return -EOPNOTSUPP;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f155e6ce8a21..ba7be195803c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -82,6 +82,7 @@ nla_put_failure:
82 */ 82 */
83void br_ifinfo_notify(int event, struct net_bridge_port *port) 83void br_ifinfo_notify(int event, struct net_bridge_port *port)
84{ 84{
85 struct net *net = dev_net(port->dev);
85 struct sk_buff *skb; 86 struct sk_buff *skb;
86 int err = -ENOBUFS; 87 int err = -ENOBUFS;
87 88
@@ -97,10 +98,10 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
97 kfree_skb(skb); 98 kfree_skb(skb);
98 goto errout; 99 goto errout;
99 } 100 }
100 err = rtnl_notify(skb, &init_net,0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 101 err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
101errout: 102errout:
102 if (err < 0) 103 if (err < 0)
103 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err); 104 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
104} 105}
105 106
106/* 107/*
@@ -112,11 +113,8 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
112 struct net_device *dev; 113 struct net_device *dev;
113 int idx; 114 int idx;
114 115
115 if (net != &init_net)
116 return 0;
117
118 idx = 0; 116 idx = 0;
119 for_each_netdev(&init_net, dev) { 117 for_each_netdev(net, dev) {
120 /* not a bridge port */ 118 /* not a bridge port */
121 if (dev->br_port == NULL || idx < cb->args[0]) 119 if (dev->br_port == NULL || idx < cb->args[0])
122 goto skip; 120 goto skip;
@@ -147,9 +145,6 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
147 struct net_bridge_port *p; 145 struct net_bridge_port *p;
148 u8 new_state; 146 u8 new_state;
149 147
150 if (net != &init_net)
151 return -EINVAL;
152
153 if (nlmsg_len(nlh) < sizeof(*ifm)) 148 if (nlmsg_len(nlh) < sizeof(*ifm))
154 return -EINVAL; 149 return -EINVAL;
155 150
@@ -165,7 +160,7 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
165 if (new_state > BR_STATE_BLOCKING) 160 if (new_state > BR_STATE_BLOCKING)
166 return -EINVAL; 161 return -EINVAL;
167 162
168 dev = __dev_get_by_index(&init_net, ifm->ifi_index); 163 dev = __dev_get_by_index(net, ifm->ifi_index);
169 if (!dev) 164 if (!dev)
170 return -ENODEV; 165 return -ENODEV;
171 166
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 76340bdd052e..763a3ec292e5 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -35,9 +35,6 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
35 struct net_bridge_port *p = dev->br_port; 35 struct net_bridge_port *p = dev->br_port;
36 struct net_bridge *br; 36 struct net_bridge *br;
37 37
38 if (!net_eq(dev_net(dev), &init_net))
39 return NOTIFY_DONE;
40
41 /* not a port of a bridge */ 38 /* not a port of a bridge */
42 if (p == NULL) 39 if (p == NULL)
43 return NOTIFY_DONE; 40 return NOTIFY_DONE;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c3dc18ddc043..b6c3b71974dc 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -178,9 +178,9 @@ extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb);
178 178
179/* br_if.c */ 179/* br_if.c */
180extern void br_port_carrier_check(struct net_bridge_port *p); 180extern void br_port_carrier_check(struct net_bridge_port *p);
181extern int br_add_bridge(const char *name); 181extern int br_add_bridge(struct net *net, const char *name);
182extern int br_del_bridge(const char *name); 182extern int br_del_bridge(struct net *net, const char *name);
183extern void br_cleanup_bridges(void); 183extern void br_net_exit(struct net *net);
184extern int br_add_if(struct net_bridge *br, 184extern int br_add_if(struct net_bridge *br,
185 struct net_device *dev); 185 struct net_device *dev);
186extern int br_del_if(struct net_bridge *br, 186extern int br_del_if(struct net_bridge *br,
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8b200f96f722..81ae40b3f655 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -140,9 +140,6 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
140 struct net_bridge *br; 140 struct net_bridge *br;
141 const unsigned char *buf; 141 const unsigned char *buf;
142 142
143 if (!net_eq(dev_net(dev), &init_net))
144 goto err;
145
146 if (!p) 143 if (!p)
147 goto err; 144 goto err;
148 145
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 27d6a511c8c1..158dee8b4965 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -29,11 +29,12 @@
29 */ 29 */
30static ssize_t store_bridge_parm(struct device *d, 30static ssize_t store_bridge_parm(struct device *d,
31 const char *buf, size_t len, 31 const char *buf, size_t len,
32 void (*set)(struct net_bridge *, unsigned long)) 32 int (*set)(struct net_bridge *, unsigned long))
33{ 33{
34 struct net_bridge *br = to_bridge(d); 34 struct net_bridge *br = to_bridge(d);
35 char *endp; 35 char *endp;
36 unsigned long val; 36 unsigned long val;
37 int err;
37 38
38 if (!capable(CAP_NET_ADMIN)) 39 if (!capable(CAP_NET_ADMIN))
39 return -EPERM; 40 return -EPERM;
@@ -43,9 +44,9 @@ static ssize_t store_bridge_parm(struct device *d,
43 return -EINVAL; 44 return -EINVAL;
44 45
45 spin_lock_bh(&br->lock); 46 spin_lock_bh(&br->lock);
46 (*set)(br, val); 47 err = (*set)(br, val);
47 spin_unlock_bh(&br->lock); 48 spin_unlock_bh(&br->lock);
48 return len; 49 return err ? err : len;
49} 50}
50 51
51 52
@@ -56,12 +57,13 @@ static ssize_t show_forward_delay(struct device *d,
56 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); 57 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
57} 58}
58 59
59static void set_forward_delay(struct net_bridge *br, unsigned long val) 60static int set_forward_delay(struct net_bridge *br, unsigned long val)
60{ 61{
61 unsigned long delay = clock_t_to_jiffies(val); 62 unsigned long delay = clock_t_to_jiffies(val);
62 br->forward_delay = delay; 63 br->forward_delay = delay;
63 if (br_is_root_bridge(br)) 64 if (br_is_root_bridge(br))
64 br->bridge_forward_delay = delay; 65 br->bridge_forward_delay = delay;
66 return 0;
65} 67}
66 68
67static ssize_t store_forward_delay(struct device *d, 69static ssize_t store_forward_delay(struct device *d,
@@ -80,12 +82,17 @@ static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
80 jiffies_to_clock_t(to_bridge(d)->hello_time)); 82 jiffies_to_clock_t(to_bridge(d)->hello_time));
81} 83}
82 84
83static void set_hello_time(struct net_bridge *br, unsigned long val) 85static int set_hello_time(struct net_bridge *br, unsigned long val)
84{ 86{
85 unsigned long t = clock_t_to_jiffies(val); 87 unsigned long t = clock_t_to_jiffies(val);
88
89 if (t < HZ)
90 return -EINVAL;
91
86 br->hello_time = t; 92 br->hello_time = t;
87 if (br_is_root_bridge(br)) 93 if (br_is_root_bridge(br))
88 br->bridge_hello_time = t; 94 br->bridge_hello_time = t;
95 return 0;
89} 96}
90 97
91static ssize_t store_hello_time(struct device *d, 98static ssize_t store_hello_time(struct device *d,
@@ -104,12 +111,13 @@ static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
104 jiffies_to_clock_t(to_bridge(d)->max_age)); 111 jiffies_to_clock_t(to_bridge(d)->max_age));
105} 112}
106 113
107static void set_max_age(struct net_bridge *br, unsigned long val) 114static int set_max_age(struct net_bridge *br, unsigned long val)
108{ 115{
109 unsigned long t = clock_t_to_jiffies(val); 116 unsigned long t = clock_t_to_jiffies(val);
110 br->max_age = t; 117 br->max_age = t;
111 if (br_is_root_bridge(br)) 118 if (br_is_root_bridge(br))
112 br->bridge_max_age = t; 119 br->bridge_max_age = t;
120 return 0;
113} 121}
114 122
115static ssize_t store_max_age(struct device *d, struct device_attribute *attr, 123static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
@@ -126,9 +134,10 @@ static ssize_t show_ageing_time(struct device *d,
126 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time)); 134 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time));
127} 135}
128 136
129static void set_ageing_time(struct net_bridge *br, unsigned long val) 137static int set_ageing_time(struct net_bridge *br, unsigned long val)
130{ 138{
131 br->ageing_time = clock_t_to_jiffies(val); 139 br->ageing_time = clock_t_to_jiffies(val);
140 return 0;
132} 141}
133 142
134static ssize_t store_ageing_time(struct device *d, 143static ssize_t store_ageing_time(struct device *d,
@@ -180,9 +189,10 @@ static ssize_t show_priority(struct device *d, struct device_attribute *attr,
180 (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]); 189 (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]);
181} 190}
182 191
183static void set_priority(struct net_bridge *br, unsigned long val) 192static int set_priority(struct net_bridge *br, unsigned long val)
184{ 193{
185 br_stp_set_bridge_priority(br, (u16) val); 194 br_stp_set_bridge_priority(br, (u16) val);
195 return 0;
186} 196}
187 197
188static ssize_t store_priority(struct device *d, struct device_attribute *attr, 198static ssize_t store_priority(struct device *d, struct device_attribute *attr,
diff --git a/net/core/Makefile b/net/core/Makefile
index b1332f6d0042..26a37cb31923 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -6,6 +6,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
6 gen_stats.o gen_estimator.o net_namespace.o 6 gen_stats.o gen_estimator.o net_namespace.o
7 7
8obj-$(CONFIG_SYSCTL) += sysctl_net_core.o 8obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
9obj-$(CONFIG_HAS_DMA) += skb_dma_map.o
9 10
10obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ 11obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o 12 neighbour.o rtnetlink.o utils.o link_watch.o filter.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 60c51f765887..a90737fe2472 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -954,6 +954,37 @@ rollback:
954} 954}
955 955
956/** 956/**
957 * dev_set_alias - change ifalias of a device
958 * @dev: device
959 * @alias: name up to IFALIASZ
960 *
961 * Set ifalias for a device,
962 */
963int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
964{
965 ASSERT_RTNL();
966
967 if (len >= IFALIASZ)
968 return -EINVAL;
969
970 if (!len) {
971 if (dev->ifalias) {
972 kfree(dev->ifalias);
973 dev->ifalias = NULL;
974 }
975 return 0;
976 }
977
978 dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
979 if (!dev->ifalias)
980 return -ENOMEM;
981
982 strlcpy(dev->ifalias, alias, len+1);
983 return len;
984}
985
986
987/**
957 * netdev_features_change - device changes features 988 * netdev_features_change - device changes features
958 * @dev: device to cause notification 989 * @dev: device to cause notification
959 * 990 *
@@ -1675,13 +1706,13 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1675 } 1706 }
1676 1707
1677 switch (skb->protocol) { 1708 switch (skb->protocol) {
1678 case __constant_htons(ETH_P_IP): 1709 case htons(ETH_P_IP):
1679 ip_proto = ip_hdr(skb)->protocol; 1710 ip_proto = ip_hdr(skb)->protocol;
1680 addr1 = ip_hdr(skb)->saddr; 1711 addr1 = ip_hdr(skb)->saddr;
1681 addr2 = ip_hdr(skb)->daddr; 1712 addr2 = ip_hdr(skb)->daddr;
1682 ihl = ip_hdr(skb)->ihl; 1713 ihl = ip_hdr(skb)->ihl;
1683 break; 1714 break;
1684 case __constant_htons(ETH_P_IPV6): 1715 case htons(ETH_P_IPV6):
1685 ip_proto = ipv6_hdr(skb)->nexthdr; 1716 ip_proto = ipv6_hdr(skb)->nexthdr;
1686 addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3]; 1717 addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
1687 addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3]; 1718 addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
@@ -1991,8 +2022,13 @@ static void net_tx_action(struct softirq_action *h)
1991 spin_unlock(root_lock); 2022 spin_unlock(root_lock);
1992 } else { 2023 } else {
1993 if (!test_bit(__QDISC_STATE_DEACTIVATED, 2024 if (!test_bit(__QDISC_STATE_DEACTIVATED,
1994 &q->state)) 2025 &q->state)) {
1995 __netif_reschedule(q); 2026 __netif_reschedule(q);
2027 } else {
2028 smp_mb__before_clear_bit();
2029 clear_bit(__QDISC_STATE_SCHED,
2030 &q->state);
2031 }
1996 } 2032 }
1997 } 2033 }
1998 } 2034 }
@@ -4663,6 +4699,12 @@ int netdev_compute_features(unsigned long all, unsigned long one)
4663 one |= NETIF_F_GSO_SOFTWARE; 4699 one |= NETIF_F_GSO_SOFTWARE;
4664 one |= NETIF_F_GSO; 4700 one |= NETIF_F_GSO;
4665 4701
4702 /*
4703 * If even one device supports a GSO protocol with software fallback,
4704 * enable it for all.
4705 */
4706 all |= one & NETIF_F_GSO_SOFTWARE;
4707
4666 /* If even one device supports robust GSO, enable it for all. */ 4708 /* If even one device supports robust GSO, enable it for all. */
4667 if (one & NETIF_F_GSO_ROBUST) 4709 if (one & NETIF_F_GSO_ROBUST)
4668 all |= NETIF_F_GSO_ROBUST; 4710 all |= NETIF_F_GSO_ROBUST;
diff --git a/net/core/dst.c b/net/core/dst.c
index fe03266130b6..09c1530f4681 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -203,6 +203,7 @@ void __dst_free(struct dst_entry * dst)
203 if (dst_garbage.timer_inc > DST_GC_INC) { 203 if (dst_garbage.timer_inc > DST_GC_INC) {
204 dst_garbage.timer_inc = DST_GC_INC; 204 dst_garbage.timer_inc = DST_GC_INC;
205 dst_garbage.timer_expires = DST_GC_MIN; 205 dst_garbage.timer_expires = DST_GC_MIN;
206 cancel_delayed_work(&dst_gc_work);
206 schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires); 207 schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
207 } 208 }
208 spin_unlock_bh(&dst_garbage.lock); 209 spin_unlock_bh(&dst_garbage.lock);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9d92e41826e7..1dc728b38589 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -927,8 +927,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
927 if (skb_queue_len(&neigh->arp_queue) >= 927 if (skb_queue_len(&neigh->arp_queue) >=
928 neigh->parms->queue_len) { 928 neigh->parms->queue_len) {
929 struct sk_buff *buff; 929 struct sk_buff *buff;
930 buff = neigh->arp_queue.next; 930 buff = __skb_dequeue(&neigh->arp_queue);
931 __skb_unlink(buff, &neigh->arp_queue);
932 kfree_skb(buff); 931 kfree_skb(buff);
933 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); 932 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
934 } 933 }
@@ -1259,24 +1258,20 @@ static void neigh_proxy_process(unsigned long arg)
1259 struct neigh_table *tbl = (struct neigh_table *)arg; 1258 struct neigh_table *tbl = (struct neigh_table *)arg;
1260 long sched_next = 0; 1259 long sched_next = 0;
1261 unsigned long now = jiffies; 1260 unsigned long now = jiffies;
1262 struct sk_buff *skb; 1261 struct sk_buff *skb, *n;
1263 1262
1264 spin_lock(&tbl->proxy_queue.lock); 1263 spin_lock(&tbl->proxy_queue.lock);
1265 1264
1266 skb = tbl->proxy_queue.next; 1265 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1267 1266 long tdif = NEIGH_CB(skb)->sched_next - now;
1268 while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1269 struct sk_buff *back = skb;
1270 long tdif = NEIGH_CB(back)->sched_next - now;
1271 1267
1272 skb = skb->next;
1273 if (tdif <= 0) { 1268 if (tdif <= 0) {
1274 struct net_device *dev = back->dev; 1269 struct net_device *dev = skb->dev;
1275 __skb_unlink(back, &tbl->proxy_queue); 1270 __skb_unlink(skb, &tbl->proxy_queue);
1276 if (tbl->proxy_redo && netif_running(dev)) 1271 if (tbl->proxy_redo && netif_running(dev))
1277 tbl->proxy_redo(back); 1272 tbl->proxy_redo(skb);
1278 else 1273 else
1279 kfree_skb(back); 1274 kfree_skb(skb);
1280 1275
1281 dev_put(dev); 1276 dev_put(dev);
1282 } else if (!sched_next || tdif < sched_next) 1277 } else if (!sched_next || tdif < sched_next)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c1f4e0d428c0..92d6b9467314 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -209,9 +209,44 @@ static ssize_t store_tx_queue_len(struct device *dev,
209 return netdev_store(dev, attr, buf, len, change_tx_queue_len); 209 return netdev_store(dev, attr, buf, len, change_tx_queue_len);
210} 210}
211 211
212static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
213 const char *buf, size_t len)
214{
215 struct net_device *netdev = to_net_dev(dev);
216 size_t count = len;
217 ssize_t ret;
218
219 if (!capable(CAP_NET_ADMIN))
220 return -EPERM;
221
222 /* ignore trailing newline */
223 if (len > 0 && buf[len - 1] == '\n')
224 --count;
225
226 rtnl_lock();
227 ret = dev_set_alias(netdev, buf, count);
228 rtnl_unlock();
229
230 return ret < 0 ? ret : len;
231}
232
233static ssize_t show_ifalias(struct device *dev,
234 struct device_attribute *attr, char *buf)
235{
236 const struct net_device *netdev = to_net_dev(dev);
237 ssize_t ret = 0;
238
239 rtnl_lock();
240 if (netdev->ifalias)
241 ret = sprintf(buf, "%s\n", netdev->ifalias);
242 rtnl_unlock();
243 return ret;
244}
245
212static struct device_attribute net_class_attributes[] = { 246static struct device_attribute net_class_attributes[] = {
213 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), 247 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
214 __ATTR(dev_id, S_IRUGO, show_dev_id, NULL), 248 __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
249 __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
215 __ATTR(iflink, S_IRUGO, show_iflink, NULL), 250 __ATTR(iflink, S_IRUGO, show_iflink, NULL),
216 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL), 251 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
217 __ATTR(features, S_IRUGO, show_features, NULL), 252 __ATTR(features, S_IRUGO, show_features, NULL),
@@ -418,6 +453,7 @@ static void netdev_release(struct device *d)
418 453
419 BUG_ON(dev->reg_state != NETREG_RELEASED); 454 BUG_ON(dev->reg_state != NETREG_RELEASED);
420 455
456 kfree(dev->ifalias);
421 kfree((char *)dev - dev->padded); 457 kfree((char *)dev - dev->padded);
422} 458}
423 459
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 71edb8b36341..8862498fd4a6 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -586,6 +586,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
586{ 586{
587 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 587 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
588 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 588 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
589 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
589 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ 590 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
590 + nla_total_size(sizeof(struct rtnl_link_ifmap)) 591 + nla_total_size(sizeof(struct rtnl_link_ifmap))
591 + nla_total_size(sizeof(struct rtnl_link_stats)) 592 + nla_total_size(sizeof(struct rtnl_link_stats))
@@ -640,6 +641,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
640 if (txq->qdisc_sleeping) 641 if (txq->qdisc_sleeping)
641 NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id); 642 NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
642 643
644 if (dev->ifalias)
645 NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
646
643 if (1) { 647 if (1) {
644 struct rtnl_link_ifmap map = { 648 struct rtnl_link_ifmap map = {
645 .mem_start = dev->mem_start, 649 .mem_start = dev->mem_start,
@@ -713,6 +717,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
713 [IFLA_LINKMODE] = { .type = NLA_U8 }, 717 [IFLA_LINKMODE] = { .type = NLA_U8 },
714 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 718 [IFLA_LINKINFO] = { .type = NLA_NESTED },
715 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 719 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
720 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
716}; 721};
717 722
718static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 723static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -853,6 +858,14 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
853 modified = 1; 858 modified = 1;
854 } 859 }
855 860
861 if (tb[IFLA_IFALIAS]) {
862 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
863 nla_len(tb[IFLA_IFALIAS]));
864 if (err < 0)
865 goto errout;
866 modified = 1;
867 }
868
856 if (tb[IFLA_BROADCAST]) { 869 if (tb[IFLA_BROADCAST]) {
857 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 870 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
858 send_addr_notify = 1; 871 send_addr_notify = 1;
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
new file mode 100644
index 000000000000..1f49afcd8e86
--- /dev/null
+++ b/net/core/skb_dma_map.c
@@ -0,0 +1,66 @@
1/* skb_dma_map.c: DMA mapping helpers for socket buffers.
2 *
3 * Copyright (C) David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/dma-mapping.h>
9#include <linux/skbuff.h>
10
11int skb_dma_map(struct device *dev, struct sk_buff *skb,
12 enum dma_data_direction dir)
13{
14 struct skb_shared_info *sp = skb_shinfo(skb);
15 dma_addr_t map;
16 int i;
17
18 map = dma_map_single(dev, skb->data,
19 skb_headlen(skb), dir);
20 if (dma_mapping_error(dev, map))
21 goto out_err;
22
23 sp->dma_maps[0] = map;
24 for (i = 0; i < sp->nr_frags; i++) {
25 skb_frag_t *fp = &sp->frags[i];
26
27 map = dma_map_page(dev, fp->page, fp->page_offset,
28 fp->size, dir);
29 if (dma_mapping_error(dev, map))
30 goto unwind;
31 sp->dma_maps[i + 1] = map;
32 }
33 sp->num_dma_maps = i + 1;
34
35 return 0;
36
37unwind:
38 while (i-- >= 0) {
39 skb_frag_t *fp = &sp->frags[i];
40
41 dma_unmap_page(dev, sp->dma_maps[i + 1],
42 fp->size, dir);
43 }
44 dma_unmap_single(dev, sp->dma_maps[0],
45 skb_headlen(skb), dir);
46out_err:
47 return -ENOMEM;
48}
49EXPORT_SYMBOL(skb_dma_map);
50
51void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
52 enum dma_data_direction dir)
53{
54 struct skb_shared_info *sp = skb_shinfo(skb);
55 int i;
56
57 dma_unmap_single(dev, sp->dma_maps[0],
58 skb_headlen(skb), dir);
59 for (i = 0; i < sp->nr_frags; i++) {
60 skb_frag_t *fp = &sp->frags[i];
61
62 dma_unmap_page(dev, sp->dma_maps[i + 1],
63 fp->size, dir);
64 }
65}
66EXPORT_SYMBOL(skb_dma_unmap);
diff --git a/net/core/sock.c b/net/core/sock.c
index 91f8bbc93526..2d358dd8a03e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -154,7 +154,8 @@ static const char *af_family_key_strings[AF_MAX+1] = {
154 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , 154 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
155 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , 155 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
156 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , 156 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
157 "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX" 157 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
158 "sk_lock-AF_MAX"
158}; 159};
159static const char *af_family_slock_key_strings[AF_MAX+1] = { 160static const char *af_family_slock_key_strings[AF_MAX+1] = {
160 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , 161 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -168,7 +169,8 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = {
168 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , 169 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
169 "slock-27" , "slock-28" , "slock-AF_CAN" , 170 "slock-27" , "slock-28" , "slock-AF_CAN" ,
170 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , 171 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
171 "slock-AF_RXRPC" , "slock-AF_MAX" 172 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
173 "slock-AF_MAX"
172}; 174};
173static const char *af_family_clock_key_strings[AF_MAX+1] = { 175static const char *af_family_clock_key_strings[AF_MAX+1] = {
174 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , 176 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
@@ -182,7 +184,8 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = {
182 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , 184 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
183 "clock-27" , "clock-28" , "clock-AF_CAN" , 185 "clock-27" , "clock-28" , "clock-AF_CAN" ,
184 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 186 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
185 "clock-AF_RXRPC" , "clock-AF_MAX" 187 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
188 "clock-AF_MAX"
186}; 189};
187#endif 190#endif
188 191
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 8e9580874216..9a430734530c 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -783,7 +783,7 @@ static struct ccid_operations ccid2 = {
783}; 783};
784 784
785#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 785#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
786module_param(ccid2_debug, bool, 0444); 786module_param(ccid2_debug, bool, 0644);
787MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); 787MODULE_PARM_DESC(ccid2_debug, "Enable debug messages");
788#endif 788#endif
789 789
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index f6756e0c9e69..3b8bd7ca6761 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -963,7 +963,7 @@ static struct ccid_operations ccid3 = {
963}; 963};
964 964
965#ifdef CONFIG_IP_DCCP_CCID3_DEBUG 965#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
966module_param(ccid3_debug, bool, 0444); 966module_param(ccid3_debug, bool, 0644);
967MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); 967MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
968#endif 968#endif
969 969
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index bcd6ac415bb9..5b3ce0688c5c 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -67,7 +67,10 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
67 u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0; 67 u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0;
68 int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */ 68 int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */
69 69
70 for (i=0; i <= k; i++) { 70 if (k <= 0)
71 return;
72
73 for (i = 0; i <= k; i++) {
71 i_i = tfrc_lh_get_interval(lh, i); 74 i_i = tfrc_lh_get_interval(lh, i);
72 75
73 if (i < k) { 76 if (i < k) {
@@ -78,7 +81,6 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
78 i_tot1 += i_i * tfrc_lh_weights[i-1]; 81 i_tot1 += i_i * tfrc_lh_weights[i-1];
79 } 82 }
80 83
81 BUG_ON(w_tot == 0);
82 lh->i_mean = max(i_tot0, i_tot1) / w_tot; 84 lh->i_mean = max(i_tot0, i_tot1) / w_tot;
83} 85}
84 86
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 97ecec0a8e76..185916218e07 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -10,7 +10,7 @@
10 10
11#ifdef CONFIG_IP_DCCP_TFRC_DEBUG 11#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
12int tfrc_debug; 12int tfrc_debug;
13module_param(tfrc_debug, bool, 0444); 13module_param(tfrc_debug, bool, 0644);
14MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); 14MODULE_PARM_DESC(tfrc_debug, "Enable debug messages");
15#endif 15#endif
16 16
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 803933ab396d..779d0ed9ae94 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -370,7 +370,7 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
370 goto discard; 370 goto discard;
371 371
372 if (dccp_parse_options(sk, NULL, skb)) 372 if (dccp_parse_options(sk, NULL, skb))
373 goto discard; 373 return 1;
374 374
375 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 375 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
376 dccp_event_ack_recv(sk, skb); 376 dccp_event_ack_recv(sk, skb);
@@ -610,7 +610,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
610 * Step 8: Process options and mark acknowledgeable 610 * Step 8: Process options and mark acknowledgeable
611 */ 611 */
612 if (dccp_parse_options(sk, NULL, skb)) 612 if (dccp_parse_options(sk, NULL, skb))
613 goto discard; 613 return 1;
614 614
615 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 615 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
616 dccp_event_ack_recv(sk, skb); 616 dccp_event_ack_recv(sk, skb);
diff --git a/net/dccp/options.c b/net/dccp/options.c
index dc7c158a2f4b..0809b63cb055 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -81,11 +81,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
81 /* Check if this isn't a single byte option */ 81 /* Check if this isn't a single byte option */
82 if (opt > DCCPO_MAX_RESERVED) { 82 if (opt > DCCPO_MAX_RESERVED) {
83 if (opt_ptr == opt_end) 83 if (opt_ptr == opt_end)
84 goto out_invalid_option; 84 goto out_nonsensical_length;
85 85
86 len = *opt_ptr++; 86 len = *opt_ptr++;
87 if (len < 3) 87 if (len < 2)
88 goto out_invalid_option; 88 goto out_nonsensical_length;
89 /* 89 /*
90 * Remove the type and len fields, leaving 90 * Remove the type and len fields, leaving
91 * just the value size 91 * just the value size
@@ -95,7 +95,7 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
95 opt_ptr += len; 95 opt_ptr += len;
96 96
97 if (opt_ptr > opt_end) 97 if (opt_ptr > opt_end)
98 goto out_invalid_option; 98 goto out_nonsensical_length;
99 } 99 }
100 100
101 /* 101 /*
@@ -283,12 +283,17 @@ ignore_option:
283 if (mandatory) 283 if (mandatory)
284 goto out_invalid_option; 284 goto out_invalid_option;
285 285
286out_nonsensical_length:
287 /* RFC 4340, 5.8: ignore option and all remaining option space */
286 return 0; 288 return 0;
287 289
288out_invalid_option: 290out_invalid_option:
289 DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); 291 DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT);
290 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; 292 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR;
291 DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len); 293 DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len);
294 DCCP_SKB_CB(skb)->dccpd_reset_data[0] = opt;
295 DCCP_SKB_CB(skb)->dccpd_reset_data[1] = len > 0 ? value[0] : 0;
296 DCCP_SKB_CB(skb)->dccpd_reset_data[2] = len > 1 ? value[1] : 0;
292 return -1; 297 return -1;
293} 298}
294 299
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 1ca3b26eed0f..d0bd34819761 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -309,7 +309,9 @@ int dccp_disconnect(struct sock *sk, int flags)
309 sk->sk_err = ECONNRESET; 309 sk->sk_err = ECONNRESET;
310 310
311 dccp_clear_xmit_timers(sk); 311 dccp_clear_xmit_timers(sk);
312
312 __skb_queue_purge(&sk->sk_receive_queue); 313 __skb_queue_purge(&sk->sk_receive_queue);
314 __skb_queue_purge(&sk->sk_write_queue);
313 if (sk->sk_send_head != NULL) { 315 if (sk->sk_send_head != NULL) {
314 __kfree_skb(sk->sk_send_head); 316 __kfree_skb(sk->sk_send_head);
315 sk->sk_send_head = NULL; 317 sk->sk_send_head = NULL;
@@ -1028,7 +1030,7 @@ MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
1028 1030
1029#ifdef CONFIG_IP_DCCP_DEBUG 1031#ifdef CONFIG_IP_DCCP_DEBUG
1030int dccp_debug; 1032int dccp_debug;
1031module_param(dccp_debug, bool, 0444); 1033module_param(dccp_debug, bool, 0644);
1032MODULE_PARM_DESC(dccp_debug, "Enable debug messages"); 1034MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
1033 1035
1034EXPORT_SYMBOL_GPL(dccp_debug); 1036EXPORT_SYMBOL_GPL(dccp_debug);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index a80839b02e3f..647a9edee375 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -129,7 +129,7 @@ int eth_rebuild_header(struct sk_buff *skb)
129 129
130 switch (eth->h_proto) { 130 switch (eth->h_proto) {
131#ifdef CONFIG_INET 131#ifdef CONFIG_INET
132 case __constant_htons(ETH_P_IP): 132 case htons(ETH_P_IP):
133 return arp_find(eth->h_dest, skb); 133 return arp_find(eth->h_dest, skb);
134#endif 134#endif
135 default: 135 default:
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 3bca97f55d47..949772a5a7dc 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -157,7 +157,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
157 err = ieee80211_networks_allocate(ieee); 157 err = ieee80211_networks_allocate(ieee);
158 if (err) { 158 if (err) {
159 IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err); 159 IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err);
160 goto failed; 160 goto failed_free_netdev;
161 } 161 }
162 ieee80211_networks_initialize(ieee); 162 ieee80211_networks_initialize(ieee);
163 163
@@ -193,9 +193,9 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
193 193
194 return dev; 194 return dev;
195 195
196 failed: 196failed_free_netdev:
197 if (dev) 197 free_netdev(dev);
198 free_netdev(dev); 198failed:
199 return NULL; 199 return NULL;
200} 200}
201 201
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 91d3d96805d0..b12dae2b0b2d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1029,6 +1029,11 @@ skip:
1029 } 1029 }
1030} 1030}
1031 1031
1032static inline bool inetdev_valid_mtu(unsigned mtu)
1033{
1034 return mtu >= 68;
1035}
1036
1032/* Called only under RTNL semaphore */ 1037/* Called only under RTNL semaphore */
1033 1038
1034static int inetdev_event(struct notifier_block *this, unsigned long event, 1039static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1048,6 +1053,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1048 IN_DEV_CONF_SET(in_dev, NOXFRM, 1); 1053 IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1049 IN_DEV_CONF_SET(in_dev, NOPOLICY, 1); 1054 IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1050 } 1055 }
1056 } else if (event == NETDEV_CHANGEMTU) {
1057 /* Re-enabling IP */
1058 if (inetdev_valid_mtu(dev->mtu))
1059 in_dev = inetdev_init(dev);
1051 } 1060 }
1052 goto out; 1061 goto out;
1053 } 1062 }
@@ -1058,7 +1067,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1058 dev->ip_ptr = NULL; 1067 dev->ip_ptr = NULL;
1059 break; 1068 break;
1060 case NETDEV_UP: 1069 case NETDEV_UP:
1061 if (dev->mtu < 68) 1070 if (!inetdev_valid_mtu(dev->mtu))
1062 break; 1071 break;
1063 if (dev->flags & IFF_LOOPBACK) { 1072 if (dev->flags & IFF_LOOPBACK) {
1064 struct in_ifaddr *ifa; 1073 struct in_ifaddr *ifa;
@@ -1080,9 +1089,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1080 ip_mc_down(in_dev); 1089 ip_mc_down(in_dev);
1081 break; 1090 break;
1082 case NETDEV_CHANGEMTU: 1091 case NETDEV_CHANGEMTU:
1083 if (dev->mtu >= 68) 1092 if (inetdev_valid_mtu(dev->mtu))
1084 break; 1093 break;
1085 /* MTU falled under 68, disable IP */ 1094 /* disable IP when MTU is not enough */
1086 case NETDEV_UNREGISTER: 1095 case NETDEV_UNREGISTER:
1087 inetdev_destroy(in_dev); 1096 inetdev_destroy(in_dev);
1088 break; 1097 break;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c10036e7a463..89cb047ab314 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -782,11 +782,15 @@ skip_listen_ht:
782 struct sock *sk; 782 struct sock *sk;
783 struct hlist_node *node; 783 struct hlist_node *node;
784 784
785 num = 0;
786
787 if (hlist_empty(&head->chain) && hlist_empty(&head->twchain))
788 continue;
789
785 if (i > s_i) 790 if (i > s_i)
786 s_num = 0; 791 s_num = 0;
787 792
788 read_lock_bh(lock); 793 read_lock_bh(lock);
789 num = 0;
790 sk_for_each(sk, node, &head->chain) { 794 sk_for_each(sk, node, &head->chain) {
791 struct inet_sock *inet = inet_sk(sk); 795 struct inet_sock *inet = inet_sk(sk);
792 796
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index d985bd613d25..743f011b9a84 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -409,3 +409,38 @@ out:
409} 409}
410 410
411EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick); 411EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
412
413void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
414 struct inet_timewait_death_row *twdr, int family)
415{
416 struct inet_timewait_sock *tw;
417 struct sock *sk;
418 struct hlist_node *node;
419 int h;
420
421 local_bh_disable();
422 for (h = 0; h < (hashinfo->ehash_size); h++) {
423 struct inet_ehash_bucket *head =
424 inet_ehash_bucket(hashinfo, h);
425 rwlock_t *lock = inet_ehash_lockp(hashinfo, h);
426restart:
427 write_lock(lock);
428 sk_for_each(sk, node, &head->twchain) {
429
430 tw = inet_twsk(sk);
431 if (!net_eq(twsk_net(tw), net) ||
432 tw->tw_family != family)
433 continue;
434
435 atomic_inc(&tw->tw_refcnt);
436 write_unlock(lock);
437 inet_twsk_deschedule(tw, twdr);
438 inet_twsk_put(tw);
439
440 goto restart;
441 }
442 write_unlock(lock);
443 }
444 local_bh_enable();
445}
446EXPORT_SYMBOL_GPL(inet_twsk_purge);
diff --git a/net/ipv4/ipvs/Kconfig b/net/ipv4/ipvs/Kconfig
index 09d0c3f35669..de6004de80bc 100644
--- a/net/ipv4/ipvs/Kconfig
+++ b/net/ipv4/ipvs/Kconfig
@@ -24,6 +24,14 @@ menuconfig IP_VS
24 24
25if IP_VS 25if IP_VS
26 26
27config IP_VS_IPV6
28 bool "IPv6 support for IPVS (DANGEROUS)"
29 depends on EXPERIMENTAL && (IPV6 = y || IP_VS = IPV6)
30 ---help---
31 Add IPv6 support to IPVS. This is incomplete and might be dangerous.
32
33 Say N if unsure.
34
27config IP_VS_DEBUG 35config IP_VS_DEBUG
28 bool "IP virtual server debugging" 36 bool "IP virtual server debugging"
29 ---help--- 37 ---help---
@@ -33,7 +41,8 @@ config IP_VS_DEBUG
33 41
34config IP_VS_TAB_BITS 42config IP_VS_TAB_BITS
35 int "IPVS connection table size (the Nth power of 2)" 43 int "IPVS connection table size (the Nth power of 2)"
36 default "12" 44 range 8 20
45 default 12
37 ---help--- 46 ---help---
38 The IPVS connection hash table uses the chaining scheme to handle 47 The IPVS connection hash table uses the chaining scheme to handle
39 hash collisions. Using a big IPVS connection hash table will greatly 48 hash collisions. Using a big IPVS connection hash table will greatly
@@ -71,14 +80,20 @@ config IP_VS_PROTO_UDP
71 This option enables support for load balancing UDP transport 80 This option enables support for load balancing UDP transport
72 protocol. Say Y if unsure. 81 protocol. Say Y if unsure.
73 82
83config IP_VS_PROTO_AH_ESP
84 bool
85 depends on UNDEFINED
86
74config IP_VS_PROTO_ESP 87config IP_VS_PROTO_ESP
75 bool "ESP load balancing support" 88 bool "ESP load balancing support"
89 select IP_VS_PROTO_AH_ESP
76 ---help--- 90 ---help---
77 This option enables support for load balancing ESP (Encapsulation 91 This option enables support for load balancing ESP (Encapsulation
78 Security Payload) transport protocol. Say Y if unsure. 92 Security Payload) transport protocol. Say Y if unsure.
79 93
80config IP_VS_PROTO_AH 94config IP_VS_PROTO_AH
81 bool "AH load balancing support" 95 bool "AH load balancing support"
96 select IP_VS_PROTO_AH_ESP
82 ---help--- 97 ---help---
83 This option enables support for load balancing AH (Authentication 98 This option enables support for load balancing AH (Authentication
84 Header) transport protocol. Say Y if unsure. 99 Header) transport protocol. Say Y if unsure.
diff --git a/net/ipv4/ipvs/Makefile b/net/ipv4/ipvs/Makefile
index 30e85de9ffff..73a46fe1fe4c 100644
--- a/net/ipv4/ipvs/Makefile
+++ b/net/ipv4/ipvs/Makefile
@@ -6,8 +6,7 @@
6ip_vs_proto-objs-y := 6ip_vs_proto-objs-y :=
7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o 7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o
8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o 8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o
9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_ESP) += ip_vs_proto_esp.o 9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o
10ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH) += ip_vs_proto_ah.o
11 10
12ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ 11ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \
13 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ 12 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 44a6872dc245..9a24332fbed8 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -114,9 +114,18 @@ static inline void ct_write_unlock_bh(unsigned key)
114/* 114/*
115 * Returns hash value for IPVS connection entry 115 * Returns hash value for IPVS connection entry
116 */ 116 */
117static unsigned int ip_vs_conn_hashkey(unsigned proto, __be32 addr, __be16 port) 117static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
118 const union nf_inet_addr *addr,
119 __be16 port)
118{ 120{
119 return jhash_3words((__force u32)addr, (__force u32)port, proto, ip_vs_conn_rnd) 121#ifdef CONFIG_IP_VS_IPV6
122 if (af == AF_INET6)
123 return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
124 (__force u32)port, proto, ip_vs_conn_rnd)
125 & IP_VS_CONN_TAB_MASK;
126#endif
127 return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
128 ip_vs_conn_rnd)
120 & IP_VS_CONN_TAB_MASK; 129 & IP_VS_CONN_TAB_MASK;
121} 130}
122 131
@@ -131,7 +140,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
131 int ret; 140 int ret;
132 141
133 /* Hash by protocol, client address and port */ 142 /* Hash by protocol, client address and port */
134 hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport); 143 hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
135 144
136 ct_write_lock(hash); 145 ct_write_lock(hash);
137 146
@@ -162,7 +171,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
162 int ret; 171 int ret;
163 172
164 /* unhash it and decrease its reference counter */ 173 /* unhash it and decrease its reference counter */
165 hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport); 174 hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
166 175
167 ct_write_lock(hash); 176 ct_write_lock(hash);
168 177
@@ -187,20 +196,23 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
187 * d_addr, d_port: pkt dest address (load balancer) 196 * d_addr, d_port: pkt dest address (load balancer)
188 */ 197 */
189static inline struct ip_vs_conn *__ip_vs_conn_in_get 198static inline struct ip_vs_conn *__ip_vs_conn_in_get
190(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) 199(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
200 const union nf_inet_addr *d_addr, __be16 d_port)
191{ 201{
192 unsigned hash; 202 unsigned hash;
193 struct ip_vs_conn *cp; 203 struct ip_vs_conn *cp;
194 204
195 hash = ip_vs_conn_hashkey(protocol, s_addr, s_port); 205 hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port);
196 206
197 ct_read_lock(hash); 207 ct_read_lock(hash);
198 208
199 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 209 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
200 if (s_addr==cp->caddr && s_port==cp->cport && 210 if (cp->af == af &&
201 d_port==cp->vport && d_addr==cp->vaddr && 211 ip_vs_addr_equal(af, s_addr, &cp->caddr) &&
212 ip_vs_addr_equal(af, d_addr, &cp->vaddr) &&
213 s_port == cp->cport && d_port == cp->vport &&
202 ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) && 214 ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
203 protocol==cp->protocol) { 215 protocol == cp->protocol) {
204 /* HIT */ 216 /* HIT */
205 atomic_inc(&cp->refcnt); 217 atomic_inc(&cp->refcnt);
206 ct_read_unlock(hash); 218 ct_read_unlock(hash);
@@ -214,39 +226,44 @@ static inline struct ip_vs_conn *__ip_vs_conn_in_get
214} 226}
215 227
216struct ip_vs_conn *ip_vs_conn_in_get 228struct ip_vs_conn *ip_vs_conn_in_get
217(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) 229(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
230 const union nf_inet_addr *d_addr, __be16 d_port)
218{ 231{
219 struct ip_vs_conn *cp; 232 struct ip_vs_conn *cp;
220 233
221 cp = __ip_vs_conn_in_get(protocol, s_addr, s_port, d_addr, d_port); 234 cp = __ip_vs_conn_in_get(af, protocol, s_addr, s_port, d_addr, d_port);
222 if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) 235 if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt))
223 cp = __ip_vs_conn_in_get(protocol, s_addr, 0, d_addr, d_port); 236 cp = __ip_vs_conn_in_get(af, protocol, s_addr, 0, d_addr,
237 d_port);
224 238
225 IP_VS_DBG(9, "lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", 239 IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
226 ip_vs_proto_name(protocol), 240 ip_vs_proto_name(protocol),
227 NIPQUAD(s_addr), ntohs(s_port), 241 IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
228 NIPQUAD(d_addr), ntohs(d_port), 242 IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
229 cp?"hit":"not hit"); 243 cp ? "hit" : "not hit");
230 244
231 return cp; 245 return cp;
232} 246}
233 247
234/* Get reference to connection template */ 248/* Get reference to connection template */
235struct ip_vs_conn *ip_vs_ct_in_get 249struct ip_vs_conn *ip_vs_ct_in_get
236(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) 250(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
251 const union nf_inet_addr *d_addr, __be16 d_port)
237{ 252{
238 unsigned hash; 253 unsigned hash;
239 struct ip_vs_conn *cp; 254 struct ip_vs_conn *cp;
240 255
241 hash = ip_vs_conn_hashkey(protocol, s_addr, s_port); 256 hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port);
242 257
243 ct_read_lock(hash); 258 ct_read_lock(hash);
244 259
245 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 260 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
246 if (s_addr==cp->caddr && s_port==cp->cport && 261 if (cp->af == af &&
247 d_port==cp->vport && d_addr==cp->vaddr && 262 ip_vs_addr_equal(af, s_addr, &cp->caddr) &&
263 ip_vs_addr_equal(af, d_addr, &cp->vaddr) &&
264 s_port == cp->cport && d_port == cp->vport &&
248 cp->flags & IP_VS_CONN_F_TEMPLATE && 265 cp->flags & IP_VS_CONN_F_TEMPLATE &&
249 protocol==cp->protocol) { 266 protocol == cp->protocol) {
250 /* HIT */ 267 /* HIT */
251 atomic_inc(&cp->refcnt); 268 atomic_inc(&cp->refcnt);
252 goto out; 269 goto out;
@@ -257,11 +274,11 @@ struct ip_vs_conn *ip_vs_ct_in_get
257 out: 274 out:
258 ct_read_unlock(hash); 275 ct_read_unlock(hash);
259 276
260 IP_VS_DBG(9, "template lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", 277 IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
261 ip_vs_proto_name(protocol), 278 ip_vs_proto_name(protocol),
262 NIPQUAD(s_addr), ntohs(s_port), 279 IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
263 NIPQUAD(d_addr), ntohs(d_port), 280 IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
264 cp?"hit":"not hit"); 281 cp ? "hit" : "not hit");
265 282
266 return cp; 283 return cp;
267} 284}
@@ -273,7 +290,8 @@ struct ip_vs_conn *ip_vs_ct_in_get
273 * d_addr, d_port: pkt dest address (foreign host) 290 * d_addr, d_port: pkt dest address (foreign host)
274 */ 291 */
275struct ip_vs_conn *ip_vs_conn_out_get 292struct ip_vs_conn *ip_vs_conn_out_get
276(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) 293(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
294 const union nf_inet_addr *d_addr, __be16 d_port)
277{ 295{
278 unsigned hash; 296 unsigned hash;
279 struct ip_vs_conn *cp, *ret=NULL; 297 struct ip_vs_conn *cp, *ret=NULL;
@@ -281,13 +299,15 @@ struct ip_vs_conn *ip_vs_conn_out_get
281 /* 299 /*
282 * Check for "full" addressed entries 300 * Check for "full" addressed entries
283 */ 301 */
284 hash = ip_vs_conn_hashkey(protocol, d_addr, d_port); 302 hash = ip_vs_conn_hashkey(af, protocol, d_addr, d_port);
285 303
286 ct_read_lock(hash); 304 ct_read_lock(hash);
287 305
288 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 306 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
289 if (d_addr == cp->caddr && d_port == cp->cport && 307 if (cp->af == af &&
290 s_port == cp->dport && s_addr == cp->daddr && 308 ip_vs_addr_equal(af, d_addr, &cp->caddr) &&
309 ip_vs_addr_equal(af, s_addr, &cp->daddr) &&
310 d_port == cp->cport && s_port == cp->dport &&
291 protocol == cp->protocol) { 311 protocol == cp->protocol) {
292 /* HIT */ 312 /* HIT */
293 atomic_inc(&cp->refcnt); 313 atomic_inc(&cp->refcnt);
@@ -298,11 +318,11 @@ struct ip_vs_conn *ip_vs_conn_out_get
298 318
299 ct_read_unlock(hash); 319 ct_read_unlock(hash);
300 320
301 IP_VS_DBG(9, "lookup/out %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", 321 IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
302 ip_vs_proto_name(protocol), 322 ip_vs_proto_name(protocol),
303 NIPQUAD(s_addr), ntohs(s_port), 323 IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
304 NIPQUAD(d_addr), ntohs(d_port), 324 IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
305 ret?"hit":"not hit"); 325 ret ? "hit" : "not hit");
306 326
307 return ret; 327 return ret;
308} 328}
@@ -369,6 +389,33 @@ static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
369 } 389 }
370} 390}
371 391
392#ifdef CONFIG_IP_VS_IPV6
393static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
394{
395 switch (IP_VS_FWD_METHOD(cp)) {
396 case IP_VS_CONN_F_MASQ:
397 cp->packet_xmit = ip_vs_nat_xmit_v6;
398 break;
399
400 case IP_VS_CONN_F_TUNNEL:
401 cp->packet_xmit = ip_vs_tunnel_xmit_v6;
402 break;
403
404 case IP_VS_CONN_F_DROUTE:
405 cp->packet_xmit = ip_vs_dr_xmit_v6;
406 break;
407
408 case IP_VS_CONN_F_LOCALNODE:
409 cp->packet_xmit = ip_vs_null_xmit;
410 break;
411
412 case IP_VS_CONN_F_BYPASS:
413 cp->packet_xmit = ip_vs_bypass_xmit_v6;
414 break;
415 }
416}
417#endif
418
372 419
373static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest) 420static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
374{ 421{
@@ -402,16 +449,16 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
402 cp->flags |= atomic_read(&dest->conn_flags); 449 cp->flags |= atomic_read(&dest->conn_flags);
403 cp->dest = dest; 450 cp->dest = dest;
404 451
405 IP_VS_DBG(7, "Bind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " 452 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
406 "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " 453 "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
407 "dest->refcnt:%d\n", 454 "dest->refcnt:%d\n",
408 ip_vs_proto_name(cp->protocol), 455 ip_vs_proto_name(cp->protocol),
409 NIPQUAD(cp->caddr), ntohs(cp->cport), 456 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
410 NIPQUAD(cp->vaddr), ntohs(cp->vport), 457 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
411 NIPQUAD(cp->daddr), ntohs(cp->dport), 458 IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
412 ip_vs_fwd_tag(cp), cp->state, 459 ip_vs_fwd_tag(cp), cp->state,
413 cp->flags, atomic_read(&cp->refcnt), 460 cp->flags, atomic_read(&cp->refcnt),
414 atomic_read(&dest->refcnt)); 461 atomic_read(&dest->refcnt));
415 462
416 /* Update the connection counters */ 463 /* Update the connection counters */
417 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { 464 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -444,8 +491,9 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
444 struct ip_vs_dest *dest; 491 struct ip_vs_dest *dest;
445 492
446 if ((cp) && (!cp->dest)) { 493 if ((cp) && (!cp->dest)) {
447 dest = ip_vs_find_dest(cp->daddr, cp->dport, 494 dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport,
448 cp->vaddr, cp->vport, cp->protocol); 495 &cp->vaddr, cp->vport,
496 cp->protocol);
449 ip_vs_bind_dest(cp, dest); 497 ip_vs_bind_dest(cp, dest);
450 return dest; 498 return dest;
451 } else 499 } else
@@ -464,16 +512,16 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
464 if (!dest) 512 if (!dest)
465 return; 513 return;
466 514
467 IP_VS_DBG(7, "Unbind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " 515 IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
468 "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " 516 "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
469 "dest->refcnt:%d\n", 517 "dest->refcnt:%d\n",
470 ip_vs_proto_name(cp->protocol), 518 ip_vs_proto_name(cp->protocol),
471 NIPQUAD(cp->caddr), ntohs(cp->cport), 519 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
472 NIPQUAD(cp->vaddr), ntohs(cp->vport), 520 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
473 NIPQUAD(cp->daddr), ntohs(cp->dport), 521 IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
474 ip_vs_fwd_tag(cp), cp->state, 522 ip_vs_fwd_tag(cp), cp->state,
475 cp->flags, atomic_read(&cp->refcnt), 523 cp->flags, atomic_read(&cp->refcnt),
476 atomic_read(&dest->refcnt)); 524 atomic_read(&dest->refcnt));
477 525
478 /* Update the connection counters */ 526 /* Update the connection counters */
479 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { 527 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -526,13 +574,16 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
526 !(dest->flags & IP_VS_DEST_F_AVAILABLE) || 574 !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
527 (sysctl_ip_vs_expire_quiescent_template && 575 (sysctl_ip_vs_expire_quiescent_template &&
528 (atomic_read(&dest->weight) == 0))) { 576 (atomic_read(&dest->weight) == 0))) {
529 IP_VS_DBG(9, "check_template: dest not available for " 577 IP_VS_DBG_BUF(9, "check_template: dest not available for "
530 "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " 578 "protocol %s s:%s:%d v:%s:%d "
531 "-> d:%u.%u.%u.%u:%d\n", 579 "-> d:%s:%d\n",
532 ip_vs_proto_name(ct->protocol), 580 ip_vs_proto_name(ct->protocol),
533 NIPQUAD(ct->caddr), ntohs(ct->cport), 581 IP_VS_DBG_ADDR(ct->af, &ct->caddr),
534 NIPQUAD(ct->vaddr), ntohs(ct->vport), 582 ntohs(ct->cport),
535 NIPQUAD(ct->daddr), ntohs(ct->dport)); 583 IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
584 ntohs(ct->vport),
585 IP_VS_DBG_ADDR(ct->af, &ct->daddr),
586 ntohs(ct->dport));
536 587
537 /* 588 /*
538 * Invalidate the connection template 589 * Invalidate the connection template
@@ -625,8 +676,9 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
625 * Create a new connection entry and hash it into the ip_vs_conn_tab 676 * Create a new connection entry and hash it into the ip_vs_conn_tab
626 */ 677 */
627struct ip_vs_conn * 678struct ip_vs_conn *
628ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport, 679ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
629 __be32 daddr, __be16 dport, unsigned flags, 680 const union nf_inet_addr *vaddr, __be16 vport,
681 const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
630 struct ip_vs_dest *dest) 682 struct ip_vs_dest *dest)
631{ 683{
632 struct ip_vs_conn *cp; 684 struct ip_vs_conn *cp;
@@ -640,12 +692,13 @@ ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport
640 692
641 INIT_LIST_HEAD(&cp->c_list); 693 INIT_LIST_HEAD(&cp->c_list);
642 setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); 694 setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
695 cp->af = af;
643 cp->protocol = proto; 696 cp->protocol = proto;
644 cp->caddr = caddr; 697 ip_vs_addr_copy(af, &cp->caddr, caddr);
645 cp->cport = cport; 698 cp->cport = cport;
646 cp->vaddr = vaddr; 699 ip_vs_addr_copy(af, &cp->vaddr, vaddr);
647 cp->vport = vport; 700 cp->vport = vport;
648 cp->daddr = daddr; 701 ip_vs_addr_copy(af, &cp->daddr, daddr);
649 cp->dport = dport; 702 cp->dport = dport;
650 cp->flags = flags; 703 cp->flags = flags;
651 spin_lock_init(&cp->lock); 704 spin_lock_init(&cp->lock);
@@ -672,7 +725,12 @@ ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport
672 cp->timeout = 3*HZ; 725 cp->timeout = 3*HZ;
673 726
674 /* Bind its packet transmitter */ 727 /* Bind its packet transmitter */
675 ip_vs_bind_xmit(cp); 728#ifdef CONFIG_IP_VS_IPV6
729 if (af == AF_INET6)
730 ip_vs_bind_xmit_v6(cp);
731 else
732#endif
733 ip_vs_bind_xmit(cp);
676 734
677 if (unlikely(pp && atomic_read(&pp->appcnt))) 735 if (unlikely(pp && atomic_read(&pp->appcnt)))
678 ip_vs_bind_app(cp, pp); 736 ip_vs_bind_app(cp, pp);
@@ -760,12 +818,26 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
760 else { 818 else {
761 const struct ip_vs_conn *cp = v; 819 const struct ip_vs_conn *cp = v;
762 820
763 seq_printf(seq, 821#ifdef CONFIG_IP_VS_IPV6
764 "%-3s %08X %04X %08X %04X %08X %04X %-11s %7lu\n", 822 if (cp->af == AF_INET6)
823 seq_printf(seq,
824 "%-3s " NIP6_FMT " %04X " NIP6_FMT
825 " %04X " NIP6_FMT " %04X %-11s %7lu\n",
826 ip_vs_proto_name(cp->protocol),
827 NIP6(cp->caddr.in6), ntohs(cp->cport),
828 NIP6(cp->vaddr.in6), ntohs(cp->vport),
829 NIP6(cp->daddr.in6), ntohs(cp->dport),
830 ip_vs_state_name(cp->protocol, cp->state),
831 (cp->timer.expires-jiffies)/HZ);
832 else
833#endif
834 seq_printf(seq,
835 "%-3s %08X %04X %08X %04X"
836 " %08X %04X %-11s %7lu\n",
765 ip_vs_proto_name(cp->protocol), 837 ip_vs_proto_name(cp->protocol),
766 ntohl(cp->caddr), ntohs(cp->cport), 838 ntohl(cp->caddr.ip), ntohs(cp->cport),
767 ntohl(cp->vaddr), ntohs(cp->vport), 839 ntohl(cp->vaddr.ip), ntohs(cp->vport),
768 ntohl(cp->daddr), ntohs(cp->dport), 840 ntohl(cp->daddr.ip), ntohs(cp->dport),
769 ip_vs_state_name(cp->protocol, cp->state), 841 ip_vs_state_name(cp->protocol, cp->state),
770 (cp->timer.expires-jiffies)/HZ); 842 (cp->timer.expires-jiffies)/HZ);
771 } 843 }
@@ -809,12 +881,27 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
809 else { 881 else {
810 const struct ip_vs_conn *cp = v; 882 const struct ip_vs_conn *cp = v;
811 883
812 seq_printf(seq, 884#ifdef CONFIG_IP_VS_IPV6
813 "%-3s %08X %04X %08X %04X %08X %04X %-11s %-6s %7lu\n", 885 if (cp->af == AF_INET6)
886 seq_printf(seq,
887 "%-3s " NIP6_FMT " %04X " NIP6_FMT
888 " %04X " NIP6_FMT " %04X %-11s %-6s %7lu\n",
889 ip_vs_proto_name(cp->protocol),
890 NIP6(cp->caddr.in6), ntohs(cp->cport),
891 NIP6(cp->vaddr.in6), ntohs(cp->vport),
892 NIP6(cp->daddr.in6), ntohs(cp->dport),
893 ip_vs_state_name(cp->protocol, cp->state),
894 ip_vs_origin_name(cp->flags),
895 (cp->timer.expires-jiffies)/HZ);
896 else
897#endif
898 seq_printf(seq,
899 "%-3s %08X %04X %08X %04X "
900 "%08X %04X %-11s %-6s %7lu\n",
814 ip_vs_proto_name(cp->protocol), 901 ip_vs_proto_name(cp->protocol),
815 ntohl(cp->caddr), ntohs(cp->cport), 902 ntohl(cp->caddr.ip), ntohs(cp->cport),
816 ntohl(cp->vaddr), ntohs(cp->vport), 903 ntohl(cp->vaddr.ip), ntohs(cp->vport),
817 ntohl(cp->daddr), ntohs(cp->dport), 904 ntohl(cp->daddr.ip), ntohs(cp->dport),
818 ip_vs_state_name(cp->protocol, cp->state), 905 ip_vs_state_name(cp->protocol, cp->state),
819 ip_vs_origin_name(cp->flags), 906 ip_vs_origin_name(cp->flags),
820 (cp->timer.expires-jiffies)/HZ); 907 (cp->timer.expires-jiffies)/HZ);
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index a7879eafc3b5..958abf3e5f8c 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -39,6 +39,11 @@
39#include <linux/netfilter.h> 39#include <linux/netfilter.h>
40#include <linux/netfilter_ipv4.h> 40#include <linux/netfilter_ipv4.h>
41 41
42#ifdef CONFIG_IP_VS_IPV6
43#include <net/ipv6.h>
44#include <linux/netfilter_ipv6.h>
45#endif
46
42#include <net/ip_vs.h> 47#include <net/ip_vs.h>
43 48
44 49
@@ -60,6 +65,7 @@ EXPORT_SYMBOL(ip_vs_get_debug_level);
60 65
61/* ID used in ICMP lookups */ 66/* ID used in ICMP lookups */
62#define icmp_id(icmph) (((icmph)->un).echo.id) 67#define icmp_id(icmph) (((icmph)->un).echo.id)
68#define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
63 69
64const char *ip_vs_proto_name(unsigned proto) 70const char *ip_vs_proto_name(unsigned proto)
65{ 71{
@@ -74,6 +80,10 @@ const char *ip_vs_proto_name(unsigned proto)
74 return "TCP"; 80 return "TCP";
75 case IPPROTO_ICMP: 81 case IPPROTO_ICMP:
76 return "ICMP"; 82 return "ICMP";
83#ifdef CONFIG_IP_VS_IPV6
84 case IPPROTO_ICMPV6:
85 return "ICMPv6";
86#endif
77 default: 87 default:
78 sprintf(buf, "IP_%d", proto); 88 sprintf(buf, "IP_%d", proto);
79 return buf; 89 return buf;
@@ -92,18 +102,18 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
92 struct ip_vs_dest *dest = cp->dest; 102 struct ip_vs_dest *dest = cp->dest;
93 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 103 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
94 spin_lock(&dest->stats.lock); 104 spin_lock(&dest->stats.lock);
95 dest->stats.inpkts++; 105 dest->stats.ustats.inpkts++;
96 dest->stats.inbytes += skb->len; 106 dest->stats.ustats.inbytes += skb->len;
97 spin_unlock(&dest->stats.lock); 107 spin_unlock(&dest->stats.lock);
98 108
99 spin_lock(&dest->svc->stats.lock); 109 spin_lock(&dest->svc->stats.lock);
100 dest->svc->stats.inpkts++; 110 dest->svc->stats.ustats.inpkts++;
101 dest->svc->stats.inbytes += skb->len; 111 dest->svc->stats.ustats.inbytes += skb->len;
102 spin_unlock(&dest->svc->stats.lock); 112 spin_unlock(&dest->svc->stats.lock);
103 113
104 spin_lock(&ip_vs_stats.lock); 114 spin_lock(&ip_vs_stats.lock);
105 ip_vs_stats.inpkts++; 115 ip_vs_stats.ustats.inpkts++;
106 ip_vs_stats.inbytes += skb->len; 116 ip_vs_stats.ustats.inbytes += skb->len;
107 spin_unlock(&ip_vs_stats.lock); 117 spin_unlock(&ip_vs_stats.lock);
108 } 118 }
109} 119}
@@ -115,18 +125,18 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
115 struct ip_vs_dest *dest = cp->dest; 125 struct ip_vs_dest *dest = cp->dest;
116 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 126 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
117 spin_lock(&dest->stats.lock); 127 spin_lock(&dest->stats.lock);
118 dest->stats.outpkts++; 128 dest->stats.ustats.outpkts++;
119 dest->stats.outbytes += skb->len; 129 dest->stats.ustats.outbytes += skb->len;
120 spin_unlock(&dest->stats.lock); 130 spin_unlock(&dest->stats.lock);
121 131
122 spin_lock(&dest->svc->stats.lock); 132 spin_lock(&dest->svc->stats.lock);
123 dest->svc->stats.outpkts++; 133 dest->svc->stats.ustats.outpkts++;
124 dest->svc->stats.outbytes += skb->len; 134 dest->svc->stats.ustats.outbytes += skb->len;
125 spin_unlock(&dest->svc->stats.lock); 135 spin_unlock(&dest->svc->stats.lock);
126 136
127 spin_lock(&ip_vs_stats.lock); 137 spin_lock(&ip_vs_stats.lock);
128 ip_vs_stats.outpkts++; 138 ip_vs_stats.ustats.outpkts++;
129 ip_vs_stats.outbytes += skb->len; 139 ip_vs_stats.ustats.outbytes += skb->len;
130 spin_unlock(&ip_vs_stats.lock); 140 spin_unlock(&ip_vs_stats.lock);
131 } 141 }
132} 142}
@@ -136,15 +146,15 @@ static inline void
136ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) 146ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
137{ 147{
138 spin_lock(&cp->dest->stats.lock); 148 spin_lock(&cp->dest->stats.lock);
139 cp->dest->stats.conns++; 149 cp->dest->stats.ustats.conns++;
140 spin_unlock(&cp->dest->stats.lock); 150 spin_unlock(&cp->dest->stats.lock);
141 151
142 spin_lock(&svc->stats.lock); 152 spin_lock(&svc->stats.lock);
143 svc->stats.conns++; 153 svc->stats.ustats.conns++;
144 spin_unlock(&svc->stats.lock); 154 spin_unlock(&svc->stats.lock);
145 155
146 spin_lock(&ip_vs_stats.lock); 156 spin_lock(&ip_vs_stats.lock);
147 ip_vs_stats.conns++; 157 ip_vs_stats.ustats.conns++;
148 spin_unlock(&ip_vs_stats.lock); 158 spin_unlock(&ip_vs_stats.lock);
149} 159}
150 160
@@ -173,20 +183,28 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
173 __be16 ports[2]) 183 __be16 ports[2])
174{ 184{
175 struct ip_vs_conn *cp = NULL; 185 struct ip_vs_conn *cp = NULL;
176 struct iphdr *iph = ip_hdr(skb); 186 struct ip_vs_iphdr iph;
177 struct ip_vs_dest *dest; 187 struct ip_vs_dest *dest;
178 struct ip_vs_conn *ct; 188 struct ip_vs_conn *ct;
179 __be16 dport; /* destination port to forward */ 189 __be16 dport; /* destination port to forward */
180 __be32 snet; /* source network of the client, after masking */ 190 union nf_inet_addr snet; /* source network of the client,
191 after masking */
192
193 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
181 194
182 /* Mask saddr with the netmask to adjust template granularity */ 195 /* Mask saddr with the netmask to adjust template granularity */
183 snet = iph->saddr & svc->netmask; 196#ifdef CONFIG_IP_VS_IPV6
197 if (svc->af == AF_INET6)
198 ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask);
199 else
200#endif
201 snet.ip = iph.saddr.ip & svc->netmask;
184 202
185 IP_VS_DBG(6, "p-schedule: src %u.%u.%u.%u:%u dest %u.%u.%u.%u:%u " 203 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
186 "mnet %u.%u.%u.%u\n", 204 "mnet %s\n",
187 NIPQUAD(iph->saddr), ntohs(ports[0]), 205 IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]),
188 NIPQUAD(iph->daddr), ntohs(ports[1]), 206 IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]),
189 NIPQUAD(snet)); 207 IP_VS_DBG_ADDR(svc->af, &snet));
190 208
191 /* 209 /*
192 * As far as we know, FTP is a very complicated network protocol, and 210 * As far as we know, FTP is a very complicated network protocol, and
@@ -204,11 +222,11 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
204 if (ports[1] == svc->port) { 222 if (ports[1] == svc->port) {
205 /* Check if a template already exists */ 223 /* Check if a template already exists */
206 if (svc->port != FTPPORT) 224 if (svc->port != FTPPORT)
207 ct = ip_vs_ct_in_get(iph->protocol, snet, 0, 225 ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0,
208 iph->daddr, ports[1]); 226 &iph.daddr, ports[1]);
209 else 227 else
210 ct = ip_vs_ct_in_get(iph->protocol, snet, 0, 228 ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0,
211 iph->daddr, 0); 229 &iph.daddr, 0);
212 230
213 if (!ct || !ip_vs_check_template(ct)) { 231 if (!ct || !ip_vs_check_template(ct)) {
214 /* 232 /*
@@ -228,18 +246,18 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
228 * for ftp service. 246 * for ftp service.
229 */ 247 */
230 if (svc->port != FTPPORT) 248 if (svc->port != FTPPORT)
231 ct = ip_vs_conn_new(iph->protocol, 249 ct = ip_vs_conn_new(svc->af, iph.protocol,
232 snet, 0, 250 &snet, 0,
233 iph->daddr, 251 &iph.daddr,
234 ports[1], 252 ports[1],
235 dest->addr, dest->port, 253 &dest->addr, dest->port,
236 IP_VS_CONN_F_TEMPLATE, 254 IP_VS_CONN_F_TEMPLATE,
237 dest); 255 dest);
238 else 256 else
239 ct = ip_vs_conn_new(iph->protocol, 257 ct = ip_vs_conn_new(svc->af, iph.protocol,
240 snet, 0, 258 &snet, 0,
241 iph->daddr, 0, 259 &iph.daddr, 0,
242 dest->addr, 0, 260 &dest->addr, 0,
243 IP_VS_CONN_F_TEMPLATE, 261 IP_VS_CONN_F_TEMPLATE,
244 dest); 262 dest);
245 if (ct == NULL) 263 if (ct == NULL)
@@ -258,12 +276,16 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
258 * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0> 276 * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
259 * port zero template: <protocol,caddr,0,vaddr,0,daddr,0> 277 * port zero template: <protocol,caddr,0,vaddr,0,daddr,0>
260 */ 278 */
261 if (svc->fwmark) 279 if (svc->fwmark) {
262 ct = ip_vs_ct_in_get(IPPROTO_IP, snet, 0, 280 union nf_inet_addr fwmark = {
263 htonl(svc->fwmark), 0); 281 .all = { 0, 0, 0, htonl(svc->fwmark) }
264 else 282 };
265 ct = ip_vs_ct_in_get(iph->protocol, snet, 0, 283
266 iph->daddr, 0); 284 ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0,
285 &fwmark, 0);
286 } else
287 ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0,
288 &iph.daddr, 0);
267 289
268 if (!ct || !ip_vs_check_template(ct)) { 290 if (!ct || !ip_vs_check_template(ct)) {
269 /* 291 /*
@@ -282,18 +304,22 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
282 /* 304 /*
283 * Create a template according to the service 305 * Create a template according to the service
284 */ 306 */
285 if (svc->fwmark) 307 if (svc->fwmark) {
286 ct = ip_vs_conn_new(IPPROTO_IP, 308 union nf_inet_addr fwmark = {
287 snet, 0, 309 .all = { 0, 0, 0, htonl(svc->fwmark) }
288 htonl(svc->fwmark), 0, 310 };
289 dest->addr, 0, 311
312 ct = ip_vs_conn_new(svc->af, IPPROTO_IP,
313 &snet, 0,
314 &fwmark, 0,
315 &dest->addr, 0,
290 IP_VS_CONN_F_TEMPLATE, 316 IP_VS_CONN_F_TEMPLATE,
291 dest); 317 dest);
292 else 318 } else
293 ct = ip_vs_conn_new(iph->protocol, 319 ct = ip_vs_conn_new(svc->af, iph.protocol,
294 snet, 0, 320 &snet, 0,
295 iph->daddr, 0, 321 &iph.daddr, 0,
296 dest->addr, 0, 322 &dest->addr, 0,
297 IP_VS_CONN_F_TEMPLATE, 323 IP_VS_CONN_F_TEMPLATE,
298 dest); 324 dest);
299 if (ct == NULL) 325 if (ct == NULL)
@@ -310,10 +336,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
310 /* 336 /*
311 * Create a new connection according to the template 337 * Create a new connection according to the template
312 */ 338 */
313 cp = ip_vs_conn_new(iph->protocol, 339 cp = ip_vs_conn_new(svc->af, iph.protocol,
314 iph->saddr, ports[0], 340 &iph.saddr, ports[0],
315 iph->daddr, ports[1], 341 &iph.daddr, ports[1],
316 dest->addr, dport, 342 &dest->addr, dport,
317 0, 343 0,
318 dest); 344 dest);
319 if (cp == NULL) { 345 if (cp == NULL) {
@@ -342,12 +368,12 @@ struct ip_vs_conn *
342ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 368ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
343{ 369{
344 struct ip_vs_conn *cp = NULL; 370 struct ip_vs_conn *cp = NULL;
345 struct iphdr *iph = ip_hdr(skb); 371 struct ip_vs_iphdr iph;
346 struct ip_vs_dest *dest; 372 struct ip_vs_dest *dest;
347 __be16 _ports[2], *pptr; 373 __be16 _ports[2], *pptr;
348 374
349 pptr = skb_header_pointer(skb, iph->ihl*4, 375 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
350 sizeof(_ports), _ports); 376 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
351 if (pptr == NULL) 377 if (pptr == NULL)
352 return NULL; 378 return NULL;
353 379
@@ -377,22 +403,22 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
377 /* 403 /*
378 * Create a connection entry. 404 * Create a connection entry.
379 */ 405 */
380 cp = ip_vs_conn_new(iph->protocol, 406 cp = ip_vs_conn_new(svc->af, iph.protocol,
381 iph->saddr, pptr[0], 407 &iph.saddr, pptr[0],
382 iph->daddr, pptr[1], 408 &iph.daddr, pptr[1],
383 dest->addr, dest->port?dest->port:pptr[1], 409 &dest->addr, dest->port ? dest->port : pptr[1],
384 0, 410 0,
385 dest); 411 dest);
386 if (cp == NULL) 412 if (cp == NULL)
387 return NULL; 413 return NULL;
388 414
389 IP_VS_DBG(6, "Schedule fwd:%c c:%u.%u.%u.%u:%u v:%u.%u.%u.%u:%u " 415 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
390 "d:%u.%u.%u.%u:%u conn->flags:%X conn->refcnt:%d\n", 416 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
391 ip_vs_fwd_tag(cp), 417 ip_vs_fwd_tag(cp),
392 NIPQUAD(cp->caddr), ntohs(cp->cport), 418 IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport),
393 NIPQUAD(cp->vaddr), ntohs(cp->vport), 419 IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport),
394 NIPQUAD(cp->daddr), ntohs(cp->dport), 420 IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport),
395 cp->flags, atomic_read(&cp->refcnt)); 421 cp->flags, atomic_read(&cp->refcnt));
396 422
397 ip_vs_conn_stats(cp, svc); 423 ip_vs_conn_stats(cp, svc);
398 return cp; 424 return cp;
@@ -408,31 +434,39 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
408 struct ip_vs_protocol *pp) 434 struct ip_vs_protocol *pp)
409{ 435{
410 __be16 _ports[2], *pptr; 436 __be16 _ports[2], *pptr;
411 struct iphdr *iph = ip_hdr(skb); 437 struct ip_vs_iphdr iph;
438 int unicast;
439 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
412 440
413 pptr = skb_header_pointer(skb, iph->ihl*4, 441 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
414 sizeof(_ports), _ports);
415 if (pptr == NULL) { 442 if (pptr == NULL) {
416 ip_vs_service_put(svc); 443 ip_vs_service_put(svc);
417 return NF_DROP; 444 return NF_DROP;
418 } 445 }
419 446
447#ifdef CONFIG_IP_VS_IPV6
448 if (svc->af == AF_INET6)
449 unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
450 else
451#endif
452 unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST);
453
420 /* if it is fwmark-based service, the cache_bypass sysctl is up 454 /* if it is fwmark-based service, the cache_bypass sysctl is up
421 and the destination is RTN_UNICAST (and not local), then create 455 and the destination is a non-local unicast, then create
422 a cache_bypass connection entry */ 456 a cache_bypass connection entry */
423 if (sysctl_ip_vs_cache_bypass && svc->fwmark 457 if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
424 && (inet_addr_type(&init_net, iph->daddr) == RTN_UNICAST)) {
425 int ret, cs; 458 int ret, cs;
426 struct ip_vs_conn *cp; 459 struct ip_vs_conn *cp;
460 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
427 461
428 ip_vs_service_put(svc); 462 ip_vs_service_put(svc);
429 463
430 /* create a new connection entry */ 464 /* create a new connection entry */
431 IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n"); 465 IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n");
432 cp = ip_vs_conn_new(iph->protocol, 466 cp = ip_vs_conn_new(svc->af, iph.protocol,
433 iph->saddr, pptr[0], 467 &iph.saddr, pptr[0],
434 iph->daddr, pptr[1], 468 &iph.daddr, pptr[1],
435 0, 0, 469 &daddr, 0,
436 IP_VS_CONN_F_BYPASS, 470 IP_VS_CONN_F_BYPASS,
437 NULL); 471 NULL);
438 if (cp == NULL) 472 if (cp == NULL)
@@ -473,7 +507,14 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
473 * created, the TCP RST packet cannot be sent, instead that 507 * created, the TCP RST packet cannot be sent, instead that
474 * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ 508 * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ
475 */ 509 */
476 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 510#ifdef CONFIG_IP_VS_IPV6
511 if (svc->af == AF_INET6)
512 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0,
513 skb->dev);
514 else
515#endif
516 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
517
477 return NF_DROP; 518 return NF_DROP;
478} 519}
479 520
@@ -512,6 +553,14 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
512 return err; 553 return err;
513} 554}
514 555
556#ifdef CONFIG_IP_VS_IPV6
557static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user)
558{
559 /* TODO IPv6: Find out what to do here for IPv6 */
560 return 0;
561}
562#endif
563
515/* 564/*
516 * Packet has been made sufficiently writable in caller 565 * Packet has been made sufficiently writable in caller
517 * - inout: 1=in->out, 0=out->in 566 * - inout: 1=in->out, 0=out->in
@@ -526,14 +575,14 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
526 struct iphdr *ciph = (struct iphdr *)(icmph + 1); 575 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
527 576
528 if (inout) { 577 if (inout) {
529 iph->saddr = cp->vaddr; 578 iph->saddr = cp->vaddr.ip;
530 ip_send_check(iph); 579 ip_send_check(iph);
531 ciph->daddr = cp->vaddr; 580 ciph->daddr = cp->vaddr.ip;
532 ip_send_check(ciph); 581 ip_send_check(ciph);
533 } else { 582 } else {
534 iph->daddr = cp->daddr; 583 iph->daddr = cp->daddr.ip;
535 ip_send_check(iph); 584 ip_send_check(iph);
536 ciph->saddr = cp->daddr; 585 ciph->saddr = cp->daddr.ip;
537 ip_send_check(ciph); 586 ip_send_check(ciph);
538 } 587 }
539 588
@@ -560,21 +609,112 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
560 "Forwarding altered incoming ICMP"); 609 "Forwarding altered incoming ICMP");
561} 610}
562 611
612#ifdef CONFIG_IP_VS_IPV6
613void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
614 struct ip_vs_conn *cp, int inout)
615{
616 struct ipv6hdr *iph = ipv6_hdr(skb);
617 unsigned int icmp_offset = sizeof(struct ipv6hdr);
618 struct icmp6hdr *icmph = (struct icmp6hdr *)(skb_network_header(skb) +
619 icmp_offset);
620 struct ipv6hdr *ciph = (struct ipv6hdr *)(icmph + 1);
621
622 if (inout) {
623 iph->saddr = cp->vaddr.in6;
624 ciph->daddr = cp->vaddr.in6;
625 } else {
626 iph->daddr = cp->daddr.in6;
627 ciph->saddr = cp->daddr.in6;
628 }
629
630 /* the TCP/UDP port */
631 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr) {
632 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
633
634 if (inout)
635 ports[1] = cp->vport;
636 else
637 ports[0] = cp->dport;
638 }
639
640 /* And finally the ICMP checksum */
641 icmph->icmp6_cksum = 0;
642 /* TODO IPv6: is this correct for ICMPv6? */
643 ip_vs_checksum_complete(skb, icmp_offset);
644 skb->ip_summed = CHECKSUM_UNNECESSARY;
645
646 if (inout)
647 IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
648 "Forwarding altered outgoing ICMPv6");
649 else
650 IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
651 "Forwarding altered incoming ICMPv6");
652}
653#endif
654
655/* Handle relevant response ICMP messages - forward to the right
656 * destination host. Used for NAT and local client.
657 */
658static int handle_response_icmp(int af, struct sk_buff *skb,
659 union nf_inet_addr *snet,
660 __u8 protocol, struct ip_vs_conn *cp,
661 struct ip_vs_protocol *pp,
662 unsigned int offset, unsigned int ihl)
663{
664 unsigned int verdict = NF_DROP;
665
666 if (IP_VS_FWD_METHOD(cp) != 0) {
667 IP_VS_ERR("shouldn't reach here, because the box is on the "
668 "half connection in the tun/dr module.\n");
669 }
670
671 /* Ensure the checksum is correct */
672 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
673 /* Failed checksum! */
674 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
675 IP_VS_DBG_ADDR(af, snet));
676 goto out;
677 }
678
679 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol)
680 offset += 2 * sizeof(__u16);
681 if (!skb_make_writable(skb, offset))
682 goto out;
683
684#ifdef CONFIG_IP_VS_IPV6
685 if (af == AF_INET6)
686 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
687 else
688#endif
689 ip_vs_nat_icmp(skb, pp, cp, 1);
690
691 /* do the statistics and put it back */
692 ip_vs_out_stats(cp, skb);
693
694 skb->ipvs_property = 1;
695 verdict = NF_ACCEPT;
696
697out:
698 __ip_vs_conn_put(cp);
699
700 return verdict;
701}
702
563/* 703/*
564 * Handle ICMP messages in the inside-to-outside direction (outgoing). 704 * Handle ICMP messages in the inside-to-outside direction (outgoing).
565 * Find any that might be relevant, check against existing connections, 705 * Find any that might be relevant, check against existing connections.
566 * forward to the right destination host if relevant.
567 * Currently handles error types - unreachable, quench, ttl exceeded. 706 * Currently handles error types - unreachable, quench, ttl exceeded.
568 * (Only used in VS/NAT)
569 */ 707 */
570static int ip_vs_out_icmp(struct sk_buff *skb, int *related) 708static int ip_vs_out_icmp(struct sk_buff *skb, int *related)
571{ 709{
572 struct iphdr *iph; 710 struct iphdr *iph;
573 struct icmphdr _icmph, *ic; 711 struct icmphdr _icmph, *ic;
574 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ 712 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
713 struct ip_vs_iphdr ciph;
575 struct ip_vs_conn *cp; 714 struct ip_vs_conn *cp;
576 struct ip_vs_protocol *pp; 715 struct ip_vs_protocol *pp;
577 unsigned int offset, ihl, verdict; 716 unsigned int offset, ihl;
717 union nf_inet_addr snet;
578 718
579 *related = 1; 719 *related = 1;
580 720
@@ -627,102 +767,231 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related)
627 767
628 offset += cih->ihl * 4; 768 offset += cih->ihl * 4;
629 769
770 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
630 /* The embedded headers contain source and dest in reverse order */ 771 /* The embedded headers contain source and dest in reverse order */
631 cp = pp->conn_out_get(skb, pp, cih, offset, 1); 772 cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
632 if (!cp) 773 if (!cp)
633 return NF_ACCEPT; 774 return NF_ACCEPT;
634 775
635 verdict = NF_DROP; 776 snet.ip = iph->saddr;
777 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
778 pp, offset, ihl);
779}
636 780
637 if (IP_VS_FWD_METHOD(cp) != 0) { 781#ifdef CONFIG_IP_VS_IPV6
638 IP_VS_ERR("shouldn't reach here, because the box is on the " 782static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related)
639 "half connection in the tun/dr module.\n"); 783{
784 struct ipv6hdr *iph;
785 struct icmp6hdr _icmph, *ic;
786 struct ipv6hdr _ciph, *cih; /* The ip header contained
787 within the ICMP */
788 struct ip_vs_iphdr ciph;
789 struct ip_vs_conn *cp;
790 struct ip_vs_protocol *pp;
791 unsigned int offset;
792 union nf_inet_addr snet;
793
794 *related = 1;
795
796 /* reassemble IP fragments */
797 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
798 if (ip_vs_gather_frags_v6(skb, IP_DEFRAG_VS_OUT))
799 return NF_STOLEN;
640 } 800 }
641 801
642 /* Ensure the checksum is correct */ 802 iph = ipv6_hdr(skb);
643 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { 803 offset = sizeof(struct ipv6hdr);
644 /* Failed checksum! */ 804 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
645 IP_VS_DBG(1, "Forward ICMP: failed checksum from %d.%d.%d.%d!\n", 805 if (ic == NULL)
646 NIPQUAD(iph->saddr)); 806 return NF_DROP;
647 goto out; 807
808 IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) " NIP6_FMT "->" NIP6_FMT "\n",
809 ic->icmp6_type, ntohs(icmpv6_id(ic)),
810 NIP6(iph->saddr), NIP6(iph->daddr));
811
812 /*
813 * Work through seeing if this is for us.
814 * These checks are supposed to be in an order that means easy
815 * things are checked first to speed up processing.... however
816 * this means that some packets will manage to get a long way
817 * down this stack and then be rejected, but that's life.
818 */
819 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
820 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
821 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
822 *related = 0;
823 return NF_ACCEPT;
648 } 824 }
649 825
650 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) 826 /* Now find the contained IP header */
651 offset += 2 * sizeof(__u16); 827 offset += sizeof(_icmph);
652 if (!skb_make_writable(skb, offset)) 828 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
653 goto out; 829 if (cih == NULL)
830 return NF_ACCEPT; /* The packet looks wrong, ignore */
654 831
655 ip_vs_nat_icmp(skb, pp, cp, 1); 832 pp = ip_vs_proto_get(cih->nexthdr);
833 if (!pp)
834 return NF_ACCEPT;
656 835
657 /* do the statistics and put it back */ 836 /* Is the embedded protocol header present? */
658 ip_vs_out_stats(cp, skb); 837 /* TODO: we don't support fragmentation at the moment anyways */
838 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
839 return NF_ACCEPT;
659 840
660 skb->ipvs_property = 1; 841 IP_VS_DBG_PKT(11, pp, skb, offset, "Checking outgoing ICMPv6 for");
661 verdict = NF_ACCEPT;
662 842
663 out: 843 offset += sizeof(struct ipv6hdr);
664 __ip_vs_conn_put(cp);
665 844
666 return verdict; 845 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
846 /* The embedded headers contain source and dest in reverse order */
847 cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
848 if (!cp)
849 return NF_ACCEPT;
850
851 ipv6_addr_copy(&snet.in6, &iph->saddr);
852 return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp,
853 pp, offset, sizeof(struct ipv6hdr));
667} 854}
855#endif
668 856
669static inline int is_tcp_reset(const struct sk_buff *skb) 857static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
670{ 858{
671 struct tcphdr _tcph, *th; 859 struct tcphdr _tcph, *th;
672 860
673 th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); 861 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
674 if (th == NULL) 862 if (th == NULL)
675 return 0; 863 return 0;
676 return th->rst; 864 return th->rst;
677} 865}
678 866
867/* Handle response packets: rewrite addresses and send away...
868 * Used for NAT and local client.
869 */
870static unsigned int
871handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
872 struct ip_vs_conn *cp, int ihl)
873{
874 IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet");
875
876 if (!skb_make_writable(skb, ihl))
877 goto drop;
878
879 /* mangle the packet */
880 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp))
881 goto drop;
882
883#ifdef CONFIG_IP_VS_IPV6
884 if (af == AF_INET6)
885 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
886 else
887#endif
888 {
889 ip_hdr(skb)->saddr = cp->vaddr.ip;
890 ip_send_check(ip_hdr(skb));
891 }
892
893 /* For policy routing, packets originating from this
894 * machine itself may be routed differently to packets
895 * passing through. We want this packet to be routed as
896 * if it came from this machine itself. So re-compute
897 * the routing information.
898 */
899#ifdef CONFIG_IP_VS_IPV6
900 if (af == AF_INET6) {
901 if (ip6_route_me_harder(skb) != 0)
902 goto drop;
903 } else
904#endif
905 if (ip_route_me_harder(skb, RTN_LOCAL) != 0)
906 goto drop;
907
908 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");
909
910 ip_vs_out_stats(cp, skb);
911 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
912 ip_vs_conn_put(cp);
913
914 skb->ipvs_property = 1;
915
916 LeaveFunction(11);
917 return NF_ACCEPT;
918
919drop:
920 ip_vs_conn_put(cp);
921 kfree_skb(skb);
922 return NF_STOLEN;
923}
924
679/* 925/*
680 * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT. 926 * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT.
681 * Check if outgoing packet belongs to the established ip_vs_conn, 927 * Check if outgoing packet belongs to the established ip_vs_conn.
682 * rewrite addresses of the packet and send it on its way...
683 */ 928 */
684static unsigned int 929static unsigned int
685ip_vs_out(unsigned int hooknum, struct sk_buff *skb, 930ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
686 const struct net_device *in, const struct net_device *out, 931 const struct net_device *in, const struct net_device *out,
687 int (*okfn)(struct sk_buff *)) 932 int (*okfn)(struct sk_buff *))
688{ 933{
689 struct iphdr *iph; 934 struct ip_vs_iphdr iph;
690 struct ip_vs_protocol *pp; 935 struct ip_vs_protocol *pp;
691 struct ip_vs_conn *cp; 936 struct ip_vs_conn *cp;
692 int ihl; 937 int af;
693 938
694 EnterFunction(11); 939 EnterFunction(11);
695 940
941 af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6;
942
696 if (skb->ipvs_property) 943 if (skb->ipvs_property)
697 return NF_ACCEPT; 944 return NF_ACCEPT;
698 945
699 iph = ip_hdr(skb); 946 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
700 if (unlikely(iph->protocol == IPPROTO_ICMP)) { 947#ifdef CONFIG_IP_VS_IPV6
701 int related, verdict = ip_vs_out_icmp(skb, &related); 948 if (af == AF_INET6) {
949 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
950 int related, verdict = ip_vs_out_icmp_v6(skb, &related);
702 951
703 if (related) 952 if (related)
704 return verdict; 953 return verdict;
705 iph = ip_hdr(skb); 954 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
706 } 955 }
956 } else
957#endif
958 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
959 int related, verdict = ip_vs_out_icmp(skb, &related);
707 960
708 pp = ip_vs_proto_get(iph->protocol); 961 if (related)
962 return verdict;
963 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
964 }
965
966 pp = ip_vs_proto_get(iph.protocol);
709 if (unlikely(!pp)) 967 if (unlikely(!pp))
710 return NF_ACCEPT; 968 return NF_ACCEPT;
711 969
712 /* reassemble IP fragments */ 970 /* reassemble IP fragments */
713 if (unlikely(iph->frag_off & htons(IP_MF|IP_OFFSET) && 971#ifdef CONFIG_IP_VS_IPV6
714 !pp->dont_defrag)) { 972 if (af == AF_INET6) {
715 if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT)) 973 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
716 return NF_STOLEN; 974 int related, verdict = ip_vs_out_icmp_v6(skb, &related);
717 iph = ip_hdr(skb); 975
718 } 976 if (related)
977 return verdict;
719 978
720 ihl = iph->ihl << 2; 979 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
980 }
981 } else
982#endif
983 if (unlikely(ip_hdr(skb)->frag_off & htons(IP_MF|IP_OFFSET) &&
984 !pp->dont_defrag)) {
985 if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT))
986 return NF_STOLEN;
987
988 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
989 }
721 990
722 /* 991 /*
723 * Check if the packet belongs to an existing entry 992 * Check if the packet belongs to an existing entry
724 */ 993 */
725 cp = pp->conn_out_get(skb, pp, iph, ihl, 0); 994 cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
726 995
727 if (unlikely(!cp)) { 996 if (unlikely(!cp)) {
728 if (sysctl_ip_vs_nat_icmp_send && 997 if (sysctl_ip_vs_nat_icmp_send &&
@@ -730,21 +999,31 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
730 pp->protocol == IPPROTO_UDP)) { 999 pp->protocol == IPPROTO_UDP)) {
731 __be16 _ports[2], *pptr; 1000 __be16 _ports[2], *pptr;
732 1001
733 pptr = skb_header_pointer(skb, ihl, 1002 pptr = skb_header_pointer(skb, iph.len,
734 sizeof(_ports), _ports); 1003 sizeof(_ports), _ports);
735 if (pptr == NULL) 1004 if (pptr == NULL)
736 return NF_ACCEPT; /* Not for me */ 1005 return NF_ACCEPT; /* Not for me */
737 if (ip_vs_lookup_real_service(iph->protocol, 1006 if (ip_vs_lookup_real_service(af, iph.protocol,
738 iph->saddr, pptr[0])) { 1007 &iph.saddr,
1008 pptr[0])) {
739 /* 1009 /*
740 * Notify the real server: there is no 1010 * Notify the real server: there is no
741 * existing entry if it is not RST 1011 * existing entry if it is not RST
742 * packet or not TCP packet. 1012 * packet or not TCP packet.
743 */ 1013 */
744 if (iph->protocol != IPPROTO_TCP 1014 if (iph.protocol != IPPROTO_TCP
745 || !is_tcp_reset(skb)) { 1015 || !is_tcp_reset(skb, iph.len)) {
746 icmp_send(skb,ICMP_DEST_UNREACH, 1016#ifdef CONFIG_IP_VS_IPV6
747 ICMP_PORT_UNREACH, 0); 1017 if (af == AF_INET6)
1018 icmpv6_send(skb,
1019 ICMPV6_DEST_UNREACH,
1020 ICMPV6_PORT_UNREACH,
1021 0, skb->dev);
1022 else
1023#endif
1024 icmp_send(skb,
1025 ICMP_DEST_UNREACH,
1026 ICMP_PORT_UNREACH, 0);
748 return NF_DROP; 1027 return NF_DROP;
749 } 1028 }
750 } 1029 }
@@ -754,41 +1033,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
754 return NF_ACCEPT; 1033 return NF_ACCEPT;
755 } 1034 }
756 1035
757 IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet"); 1036 return handle_response(af, skb, pp, cp, iph.len);
758
759 if (!skb_make_writable(skb, ihl))
760 goto drop;
761
762 /* mangle the packet */
763 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp))
764 goto drop;
765 ip_hdr(skb)->saddr = cp->vaddr;
766 ip_send_check(ip_hdr(skb));
767
768 /* For policy routing, packets originating from this
769 * machine itself may be routed differently to packets
770 * passing through. We want this packet to be routed as
771 * if it came from this machine itself. So re-compute
772 * the routing information.
773 */
774 if (ip_route_me_harder(skb, RTN_LOCAL) != 0)
775 goto drop;
776
777 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");
778
779 ip_vs_out_stats(cp, skb);
780 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
781 ip_vs_conn_put(cp);
782
783 skb->ipvs_property = 1;
784
785 LeaveFunction(11);
786 return NF_ACCEPT;
787
788 drop:
789 ip_vs_conn_put(cp);
790 kfree_skb(skb);
791 return NF_STOLEN;
792} 1037}
793 1038
794 1039
@@ -804,9 +1049,11 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
804 struct iphdr *iph; 1049 struct iphdr *iph;
805 struct icmphdr _icmph, *ic; 1050 struct icmphdr _icmph, *ic;
806 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ 1051 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1052 struct ip_vs_iphdr ciph;
807 struct ip_vs_conn *cp; 1053 struct ip_vs_conn *cp;
808 struct ip_vs_protocol *pp; 1054 struct ip_vs_protocol *pp;
809 unsigned int offset, ihl, verdict; 1055 unsigned int offset, ihl, verdict;
1056 union nf_inet_addr snet;
810 1057
811 *related = 1; 1058 *related = 1;
812 1059
@@ -860,10 +1107,20 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
860 1107
861 offset += cih->ihl * 4; 1108 offset += cih->ihl * 4;
862 1109
1110 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
863 /* The embedded headers contain source and dest in reverse order */ 1111 /* The embedded headers contain source and dest in reverse order */
864 cp = pp->conn_in_get(skb, pp, cih, offset, 1); 1112 cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1);
865 if (!cp) 1113 if (!cp) {
1114 /* The packet could also belong to a local client */
1115 cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
1116 if (cp) {
1117 snet.ip = iph->saddr;
1118 return handle_response_icmp(AF_INET, skb, &snet,
1119 cih->protocol, cp, pp,
1120 offset, ihl);
1121 }
866 return NF_ACCEPT; 1122 return NF_ACCEPT;
1123 }
867 1124
868 verdict = NF_DROP; 1125 verdict = NF_DROP;
869 1126
@@ -888,6 +1145,105 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
888 return verdict; 1145 return verdict;
889} 1146}
890 1147
1148#ifdef CONFIG_IP_VS_IPV6
1149static int
1150ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1151{
1152 struct ipv6hdr *iph;
1153 struct icmp6hdr _icmph, *ic;
1154 struct ipv6hdr _ciph, *cih; /* The ip header contained
1155 within the ICMP */
1156 struct ip_vs_iphdr ciph;
1157 struct ip_vs_conn *cp;
1158 struct ip_vs_protocol *pp;
1159 unsigned int offset, verdict;
1160 union nf_inet_addr snet;
1161
1162 *related = 1;
1163
1164 /* reassemble IP fragments */
1165 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
1166 if (ip_vs_gather_frags_v6(skb, hooknum == NF_INET_LOCAL_IN ?
1167 IP_DEFRAG_VS_IN :
1168 IP_DEFRAG_VS_FWD))
1169 return NF_STOLEN;
1170 }
1171
1172 iph = ipv6_hdr(skb);
1173 offset = sizeof(struct ipv6hdr);
1174 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1175 if (ic == NULL)
1176 return NF_DROP;
1177
1178 IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) " NIP6_FMT "->" NIP6_FMT "\n",
1179 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1180 NIP6(iph->saddr), NIP6(iph->daddr));
1181
1182 /*
1183 * Work through seeing if this is for us.
1184 * These checks are supposed to be in an order that means easy
1185 * things are checked first to speed up processing.... however
1186 * this means that some packets will manage to get a long way
1187 * down this stack and then be rejected, but that's life.
1188 */
1189 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
1190 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
1191 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
1192 *related = 0;
1193 return NF_ACCEPT;
1194 }
1195
1196 /* Now find the contained IP header */
1197 offset += sizeof(_icmph);
1198 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1199 if (cih == NULL)
1200 return NF_ACCEPT; /* The packet looks wrong, ignore */
1201
1202 pp = ip_vs_proto_get(cih->nexthdr);
1203 if (!pp)
1204 return NF_ACCEPT;
1205
1206 /* Is the embedded protocol header present? */
1207 /* TODO: we don't support fragmentation at the moment anyways */
1208 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
1209 return NF_ACCEPT;
1210
1211 IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMPv6 for");
1212
1213 offset += sizeof(struct ipv6hdr);
1214
1215 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
1216 /* The embedded headers contain source and dest in reverse order */
1217 cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1);
1218 if (!cp) {
1219 /* The packet could also belong to a local client */
1220 cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
1221 if (cp) {
1222 ipv6_addr_copy(&snet.in6, &iph->saddr);
1223 return handle_response_icmp(AF_INET6, skb, &snet,
1224 cih->nexthdr,
1225 cp, pp, offset,
1226 sizeof(struct ipv6hdr));
1227 }
1228 return NF_ACCEPT;
1229 }
1230
1231 verdict = NF_DROP;
1232
1233 /* do the statistics and put it back */
1234 ip_vs_in_stats(cp, skb);
1235 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr)
1236 offset += 2 * sizeof(__u16);
1237 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
1238 /* do not touch skb anymore */
1239
1240 __ip_vs_conn_put(cp);
1241
1242 return verdict;
1243}
1244#endif
1245
1246
891/* 1247/*
892 * Check if it's for virtual services, look it up, 1248 * Check if it's for virtual services, look it up,
893 * and send it on its way... 1249 * and send it on its way...
@@ -897,50 +1253,54 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
897 const struct net_device *in, const struct net_device *out, 1253 const struct net_device *in, const struct net_device *out,
898 int (*okfn)(struct sk_buff *)) 1254 int (*okfn)(struct sk_buff *))
899{ 1255{
900 struct iphdr *iph; 1256 struct ip_vs_iphdr iph;
901 struct ip_vs_protocol *pp; 1257 struct ip_vs_protocol *pp;
902 struct ip_vs_conn *cp; 1258 struct ip_vs_conn *cp;
903 int ret, restart; 1259 int ret, restart, af;
904 int ihl; 1260
1261 af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6;
1262
1263 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
905 1264
906 /* 1265 /*
907 * Big tappo: only PACKET_HOST (neither loopback nor mcasts) 1266 * Big tappo: only PACKET_HOST, including loopback for local client
908 * ... don't know why 1st test DOES NOT include 2nd (?) 1267 * Don't handle local packets on IPv6 for now
909 */ 1268 */
910 if (unlikely(skb->pkt_type != PACKET_HOST 1269 if (unlikely(skb->pkt_type != PACKET_HOST)) {
911 || skb->dev->flags & IFF_LOOPBACK || skb->sk)) { 1270 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s ignored\n",
912 IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n", 1271 skb->pkt_type,
913 skb->pkt_type, 1272 iph.protocol,
914 ip_hdr(skb)->protocol, 1273 IP_VS_DBG_ADDR(af, &iph.daddr));
915 NIPQUAD(ip_hdr(skb)->daddr));
916 return NF_ACCEPT; 1274 return NF_ACCEPT;
917 } 1275 }
918 1276
919 iph = ip_hdr(skb); 1277 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
920 if (unlikely(iph->protocol == IPPROTO_ICMP)) {
921 int related, verdict = ip_vs_in_icmp(skb, &related, hooknum); 1278 int related, verdict = ip_vs_in_icmp(skb, &related, hooknum);
922 1279
923 if (related) 1280 if (related)
924 return verdict; 1281 return verdict;
925 iph = ip_hdr(skb); 1282 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
926 } 1283 }
927 1284
928 /* Protocol supported? */ 1285 /* Protocol supported? */
929 pp = ip_vs_proto_get(iph->protocol); 1286 pp = ip_vs_proto_get(iph.protocol);
930 if (unlikely(!pp)) 1287 if (unlikely(!pp))
931 return NF_ACCEPT; 1288 return NF_ACCEPT;
932 1289
933 ihl = iph->ihl << 2;
934
935 /* 1290 /*
936 * Check if the packet belongs to an existing connection entry 1291 * Check if the packet belongs to an existing connection entry
937 */ 1292 */
938 cp = pp->conn_in_get(skb, pp, iph, ihl, 0); 1293 cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0);
939 1294
940 if (unlikely(!cp)) { 1295 if (unlikely(!cp)) {
941 int v; 1296 int v;
942 1297
943 if (!pp->conn_schedule(skb, pp, &v, &cp)) 1298 /* For local client packets, it could be a response */
1299 cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
1300 if (cp)
1301 return handle_response(af, skb, pp, cp, iph.len);
1302
1303 if (!pp->conn_schedule(af, skb, pp, &v, &cp))
944 return v; 1304 return v;
945 } 1305 }
946 1306
@@ -984,7 +1344,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
984 * encorage the standby servers to update the connections timeout 1344 * encorage the standby servers to update the connections timeout
985 */ 1345 */
986 atomic_inc(&cp->in_pkts); 1346 atomic_inc(&cp->in_pkts);
987 if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && 1347 if (af == AF_INET &&
1348 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
988 (((cp->protocol != IPPROTO_TCP || 1349 (((cp->protocol != IPPROTO_TCP ||
989 cp->state == IP_VS_TCP_S_ESTABLISHED) && 1350 cp->state == IP_VS_TCP_S_ESTABLISHED) &&
990 (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] 1351 (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1]
@@ -1023,6 +1384,21 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
1023 return ip_vs_in_icmp(skb, &r, hooknum); 1384 return ip_vs_in_icmp(skb, &r, hooknum);
1024} 1385}
1025 1386
1387#ifdef CONFIG_IP_VS_IPV6
1388static unsigned int
1389ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1390 const struct net_device *in, const struct net_device *out,
1391 int (*okfn)(struct sk_buff *))
1392{
1393 int r;
1394
1395 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1396 return NF_ACCEPT;
1397
1398 return ip_vs_in_icmp_v6(skb, &r, hooknum);
1399}
1400#endif
1401
1026 1402
1027static struct nf_hook_ops ip_vs_ops[] __read_mostly = { 1403static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1028 /* After packet filtering, forward packet through VS/DR, VS/TUN, 1404 /* After packet filtering, forward packet through VS/DR, VS/TUN,
@@ -1060,6 +1436,43 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1060 .hooknum = NF_INET_POST_ROUTING, 1436 .hooknum = NF_INET_POST_ROUTING,
1061 .priority = NF_IP_PRI_NAT_SRC-1, 1437 .priority = NF_IP_PRI_NAT_SRC-1,
1062 }, 1438 },
1439#ifdef CONFIG_IP_VS_IPV6
1440 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1441 * or VS/NAT(change destination), so that filtering rules can be
1442 * applied to IPVS. */
1443 {
1444 .hook = ip_vs_in,
1445 .owner = THIS_MODULE,
1446 .pf = PF_INET6,
1447 .hooknum = NF_INET_LOCAL_IN,
1448 .priority = 100,
1449 },
1450 /* After packet filtering, change source only for VS/NAT */
1451 {
1452 .hook = ip_vs_out,
1453 .owner = THIS_MODULE,
1454 .pf = PF_INET6,
1455 .hooknum = NF_INET_FORWARD,
1456 .priority = 100,
1457 },
1458 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1459 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1460 {
1461 .hook = ip_vs_forward_icmp_v6,
1462 .owner = THIS_MODULE,
1463 .pf = PF_INET6,
1464 .hooknum = NF_INET_FORWARD,
1465 .priority = 99,
1466 },
1467 /* Before the netfilter connection tracking, exit from POST_ROUTING */
1468 {
1469 .hook = ip_vs_post_routing,
1470 .owner = THIS_MODULE,
1471 .pf = PF_INET6,
1472 .hooknum = NF_INET_POST_ROUTING,
1473 .priority = NF_IP6_PRI_NAT_SRC-1,
1474 },
1475#endif
1063}; 1476};
1064 1477
1065 1478
@@ -1070,10 +1483,12 @@ static int __init ip_vs_init(void)
1070{ 1483{
1071 int ret; 1484 int ret;
1072 1485
1486 ip_vs_estimator_init();
1487
1073 ret = ip_vs_control_init(); 1488 ret = ip_vs_control_init();
1074 if (ret < 0) { 1489 if (ret < 0) {
1075 IP_VS_ERR("can't setup control.\n"); 1490 IP_VS_ERR("can't setup control.\n");
1076 goto cleanup_nothing; 1491 goto cleanup_estimator;
1077 } 1492 }
1078 1493
1079 ip_vs_protocol_init(); 1494 ip_vs_protocol_init();
@@ -1106,7 +1521,8 @@ static int __init ip_vs_init(void)
1106 cleanup_protocol: 1521 cleanup_protocol:
1107 ip_vs_protocol_cleanup(); 1522 ip_vs_protocol_cleanup();
1108 ip_vs_control_cleanup(); 1523 ip_vs_control_cleanup();
1109 cleanup_nothing: 1524 cleanup_estimator:
1525 ip_vs_estimator_cleanup();
1110 return ret; 1526 return ret;
1111} 1527}
1112 1528
@@ -1117,6 +1533,7 @@ static void __exit ip_vs_cleanup(void)
1117 ip_vs_app_cleanup(); 1533 ip_vs_app_cleanup();
1118 ip_vs_protocol_cleanup(); 1534 ip_vs_protocol_cleanup();
1119 ip_vs_control_cleanup(); 1535 ip_vs_control_cleanup();
1536 ip_vs_estimator_cleanup();
1120 IP_VS_INFO("ipvs unloaded.\n"); 1537 IP_VS_INFO("ipvs unloaded.\n");
1121} 1538}
1122 1539
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 6379705a8dcb..771551d8fba9 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -35,8 +35,13 @@
35 35
36#include <net/net_namespace.h> 36#include <net/net_namespace.h>
37#include <net/ip.h> 37#include <net/ip.h>
38#ifdef CONFIG_IP_VS_IPV6
39#include <net/ipv6.h>
40#include <net/ip6_route.h>
41#endif
38#include <net/route.h> 42#include <net/route.h>
39#include <net/sock.h> 43#include <net/sock.h>
44#include <net/genetlink.h>
40 45
41#include <asm/uaccess.h> 46#include <asm/uaccess.h>
42 47
@@ -90,6 +95,26 @@ int ip_vs_get_debug_level(void)
90} 95}
91#endif 96#endif
92 97
98#ifdef CONFIG_IP_VS_IPV6
99/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
100static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
101{
102 struct rt6_info *rt;
103 struct flowi fl = {
104 .oif = 0,
105 .nl_u = {
106 .ip6_u = {
107 .daddr = *addr,
108 .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
109 };
110
111 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
112 if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
113 return 1;
114
115 return 0;
116}
117#endif
93/* 118/*
94 * update_defense_level is called from keventd and from sysctl, 119 * update_defense_level is called from keventd and from sysctl,
95 * so it needs to protect itself from softirqs 120 * so it needs to protect itself from softirqs
@@ -281,11 +306,19 @@ static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
281 * Returns hash value for virtual service 306 * Returns hash value for virtual service
282 */ 307 */
283static __inline__ unsigned 308static __inline__ unsigned
284ip_vs_svc_hashkey(unsigned proto, __be32 addr, __be16 port) 309ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
310 __be16 port)
285{ 311{
286 register unsigned porth = ntohs(port); 312 register unsigned porth = ntohs(port);
313 __be32 addr_fold = addr->ip;
314
315#ifdef CONFIG_IP_VS_IPV6
316 if (af == AF_INET6)
317 addr_fold = addr->ip6[0]^addr->ip6[1]^
318 addr->ip6[2]^addr->ip6[3];
319#endif
287 320
288 return (proto^ntohl(addr)^(porth>>IP_VS_SVC_TAB_BITS)^porth) 321 return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
289 & IP_VS_SVC_TAB_MASK; 322 & IP_VS_SVC_TAB_MASK;
290} 323}
291 324
@@ -316,7 +349,8 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
316 /* 349 /*
317 * Hash it by <protocol,addr,port> in ip_vs_svc_table 350 * Hash it by <protocol,addr,port> in ip_vs_svc_table
318 */ 351 */
319 hash = ip_vs_svc_hashkey(svc->protocol, svc->addr, svc->port); 352 hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr,
353 svc->port);
320 list_add(&svc->s_list, &ip_vs_svc_table[hash]); 354 list_add(&svc->s_list, &ip_vs_svc_table[hash]);
321 } else { 355 } else {
322 /* 356 /*
@@ -362,17 +396,19 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
362/* 396/*
363 * Get service by {proto,addr,port} in the service table. 397 * Get service by {proto,addr,port} in the service table.
364 */ 398 */
365static __inline__ struct ip_vs_service * 399static inline struct ip_vs_service *
366__ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport) 400__ip_vs_service_get(int af, __u16 protocol, const union nf_inet_addr *vaddr,
401 __be16 vport)
367{ 402{
368 unsigned hash; 403 unsigned hash;
369 struct ip_vs_service *svc; 404 struct ip_vs_service *svc;
370 405
371 /* Check for "full" addressed entries */ 406 /* Check for "full" addressed entries */
372 hash = ip_vs_svc_hashkey(protocol, vaddr, vport); 407 hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport);
373 408
374 list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){ 409 list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
375 if ((svc->addr == vaddr) 410 if ((svc->af == af)
411 && ip_vs_addr_equal(af, &svc->addr, vaddr)
376 && (svc->port == vport) 412 && (svc->port == vport)
377 && (svc->protocol == protocol)) { 413 && (svc->protocol == protocol)) {
378 /* HIT */ 414 /* HIT */
@@ -388,7 +424,8 @@ __ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport)
388/* 424/*
389 * Get service by {fwmark} in the service table. 425 * Get service by {fwmark} in the service table.
390 */ 426 */
391static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark) 427static inline struct ip_vs_service *
428__ip_vs_svc_fwm_get(int af, __u32 fwmark)
392{ 429{
393 unsigned hash; 430 unsigned hash;
394 struct ip_vs_service *svc; 431 struct ip_vs_service *svc;
@@ -397,7 +434,7 @@ static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark)
397 hash = ip_vs_svc_fwm_hashkey(fwmark); 434 hash = ip_vs_svc_fwm_hashkey(fwmark);
398 435
399 list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) { 436 list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
400 if (svc->fwmark == fwmark) { 437 if (svc->fwmark == fwmark && svc->af == af) {
401 /* HIT */ 438 /* HIT */
402 atomic_inc(&svc->usecnt); 439 atomic_inc(&svc->usecnt);
403 return svc; 440 return svc;
@@ -408,7 +445,8 @@ static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark)
408} 445}
409 446
410struct ip_vs_service * 447struct ip_vs_service *
411ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) 448ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
449 const union nf_inet_addr *vaddr, __be16 vport)
412{ 450{
413 struct ip_vs_service *svc; 451 struct ip_vs_service *svc;
414 452
@@ -417,14 +455,14 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport)
417 /* 455 /*
418 * Check the table hashed by fwmark first 456 * Check the table hashed by fwmark first
419 */ 457 */
420 if (fwmark && (svc = __ip_vs_svc_fwm_get(fwmark))) 458 if (fwmark && (svc = __ip_vs_svc_fwm_get(af, fwmark)))
421 goto out; 459 goto out;
422 460
423 /* 461 /*
424 * Check the table hashed by <protocol,addr,port> 462 * Check the table hashed by <protocol,addr,port>
425 * for "full" addressed entries 463 * for "full" addressed entries
426 */ 464 */
427 svc = __ip_vs_service_get(protocol, vaddr, vport); 465 svc = __ip_vs_service_get(af, protocol, vaddr, vport);
428 466
429 if (svc == NULL 467 if (svc == NULL
430 && protocol == IPPROTO_TCP 468 && protocol == IPPROTO_TCP
@@ -434,7 +472,7 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport)
434 * Check if ftp service entry exists, the packet 472 * Check if ftp service entry exists, the packet
435 * might belong to FTP data connections. 473 * might belong to FTP data connections.
436 */ 474 */
437 svc = __ip_vs_service_get(protocol, vaddr, FTPPORT); 475 svc = __ip_vs_service_get(af, protocol, vaddr, FTPPORT);
438 } 476 }
439 477
440 if (svc == NULL 478 if (svc == NULL
@@ -442,16 +480,16 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport)
442 /* 480 /*
443 * Check if the catch-all port (port zero) exists 481 * Check if the catch-all port (port zero) exists
444 */ 482 */
445 svc = __ip_vs_service_get(protocol, vaddr, 0); 483 svc = __ip_vs_service_get(af, protocol, vaddr, 0);
446 } 484 }
447 485
448 out: 486 out:
449 read_unlock(&__ip_vs_svc_lock); 487 read_unlock(&__ip_vs_svc_lock);
450 488
451 IP_VS_DBG(9, "lookup service: fwm %u %s %u.%u.%u.%u:%u %s\n", 489 IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n",
452 fwmark, ip_vs_proto_name(protocol), 490 fwmark, ip_vs_proto_name(protocol),
453 NIPQUAD(vaddr), ntohs(vport), 491 IP_VS_DBG_ADDR(af, vaddr), ntohs(vport),
454 svc?"hit":"not hit"); 492 svc ? "hit" : "not hit");
455 493
456 return svc; 494 return svc;
457} 495}
@@ -478,11 +516,20 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
478/* 516/*
479 * Returns hash value for real service 517 * Returns hash value for real service
480 */ 518 */
481static __inline__ unsigned ip_vs_rs_hashkey(__be32 addr, __be16 port) 519static inline unsigned ip_vs_rs_hashkey(int af,
520 const union nf_inet_addr *addr,
521 __be16 port)
482{ 522{
483 register unsigned porth = ntohs(port); 523 register unsigned porth = ntohs(port);
524 __be32 addr_fold = addr->ip;
525
526#ifdef CONFIG_IP_VS_IPV6
527 if (af == AF_INET6)
528 addr_fold = addr->ip6[0]^addr->ip6[1]^
529 addr->ip6[2]^addr->ip6[3];
530#endif
484 531
485 return (ntohl(addr)^(porth>>IP_VS_RTAB_BITS)^porth) 532 return (ntohl(addr_fold)^(porth>>IP_VS_RTAB_BITS)^porth)
486 & IP_VS_RTAB_MASK; 533 & IP_VS_RTAB_MASK;
487} 534}
488 535
@@ -502,7 +549,8 @@ static int ip_vs_rs_hash(struct ip_vs_dest *dest)
502 * Hash by proto,addr,port, 549 * Hash by proto,addr,port,
503 * which are the parameters of the real service. 550 * which are the parameters of the real service.
504 */ 551 */
505 hash = ip_vs_rs_hashkey(dest->addr, dest->port); 552 hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
553
506 list_add(&dest->d_list, &ip_vs_rtable[hash]); 554 list_add(&dest->d_list, &ip_vs_rtable[hash]);
507 555
508 return 1; 556 return 1;
@@ -529,7 +577,9 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
529 * Lookup real service by <proto,addr,port> in the real service table. 577 * Lookup real service by <proto,addr,port> in the real service table.
530 */ 578 */
531struct ip_vs_dest * 579struct ip_vs_dest *
532ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport) 580ip_vs_lookup_real_service(int af, __u16 protocol,
581 const union nf_inet_addr *daddr,
582 __be16 dport)
533{ 583{
534 unsigned hash; 584 unsigned hash;
535 struct ip_vs_dest *dest; 585 struct ip_vs_dest *dest;
@@ -538,11 +588,12 @@ ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport)
538 * Check for "full" addressed entries 588 * Check for "full" addressed entries
539 * Return the first found entry 589 * Return the first found entry
540 */ 590 */
541 hash = ip_vs_rs_hashkey(daddr, dport); 591 hash = ip_vs_rs_hashkey(af, daddr, dport);
542 592
543 read_lock(&__ip_vs_rs_lock); 593 read_lock(&__ip_vs_rs_lock);
544 list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) { 594 list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) {
545 if ((dest->addr == daddr) 595 if ((dest->af == af)
596 && ip_vs_addr_equal(af, &dest->addr, daddr)
546 && (dest->port == dport) 597 && (dest->port == dport)
547 && ((dest->protocol == protocol) || 598 && ((dest->protocol == protocol) ||
548 dest->vfwmark)) { 599 dest->vfwmark)) {
@@ -560,7 +611,8 @@ ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport)
560 * Lookup destination by {addr,port} in the given service 611 * Lookup destination by {addr,port} in the given service
561 */ 612 */
562static struct ip_vs_dest * 613static struct ip_vs_dest *
563ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) 614ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
615 __be16 dport)
564{ 616{
565 struct ip_vs_dest *dest; 617 struct ip_vs_dest *dest;
566 618
@@ -568,7 +620,9 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
568 * Find the destination for the given service 620 * Find the destination for the given service
569 */ 621 */
570 list_for_each_entry(dest, &svc->destinations, n_list) { 622 list_for_each_entry(dest, &svc->destinations, n_list) {
571 if ((dest->addr == daddr) && (dest->port == dport)) { 623 if ((dest->af == svc->af)
624 && ip_vs_addr_equal(svc->af, &dest->addr, daddr)
625 && (dest->port == dport)) {
572 /* HIT */ 626 /* HIT */
573 return dest; 627 return dest;
574 } 628 }
@@ -587,13 +641,15 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
587 * ip_vs_lookup_real_service() looked promissing, but 641 * ip_vs_lookup_real_service() looked promissing, but
588 * seems not working as expected. 642 * seems not working as expected.
589 */ 643 */
590struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport, 644struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr,
591 __be32 vaddr, __be16 vport, __u16 protocol) 645 __be16 dport,
646 const union nf_inet_addr *vaddr,
647 __be16 vport, __u16 protocol)
592{ 648{
593 struct ip_vs_dest *dest; 649 struct ip_vs_dest *dest;
594 struct ip_vs_service *svc; 650 struct ip_vs_service *svc;
595 651
596 svc = ip_vs_service_get(0, protocol, vaddr, vport); 652 svc = ip_vs_service_get(af, 0, protocol, vaddr, vport);
597 if (!svc) 653 if (!svc)
598 return NULL; 654 return NULL;
599 dest = ip_vs_lookup_dest(svc, daddr, dport); 655 dest = ip_vs_lookup_dest(svc, daddr, dport);
@@ -614,7 +670,8 @@ struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport,
614 * scheduling. 670 * scheduling.
615 */ 671 */
616static struct ip_vs_dest * 672static struct ip_vs_dest *
617ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) 673ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
674 __be16 dport)
618{ 675{
619 struct ip_vs_dest *dest, *nxt; 676 struct ip_vs_dest *dest, *nxt;
620 677
@@ -622,17 +679,19 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
622 * Find the destination in trash 679 * Find the destination in trash
623 */ 680 */
624 list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { 681 list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
625 IP_VS_DBG(3, "Destination %u/%u.%u.%u.%u:%u still in trash, " 682 IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
626 "dest->refcnt=%d\n", 683 "dest->refcnt=%d\n",
627 dest->vfwmark, 684 dest->vfwmark,
628 NIPQUAD(dest->addr), ntohs(dest->port), 685 IP_VS_DBG_ADDR(svc->af, &dest->addr),
629 atomic_read(&dest->refcnt)); 686 ntohs(dest->port),
630 if (dest->addr == daddr && 687 atomic_read(&dest->refcnt));
688 if (dest->af == svc->af &&
689 ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
631 dest->port == dport && 690 dest->port == dport &&
632 dest->vfwmark == svc->fwmark && 691 dest->vfwmark == svc->fwmark &&
633 dest->protocol == svc->protocol && 692 dest->protocol == svc->protocol &&
634 (svc->fwmark || 693 (svc->fwmark ||
635 (dest->vaddr == svc->addr && 694 (ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) &&
636 dest->vport == svc->port))) { 695 dest->vport == svc->port))) {
637 /* HIT */ 696 /* HIT */
638 return dest; 697 return dest;
@@ -642,10 +701,11 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
642 * Try to purge the destination from trash if not referenced 701 * Try to purge the destination from trash if not referenced
643 */ 702 */
644 if (atomic_read(&dest->refcnt) == 1) { 703 if (atomic_read(&dest->refcnt) == 1) {
645 IP_VS_DBG(3, "Removing destination %u/%u.%u.%u.%u:%u " 704 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u "
646 "from trash\n", 705 "from trash\n",
647 dest->vfwmark, 706 dest->vfwmark,
648 NIPQUAD(dest->addr), ntohs(dest->port)); 707 IP_VS_DBG_ADDR(svc->af, &dest->addr),
708 ntohs(dest->port));
649 list_del(&dest->n_list); 709 list_del(&dest->n_list);
650 ip_vs_dst_reset(dest); 710 ip_vs_dst_reset(dest);
651 __ip_vs_unbind_svc(dest); 711 __ip_vs_unbind_svc(dest);
@@ -684,18 +744,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats)
684{ 744{
685 spin_lock_bh(&stats->lock); 745 spin_lock_bh(&stats->lock);
686 746
687 stats->conns = 0; 747 memset(&stats->ustats, 0, sizeof(stats->ustats));
688 stats->inpkts = 0;
689 stats->outpkts = 0;
690 stats->inbytes = 0;
691 stats->outbytes = 0;
692
693 stats->cps = 0;
694 stats->inpps = 0;
695 stats->outpps = 0;
696 stats->inbps = 0;
697 stats->outbps = 0;
698
699 ip_vs_zero_estimator(stats); 748 ip_vs_zero_estimator(stats);
700 749
701 spin_unlock_bh(&stats->lock); 750 spin_unlock_bh(&stats->lock);
@@ -706,7 +755,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats)
706 */ 755 */
707static void 756static void
708__ip_vs_update_dest(struct ip_vs_service *svc, 757__ip_vs_update_dest(struct ip_vs_service *svc,
709 struct ip_vs_dest *dest, struct ip_vs_dest_user *udest) 758 struct ip_vs_dest *dest, struct ip_vs_dest_user_kern *udest)
710{ 759{
711 int conn_flags; 760 int conn_flags;
712 761
@@ -715,10 +764,18 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
715 conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE; 764 conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE;
716 765
717 /* check if local node and update the flags */ 766 /* check if local node and update the flags */
718 if (inet_addr_type(&init_net, udest->addr) == RTN_LOCAL) { 767#ifdef CONFIG_IP_VS_IPV6
719 conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK) 768 if (svc->af == AF_INET6) {
720 | IP_VS_CONN_F_LOCALNODE; 769 if (__ip_vs_addr_is_local_v6(&udest->addr.in6)) {
721 } 770 conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
771 | IP_VS_CONN_F_LOCALNODE;
772 }
773 } else
774#endif
775 if (inet_addr_type(&init_net, udest->addr.ip) == RTN_LOCAL) {
776 conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
777 | IP_VS_CONN_F_LOCALNODE;
778 }
722 779
723 /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */ 780 /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
724 if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) { 781 if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) {
@@ -759,7 +816,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
759 * Create a destination for the given service 816 * Create a destination for the given service
760 */ 817 */
761static int 818static int
762ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, 819ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
763 struct ip_vs_dest **dest_p) 820 struct ip_vs_dest **dest_p)
764{ 821{
765 struct ip_vs_dest *dest; 822 struct ip_vs_dest *dest;
@@ -767,9 +824,20 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
767 824
768 EnterFunction(2); 825 EnterFunction(2);
769 826
770 atype = inet_addr_type(&init_net, udest->addr); 827#ifdef CONFIG_IP_VS_IPV6
771 if (atype != RTN_LOCAL && atype != RTN_UNICAST) 828 if (svc->af == AF_INET6) {
772 return -EINVAL; 829 atype = ipv6_addr_type(&udest->addr.in6);
830 if ((!(atype & IPV6_ADDR_UNICAST) ||
831 atype & IPV6_ADDR_LINKLOCAL) &&
832 !__ip_vs_addr_is_local_v6(&udest->addr.in6))
833 return -EINVAL;
834 } else
835#endif
836 {
837 atype = inet_addr_type(&init_net, udest->addr.ip);
838 if (atype != RTN_LOCAL && atype != RTN_UNICAST)
839 return -EINVAL;
840 }
773 841
774 dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); 842 dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
775 if (dest == NULL) { 843 if (dest == NULL) {
@@ -777,11 +845,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
777 return -ENOMEM; 845 return -ENOMEM;
778 } 846 }
779 847
848 dest->af = svc->af;
780 dest->protocol = svc->protocol; 849 dest->protocol = svc->protocol;
781 dest->vaddr = svc->addr; 850 dest->vaddr = svc->addr;
782 dest->vport = svc->port; 851 dest->vport = svc->port;
783 dest->vfwmark = svc->fwmark; 852 dest->vfwmark = svc->fwmark;
784 dest->addr = udest->addr; 853 ip_vs_addr_copy(svc->af, &dest->addr, &udest->addr);
785 dest->port = udest->port; 854 dest->port = udest->port;
786 855
787 atomic_set(&dest->activeconns, 0); 856 atomic_set(&dest->activeconns, 0);
@@ -806,10 +875,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
806 * Add a destination into an existing service 875 * Add a destination into an existing service
807 */ 876 */
808static int 877static int
809ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) 878ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
810{ 879{
811 struct ip_vs_dest *dest; 880 struct ip_vs_dest *dest;
812 __be32 daddr = udest->addr; 881 union nf_inet_addr daddr;
813 __be16 dport = udest->port; 882 __be16 dport = udest->port;
814 int ret; 883 int ret;
815 884
@@ -826,10 +895,13 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
826 return -ERANGE; 895 return -ERANGE;
827 } 896 }
828 897
898 ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
899
829 /* 900 /*
830 * Check if the dest already exists in the list 901 * Check if the dest already exists in the list
831 */ 902 */
832 dest = ip_vs_lookup_dest(svc, daddr, dport); 903 dest = ip_vs_lookup_dest(svc, &daddr, dport);
904
833 if (dest != NULL) { 905 if (dest != NULL) {
834 IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n"); 906 IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n");
835 return -EEXIST; 907 return -EEXIST;
@@ -839,15 +911,17 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
839 * Check if the dest already exists in the trash and 911 * Check if the dest already exists in the trash and
840 * is from the same service 912 * is from the same service
841 */ 913 */
842 dest = ip_vs_trash_get_dest(svc, daddr, dport); 914 dest = ip_vs_trash_get_dest(svc, &daddr, dport);
915
843 if (dest != NULL) { 916 if (dest != NULL) {
844 IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, " 917 IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
845 "dest->refcnt=%d, service %u/%u.%u.%u.%u:%u\n", 918 "dest->refcnt=%d, service %u/%s:%u\n",
846 NIPQUAD(daddr), ntohs(dport), 919 IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport),
847 atomic_read(&dest->refcnt), 920 atomic_read(&dest->refcnt),
848 dest->vfwmark, 921 dest->vfwmark,
849 NIPQUAD(dest->vaddr), 922 IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
850 ntohs(dest->vport)); 923 ntohs(dest->vport));
924
851 __ip_vs_update_dest(svc, dest, udest); 925 __ip_vs_update_dest(svc, dest, udest);
852 926
853 /* 927 /*
@@ -868,7 +942,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
868 svc->num_dests++; 942 svc->num_dests++;
869 943
870 /* call the update_service function of its scheduler */ 944 /* call the update_service function of its scheduler */
871 svc->scheduler->update_service(svc); 945 if (svc->scheduler->update_service)
946 svc->scheduler->update_service(svc);
872 947
873 write_unlock_bh(&__ip_vs_svc_lock); 948 write_unlock_bh(&__ip_vs_svc_lock);
874 return 0; 949 return 0;
@@ -898,7 +973,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
898 svc->num_dests++; 973 svc->num_dests++;
899 974
900 /* call the update_service function of its scheduler */ 975 /* call the update_service function of its scheduler */
901 svc->scheduler->update_service(svc); 976 if (svc->scheduler->update_service)
977 svc->scheduler->update_service(svc);
902 978
903 write_unlock_bh(&__ip_vs_svc_lock); 979 write_unlock_bh(&__ip_vs_svc_lock);
904 980
@@ -912,10 +988,10 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
912 * Edit a destination in the given service 988 * Edit a destination in the given service
913 */ 989 */
914static int 990static int
915ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) 991ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
916{ 992{
917 struct ip_vs_dest *dest; 993 struct ip_vs_dest *dest;
918 __be32 daddr = udest->addr; 994 union nf_inet_addr daddr;
919 __be16 dport = udest->port; 995 __be16 dport = udest->port;
920 996
921 EnterFunction(2); 997 EnterFunction(2);
@@ -931,10 +1007,13 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
931 return -ERANGE; 1007 return -ERANGE;
932 } 1008 }
933 1009
1010 ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
1011
934 /* 1012 /*
935 * Lookup the destination list 1013 * Lookup the destination list
936 */ 1014 */
937 dest = ip_vs_lookup_dest(svc, daddr, dport); 1015 dest = ip_vs_lookup_dest(svc, &daddr, dport);
1016
938 if (dest == NULL) { 1017 if (dest == NULL) {
939 IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n"); 1018 IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n");
940 return -ENOENT; 1019 return -ENOENT;
@@ -948,7 +1027,8 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
948 IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); 1027 IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
949 1028
950 /* call the update_service, because server weight may be changed */ 1029 /* call the update_service, because server weight may be changed */
951 svc->scheduler->update_service(svc); 1030 if (svc->scheduler->update_service)
1031 svc->scheduler->update_service(svc);
952 1032
953 write_unlock_bh(&__ip_vs_svc_lock); 1033 write_unlock_bh(&__ip_vs_svc_lock);
954 1034
@@ -987,10 +1067,11 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
987 atomic_dec(&dest->svc->refcnt); 1067 atomic_dec(&dest->svc->refcnt);
988 kfree(dest); 1068 kfree(dest);
989 } else { 1069 } else {
990 IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, " 1070 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
991 "dest->refcnt=%d\n", 1071 "dest->refcnt=%d\n",
992 NIPQUAD(dest->addr), ntohs(dest->port), 1072 IP_VS_DBG_ADDR(dest->af, &dest->addr),
993 atomic_read(&dest->refcnt)); 1073 ntohs(dest->port),
1074 atomic_read(&dest->refcnt));
994 list_add(&dest->n_list, &ip_vs_dest_trash); 1075 list_add(&dest->n_list, &ip_vs_dest_trash);
995 atomic_inc(&dest->refcnt); 1076 atomic_inc(&dest->refcnt);
996 } 1077 }
@@ -1011,12 +1092,12 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
1011 */ 1092 */
1012 list_del(&dest->n_list); 1093 list_del(&dest->n_list);
1013 svc->num_dests--; 1094 svc->num_dests--;
1014 if (svcupd) { 1095
1015 /* 1096 /*
1016 * Call the update_service function of its scheduler 1097 * Call the update_service function of its scheduler
1017 */ 1098 */
1018 svc->scheduler->update_service(svc); 1099 if (svcupd && svc->scheduler->update_service)
1019 } 1100 svc->scheduler->update_service(svc);
1020} 1101}
1021 1102
1022 1103
@@ -1024,15 +1105,15 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
1024 * Delete a destination server in the given service 1105 * Delete a destination server in the given service
1025 */ 1106 */
1026static int 1107static int
1027ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest) 1108ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
1028{ 1109{
1029 struct ip_vs_dest *dest; 1110 struct ip_vs_dest *dest;
1030 __be32 daddr = udest->addr;
1031 __be16 dport = udest->port; 1111 __be16 dport = udest->port;
1032 1112
1033 EnterFunction(2); 1113 EnterFunction(2);
1034 1114
1035 dest = ip_vs_lookup_dest(svc, daddr, dport); 1115 dest = ip_vs_lookup_dest(svc, &udest->addr, dport);
1116
1036 if (dest == NULL) { 1117 if (dest == NULL) {
1037 IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n"); 1118 IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n");
1038 return -ENOENT; 1119 return -ENOENT;
@@ -1067,7 +1148,8 @@ ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest)
1067 * Add a service into the service hash table 1148 * Add a service into the service hash table
1068 */ 1149 */
1069static int 1150static int
1070ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) 1151ip_vs_add_service(struct ip_vs_service_user_kern *u,
1152 struct ip_vs_service **svc_p)
1071{ 1153{
1072 int ret = 0; 1154 int ret = 0;
1073 struct ip_vs_scheduler *sched = NULL; 1155 struct ip_vs_scheduler *sched = NULL;
@@ -1085,6 +1167,19 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
1085 goto out_mod_dec; 1167 goto out_mod_dec;
1086 } 1168 }
1087 1169
1170#ifdef CONFIG_IP_VS_IPV6
1171 if (u->af == AF_INET6) {
1172 if (!sched->supports_ipv6) {
1173 ret = -EAFNOSUPPORT;
1174 goto out_err;
1175 }
1176 if ((u->netmask < 1) || (u->netmask > 128)) {
1177 ret = -EINVAL;
1178 goto out_err;
1179 }
1180 }
1181#endif
1182
1088 svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); 1183 svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
1089 if (svc == NULL) { 1184 if (svc == NULL) {
1090 IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); 1185 IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
@@ -1096,8 +1191,9 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
1096 atomic_set(&svc->usecnt, 1); 1191 atomic_set(&svc->usecnt, 1);
1097 atomic_set(&svc->refcnt, 0); 1192 atomic_set(&svc->refcnt, 0);
1098 1193
1194 svc->af = u->af;
1099 svc->protocol = u->protocol; 1195 svc->protocol = u->protocol;
1100 svc->addr = u->addr; 1196 ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
1101 svc->port = u->port; 1197 svc->port = u->port;
1102 svc->fwmark = u->fwmark; 1198 svc->fwmark = u->fwmark;
1103 svc->flags = u->flags; 1199 svc->flags = u->flags;
@@ -1121,7 +1217,10 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
1121 atomic_inc(&ip_vs_nullsvc_counter); 1217 atomic_inc(&ip_vs_nullsvc_counter);
1122 1218
1123 ip_vs_new_estimator(&svc->stats); 1219 ip_vs_new_estimator(&svc->stats);
1124 ip_vs_num_services++; 1220
1221 /* Count only IPv4 services for old get/setsockopt interface */
1222 if (svc->af == AF_INET)
1223 ip_vs_num_services++;
1125 1224
1126 /* Hash the service into the service table */ 1225 /* Hash the service into the service table */
1127 write_lock_bh(&__ip_vs_svc_lock); 1226 write_lock_bh(&__ip_vs_svc_lock);
@@ -1156,7 +1255,7 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
1156 * Edit a service and bind it with a new scheduler 1255 * Edit a service and bind it with a new scheduler
1157 */ 1256 */
1158static int 1257static int
1159ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u) 1258ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1160{ 1259{
1161 struct ip_vs_scheduler *sched, *old_sched; 1260 struct ip_vs_scheduler *sched, *old_sched;
1162 int ret = 0; 1261 int ret = 0;
@@ -1172,6 +1271,19 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u)
1172 } 1271 }
1173 old_sched = sched; 1272 old_sched = sched;
1174 1273
1274#ifdef CONFIG_IP_VS_IPV6
1275 if (u->af == AF_INET6) {
1276 if (!sched->supports_ipv6) {
1277 ret = -EAFNOSUPPORT;
1278 goto out;
1279 }
1280 if ((u->netmask < 1) || (u->netmask > 128)) {
1281 ret = -EINVAL;
1282 goto out;
1283 }
1284 }
1285#endif
1286
1175 write_lock_bh(&__ip_vs_svc_lock); 1287 write_lock_bh(&__ip_vs_svc_lock);
1176 1288
1177 /* 1289 /*
@@ -1193,7 +1305,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u)
1193 */ 1305 */
1194 if ((ret = ip_vs_unbind_scheduler(svc))) { 1306 if ((ret = ip_vs_unbind_scheduler(svc))) {
1195 old_sched = sched; 1307 old_sched = sched;
1196 goto out; 1308 goto out_unlock;
1197 } 1309 }
1198 1310
1199 /* 1311 /*
@@ -1212,12 +1324,13 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u)
1212 */ 1324 */
1213 ip_vs_bind_scheduler(svc, old_sched); 1325 ip_vs_bind_scheduler(svc, old_sched);
1214 old_sched = sched; 1326 old_sched = sched;
1215 goto out; 1327 goto out_unlock;
1216 } 1328 }
1217 } 1329 }
1218 1330
1219 out: 1331 out_unlock:
1220 write_unlock_bh(&__ip_vs_svc_lock); 1332 write_unlock_bh(&__ip_vs_svc_lock);
1333 out:
1221 1334
1222 if (old_sched) 1335 if (old_sched)
1223 ip_vs_scheduler_put(old_sched); 1336 ip_vs_scheduler_put(old_sched);
@@ -1236,7 +1349,10 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
1236 struct ip_vs_dest *dest, *nxt; 1349 struct ip_vs_dest *dest, *nxt;
1237 struct ip_vs_scheduler *old_sched; 1350 struct ip_vs_scheduler *old_sched;
1238 1351
1239 ip_vs_num_services--; 1352 /* Count only IPv4 services for old get/setsockopt interface */
1353 if (svc->af == AF_INET)
1354 ip_vs_num_services--;
1355
1240 ip_vs_kill_estimator(&svc->stats); 1356 ip_vs_kill_estimator(&svc->stats);
1241 1357
1242 /* Unbind scheduler */ 1358 /* Unbind scheduler */
@@ -1671,6 +1787,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
1671} 1787}
1672 1788
1673static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos) 1789static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos)
1790__acquires(__ip_vs_svc_lock)
1674{ 1791{
1675 1792
1676 read_lock_bh(&__ip_vs_svc_lock); 1793 read_lock_bh(&__ip_vs_svc_lock);
@@ -1724,6 +1841,7 @@ static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1724} 1841}
1725 1842
1726static void ip_vs_info_seq_stop(struct seq_file *seq, void *v) 1843static void ip_vs_info_seq_stop(struct seq_file *seq, void *v)
1844__releases(__ip_vs_svc_lock)
1727{ 1845{
1728 read_unlock_bh(&__ip_vs_svc_lock); 1846 read_unlock_bh(&__ip_vs_svc_lock);
1729} 1847}
@@ -1744,15 +1862,25 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1744 const struct ip_vs_iter *iter = seq->private; 1862 const struct ip_vs_iter *iter = seq->private;
1745 const struct ip_vs_dest *dest; 1863 const struct ip_vs_dest *dest;
1746 1864
1747 if (iter->table == ip_vs_svc_table) 1865 if (iter->table == ip_vs_svc_table) {
1748 seq_printf(seq, "%s %08X:%04X %s ", 1866#ifdef CONFIG_IP_VS_IPV6
1749 ip_vs_proto_name(svc->protocol), 1867 if (svc->af == AF_INET6)
1750 ntohl(svc->addr), 1868 seq_printf(seq, "%s [" NIP6_FMT "]:%04X %s ",
1751 ntohs(svc->port), 1869 ip_vs_proto_name(svc->protocol),
1752 svc->scheduler->name); 1870 NIP6(svc->addr.in6),
1753 else 1871 ntohs(svc->port),
1872 svc->scheduler->name);
1873 else
1874#endif
1875 seq_printf(seq, "%s %08X:%04X %s ",
1876 ip_vs_proto_name(svc->protocol),
1877 ntohl(svc->addr.ip),
1878 ntohs(svc->port),
1879 svc->scheduler->name);
1880 } else {
1754 seq_printf(seq, "FWM %08X %s ", 1881 seq_printf(seq, "FWM %08X %s ",
1755 svc->fwmark, svc->scheduler->name); 1882 svc->fwmark, svc->scheduler->name);
1883 }
1756 1884
1757 if (svc->flags & IP_VS_SVC_F_PERSISTENT) 1885 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
1758 seq_printf(seq, "persistent %d %08X\n", 1886 seq_printf(seq, "persistent %d %08X\n",
@@ -1762,13 +1890,29 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1762 seq_putc(seq, '\n'); 1890 seq_putc(seq, '\n');
1763 1891
1764 list_for_each_entry(dest, &svc->destinations, n_list) { 1892 list_for_each_entry(dest, &svc->destinations, n_list) {
1765 seq_printf(seq, 1893#ifdef CONFIG_IP_VS_IPV6
1766 " -> %08X:%04X %-7s %-6d %-10d %-10d\n", 1894 if (dest->af == AF_INET6)
1767 ntohl(dest->addr), ntohs(dest->port), 1895 seq_printf(seq,
1768 ip_vs_fwd_name(atomic_read(&dest->conn_flags)), 1896 " -> [" NIP6_FMT "]:%04X"
1769 atomic_read(&dest->weight), 1897 " %-7s %-6d %-10d %-10d\n",
1770 atomic_read(&dest->activeconns), 1898 NIP6(dest->addr.in6),
1771 atomic_read(&dest->inactconns)); 1899 ntohs(dest->port),
1900 ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
1901 atomic_read(&dest->weight),
1902 atomic_read(&dest->activeconns),
1903 atomic_read(&dest->inactconns));
1904 else
1905#endif
1906 seq_printf(seq,
1907 " -> %08X:%04X "
1908 "%-7s %-6d %-10d %-10d\n",
1909 ntohl(dest->addr.ip),
1910 ntohs(dest->port),
1911 ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
1912 atomic_read(&dest->weight),
1913 atomic_read(&dest->activeconns),
1914 atomic_read(&dest->inactconns));
1915
1772 } 1916 }
1773 } 1917 }
1774 return 0; 1918 return 0;
@@ -1812,20 +1956,20 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
1812 " Conns Packets Packets Bytes Bytes\n"); 1956 " Conns Packets Packets Bytes Bytes\n");
1813 1957
1814 spin_lock_bh(&ip_vs_stats.lock); 1958 spin_lock_bh(&ip_vs_stats.lock);
1815 seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.conns, 1959 seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
1816 ip_vs_stats.inpkts, ip_vs_stats.outpkts, 1960 ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
1817 (unsigned long long) ip_vs_stats.inbytes, 1961 (unsigned long long) ip_vs_stats.ustats.inbytes,
1818 (unsigned long long) ip_vs_stats.outbytes); 1962 (unsigned long long) ip_vs_stats.ustats.outbytes);
1819 1963
1820/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ 1964/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
1821 seq_puts(seq, 1965 seq_puts(seq,
1822 " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); 1966 " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
1823 seq_printf(seq,"%8X %8X %8X %16X %16X\n", 1967 seq_printf(seq,"%8X %8X %8X %16X %16X\n",
1824 ip_vs_stats.cps, 1968 ip_vs_stats.ustats.cps,
1825 ip_vs_stats.inpps, 1969 ip_vs_stats.ustats.inpps,
1826 ip_vs_stats.outpps, 1970 ip_vs_stats.ustats.outpps,
1827 ip_vs_stats.inbps, 1971 ip_vs_stats.ustats.inbps,
1828 ip_vs_stats.outbps); 1972 ip_vs_stats.ustats.outbps);
1829 spin_unlock_bh(&ip_vs_stats.lock); 1973 spin_unlock_bh(&ip_vs_stats.lock);
1830 1974
1831 return 0; 1975 return 0;
@@ -1900,14 +2044,44 @@ static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = {
1900 [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN, 2044 [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN,
1901}; 2045};
1902 2046
2047static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc,
2048 struct ip_vs_service_user *usvc_compat)
2049{
2050 usvc->af = AF_INET;
2051 usvc->protocol = usvc_compat->protocol;
2052 usvc->addr.ip = usvc_compat->addr;
2053 usvc->port = usvc_compat->port;
2054 usvc->fwmark = usvc_compat->fwmark;
2055
2056 /* Deep copy of sched_name is not needed here */
2057 usvc->sched_name = usvc_compat->sched_name;
2058
2059 usvc->flags = usvc_compat->flags;
2060 usvc->timeout = usvc_compat->timeout;
2061 usvc->netmask = usvc_compat->netmask;
2062}
2063
2064static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
2065 struct ip_vs_dest_user *udest_compat)
2066{
2067 udest->addr.ip = udest_compat->addr;
2068 udest->port = udest_compat->port;
2069 udest->conn_flags = udest_compat->conn_flags;
2070 udest->weight = udest_compat->weight;
2071 udest->u_threshold = udest_compat->u_threshold;
2072 udest->l_threshold = udest_compat->l_threshold;
2073}
2074
1903static int 2075static int
1904do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) 2076do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1905{ 2077{
1906 int ret; 2078 int ret;
1907 unsigned char arg[MAX_ARG_LEN]; 2079 unsigned char arg[MAX_ARG_LEN];
1908 struct ip_vs_service_user *usvc; 2080 struct ip_vs_service_user *usvc_compat;
2081 struct ip_vs_service_user_kern usvc;
1909 struct ip_vs_service *svc; 2082 struct ip_vs_service *svc;
1910 struct ip_vs_dest_user *udest; 2083 struct ip_vs_dest_user *udest_compat;
2084 struct ip_vs_dest_user_kern udest;
1911 2085
1912 if (!capable(CAP_NET_ADMIN)) 2086 if (!capable(CAP_NET_ADMIN))
1913 return -EPERM; 2087 return -EPERM;
@@ -1947,35 +2121,40 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1947 goto out_unlock; 2121 goto out_unlock;
1948 } 2122 }
1949 2123
1950 usvc = (struct ip_vs_service_user *)arg; 2124 usvc_compat = (struct ip_vs_service_user *)arg;
1951 udest = (struct ip_vs_dest_user *)(usvc + 1); 2125 udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1);
2126
2127 /* We only use the new structs internally, so copy userspace compat
2128 * structs to extended internal versions */
2129 ip_vs_copy_usvc_compat(&usvc, usvc_compat);
2130 ip_vs_copy_udest_compat(&udest, udest_compat);
1952 2131
1953 if (cmd == IP_VS_SO_SET_ZERO) { 2132 if (cmd == IP_VS_SO_SET_ZERO) {
1954 /* if no service address is set, zero counters in all */ 2133 /* if no service address is set, zero counters in all */
1955 if (!usvc->fwmark && !usvc->addr && !usvc->port) { 2134 if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
1956 ret = ip_vs_zero_all(); 2135 ret = ip_vs_zero_all();
1957 goto out_unlock; 2136 goto out_unlock;
1958 } 2137 }
1959 } 2138 }
1960 2139
1961 /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ 2140 /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */
1962 if (usvc->protocol!=IPPROTO_TCP && usvc->protocol!=IPPROTO_UDP) { 2141 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) {
1963 IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n", 2142 IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n",
1964 usvc->protocol, NIPQUAD(usvc->addr), 2143 usvc.protocol, NIPQUAD(usvc.addr.ip),
1965 ntohs(usvc->port), usvc->sched_name); 2144 ntohs(usvc.port), usvc.sched_name);
1966 ret = -EFAULT; 2145 ret = -EFAULT;
1967 goto out_unlock; 2146 goto out_unlock;
1968 } 2147 }
1969 2148
1970 /* Lookup the exact service by <protocol, addr, port> or fwmark */ 2149 /* Lookup the exact service by <protocol, addr, port> or fwmark */
1971 if (usvc->fwmark == 0) 2150 if (usvc.fwmark == 0)
1972 svc = __ip_vs_service_get(usvc->protocol, 2151 svc = __ip_vs_service_get(usvc.af, usvc.protocol,
1973 usvc->addr, usvc->port); 2152 &usvc.addr, usvc.port);
1974 else 2153 else
1975 svc = __ip_vs_svc_fwm_get(usvc->fwmark); 2154 svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark);
1976 2155
1977 if (cmd != IP_VS_SO_SET_ADD 2156 if (cmd != IP_VS_SO_SET_ADD
1978 && (svc == NULL || svc->protocol != usvc->protocol)) { 2157 && (svc == NULL || svc->protocol != usvc.protocol)) {
1979 ret = -ESRCH; 2158 ret = -ESRCH;
1980 goto out_unlock; 2159 goto out_unlock;
1981 } 2160 }
@@ -1985,10 +2164,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1985 if (svc != NULL) 2164 if (svc != NULL)
1986 ret = -EEXIST; 2165 ret = -EEXIST;
1987 else 2166 else
1988 ret = ip_vs_add_service(usvc, &svc); 2167 ret = ip_vs_add_service(&usvc, &svc);
1989 break; 2168 break;
1990 case IP_VS_SO_SET_EDIT: 2169 case IP_VS_SO_SET_EDIT:
1991 ret = ip_vs_edit_service(svc, usvc); 2170 ret = ip_vs_edit_service(svc, &usvc);
1992 break; 2171 break;
1993 case IP_VS_SO_SET_DEL: 2172 case IP_VS_SO_SET_DEL:
1994 ret = ip_vs_del_service(svc); 2173 ret = ip_vs_del_service(svc);
@@ -1999,13 +2178,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1999 ret = ip_vs_zero_service(svc); 2178 ret = ip_vs_zero_service(svc);
2000 break; 2179 break;
2001 case IP_VS_SO_SET_ADDDEST: 2180 case IP_VS_SO_SET_ADDDEST:
2002 ret = ip_vs_add_dest(svc, udest); 2181 ret = ip_vs_add_dest(svc, &udest);
2003 break; 2182 break;
2004 case IP_VS_SO_SET_EDITDEST: 2183 case IP_VS_SO_SET_EDITDEST:
2005 ret = ip_vs_edit_dest(svc, udest); 2184 ret = ip_vs_edit_dest(svc, &udest);
2006 break; 2185 break;
2007 case IP_VS_SO_SET_DELDEST: 2186 case IP_VS_SO_SET_DELDEST:
2008 ret = ip_vs_del_dest(svc, udest); 2187 ret = ip_vs_del_dest(svc, &udest);
2009 break; 2188 break;
2010 default: 2189 default:
2011 ret = -EINVAL; 2190 ret = -EINVAL;
@@ -2028,7 +2207,7 @@ static void
2028ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src) 2207ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
2029{ 2208{
2030 spin_lock_bh(&src->lock); 2209 spin_lock_bh(&src->lock);
2031 memcpy(dst, src, (char*)&src->lock - (char*)src); 2210 memcpy(dst, &src->ustats, sizeof(*dst));
2032 spin_unlock_bh(&src->lock); 2211 spin_unlock_bh(&src->lock);
2033} 2212}
2034 2213
@@ -2036,7 +2215,7 @@ static void
2036ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) 2215ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
2037{ 2216{
2038 dst->protocol = src->protocol; 2217 dst->protocol = src->protocol;
2039 dst->addr = src->addr; 2218 dst->addr = src->addr.ip;
2040 dst->port = src->port; 2219 dst->port = src->port;
2041 dst->fwmark = src->fwmark; 2220 dst->fwmark = src->fwmark;
2042 strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name)); 2221 strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name));
@@ -2058,6 +2237,10 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
2058 2237
2059 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 2238 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
2060 list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { 2239 list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
2240 /* Only expose IPv4 entries to old interface */
2241 if (svc->af != AF_INET)
2242 continue;
2243
2061 if (count >= get->num_services) 2244 if (count >= get->num_services)
2062 goto out; 2245 goto out;
2063 memset(&entry, 0, sizeof(entry)); 2246 memset(&entry, 0, sizeof(entry));
@@ -2073,6 +2256,10 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
2073 2256
2074 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 2257 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
2075 list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { 2258 list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
2259 /* Only expose IPv4 entries to old interface */
2260 if (svc->af != AF_INET)
2261 continue;
2262
2076 if (count >= get->num_services) 2263 if (count >= get->num_services)
2077 goto out; 2264 goto out;
2078 memset(&entry, 0, sizeof(entry)); 2265 memset(&entry, 0, sizeof(entry));
@@ -2094,13 +2281,15 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
2094 struct ip_vs_get_dests __user *uptr) 2281 struct ip_vs_get_dests __user *uptr)
2095{ 2282{
2096 struct ip_vs_service *svc; 2283 struct ip_vs_service *svc;
2284 union nf_inet_addr addr = { .ip = get->addr };
2097 int ret = 0; 2285 int ret = 0;
2098 2286
2099 if (get->fwmark) 2287 if (get->fwmark)
2100 svc = __ip_vs_svc_fwm_get(get->fwmark); 2288 svc = __ip_vs_svc_fwm_get(AF_INET, get->fwmark);
2101 else 2289 else
2102 svc = __ip_vs_service_get(get->protocol, 2290 svc = __ip_vs_service_get(AF_INET, get->protocol, &addr,
2103 get->addr, get->port); 2291 get->port);
2292
2104 if (svc) { 2293 if (svc) {
2105 int count = 0; 2294 int count = 0;
2106 struct ip_vs_dest *dest; 2295 struct ip_vs_dest *dest;
@@ -2110,7 +2299,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
2110 if (count >= get->num_dests) 2299 if (count >= get->num_dests)
2111 break; 2300 break;
2112 2301
2113 entry.addr = dest->addr; 2302 entry.addr = dest->addr.ip;
2114 entry.port = dest->port; 2303 entry.port = dest->port;
2115 entry.conn_flags = atomic_read(&dest->conn_flags); 2304 entry.conn_flags = atomic_read(&dest->conn_flags);
2116 entry.weight = atomic_read(&dest->weight); 2305 entry.weight = atomic_read(&dest->weight);
@@ -2235,13 +2424,15 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2235 { 2424 {
2236 struct ip_vs_service_entry *entry; 2425 struct ip_vs_service_entry *entry;
2237 struct ip_vs_service *svc; 2426 struct ip_vs_service *svc;
2427 union nf_inet_addr addr;
2238 2428
2239 entry = (struct ip_vs_service_entry *)arg; 2429 entry = (struct ip_vs_service_entry *)arg;
2430 addr.ip = entry->addr;
2240 if (entry->fwmark) 2431 if (entry->fwmark)
2241 svc = __ip_vs_svc_fwm_get(entry->fwmark); 2432 svc = __ip_vs_svc_fwm_get(AF_INET, entry->fwmark);
2242 else 2433 else
2243 svc = __ip_vs_service_get(entry->protocol, 2434 svc = __ip_vs_service_get(AF_INET, entry->protocol,
2244 entry->addr, entry->port); 2435 &addr, entry->port);
2245 if (svc) { 2436 if (svc) {
2246 ip_vs_copy_service(entry, svc); 2437 ip_vs_copy_service(entry, svc);
2247 if (copy_to_user(user, entry, sizeof(*entry)) != 0) 2438 if (copy_to_user(user, entry, sizeof(*entry)) != 0)
@@ -2320,6 +2511,875 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
2320 .owner = THIS_MODULE, 2511 .owner = THIS_MODULE,
2321}; 2512};
2322 2513
2514/*
2515 * Generic Netlink interface
2516 */
2517
2518/* IPVS genetlink family */
2519static struct genl_family ip_vs_genl_family = {
2520 .id = GENL_ID_GENERATE,
2521 .hdrsize = 0,
2522 .name = IPVS_GENL_NAME,
2523 .version = IPVS_GENL_VERSION,
2524 .maxattr = IPVS_CMD_MAX,
2525};
2526
2527/* Policy used for first-level command attributes */
2528static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
2529 [IPVS_CMD_ATTR_SERVICE] = { .type = NLA_NESTED },
2530 [IPVS_CMD_ATTR_DEST] = { .type = NLA_NESTED },
2531 [IPVS_CMD_ATTR_DAEMON] = { .type = NLA_NESTED },
2532 [IPVS_CMD_ATTR_TIMEOUT_TCP] = { .type = NLA_U32 },
2533 [IPVS_CMD_ATTR_TIMEOUT_TCP_FIN] = { .type = NLA_U32 },
2534 [IPVS_CMD_ATTR_TIMEOUT_UDP] = { .type = NLA_U32 },
2535};
2536
2537/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */
2538static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
2539 [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
2540 [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
2541 .len = IP_VS_IFNAME_MAXLEN },
2542 [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
2543};
2544
2545/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */
2546static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
2547 [IPVS_SVC_ATTR_AF] = { .type = NLA_U16 },
2548 [IPVS_SVC_ATTR_PROTOCOL] = { .type = NLA_U16 },
2549 [IPVS_SVC_ATTR_ADDR] = { .type = NLA_BINARY,
2550 .len = sizeof(union nf_inet_addr) },
2551 [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
2552 [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
2553 [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
2554 .len = IP_VS_SCHEDNAME_MAXLEN },
2555 [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
2556 .len = sizeof(struct ip_vs_flags) },
2557 [IPVS_SVC_ATTR_TIMEOUT] = { .type = NLA_U32 },
2558 [IPVS_SVC_ATTR_NETMASK] = { .type = NLA_U32 },
2559 [IPVS_SVC_ATTR_STATS] = { .type = NLA_NESTED },
2560};
2561
2562/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */
2563static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
2564 [IPVS_DEST_ATTR_ADDR] = { .type = NLA_BINARY,
2565 .len = sizeof(union nf_inet_addr) },
2566 [IPVS_DEST_ATTR_PORT] = { .type = NLA_U16 },
2567 [IPVS_DEST_ATTR_FWD_METHOD] = { .type = NLA_U32 },
2568 [IPVS_DEST_ATTR_WEIGHT] = { .type = NLA_U32 },
2569 [IPVS_DEST_ATTR_U_THRESH] = { .type = NLA_U32 },
2570 [IPVS_DEST_ATTR_L_THRESH] = { .type = NLA_U32 },
2571 [IPVS_DEST_ATTR_ACTIVE_CONNS] = { .type = NLA_U32 },
2572 [IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 },
2573 [IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 },
2574 [IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED },
2575};
2576
2577static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
2578 struct ip_vs_stats *stats)
2579{
2580 struct nlattr *nl_stats = nla_nest_start(skb, container_type);
2581 if (!nl_stats)
2582 return -EMSGSIZE;
2583
2584 spin_lock_bh(&stats->lock);
2585
2586 NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->ustats.conns);
2587 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->ustats.inpkts);
2588 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->ustats.outpkts);
2589 NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->ustats.inbytes);
2590 NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->ustats.outbytes);
2591 NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->ustats.cps);
2592 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->ustats.inpps);
2593 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->ustats.outpps);
2594 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->ustats.inbps);
2595 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->ustats.outbps);
2596
2597 spin_unlock_bh(&stats->lock);
2598
2599 nla_nest_end(skb, nl_stats);
2600
2601 return 0;
2602
2603nla_put_failure:
2604 spin_unlock_bh(&stats->lock);
2605 nla_nest_cancel(skb, nl_stats);
2606 return -EMSGSIZE;
2607}
2608
2609static int ip_vs_genl_fill_service(struct sk_buff *skb,
2610 struct ip_vs_service *svc)
2611{
2612 struct nlattr *nl_service;
2613 struct ip_vs_flags flags = { .flags = svc->flags,
2614 .mask = ~0 };
2615
2616 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
2617 if (!nl_service)
2618 return -EMSGSIZE;
2619
2620 NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af);
2621
2622 if (svc->fwmark) {
2623 NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
2624 } else {
2625 NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
2626 NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr);
2627 NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port);
2628 }
2629
2630 NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name);
2631 NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags);
2632 NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ);
2633 NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask);
2634
2635 if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
2636 goto nla_put_failure;
2637
2638 nla_nest_end(skb, nl_service);
2639
2640 return 0;
2641
2642nla_put_failure:
2643 nla_nest_cancel(skb, nl_service);
2644 return -EMSGSIZE;
2645}
2646
2647static int ip_vs_genl_dump_service(struct sk_buff *skb,
2648 struct ip_vs_service *svc,
2649 struct netlink_callback *cb)
2650{
2651 void *hdr;
2652
2653 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
2654 &ip_vs_genl_family, NLM_F_MULTI,
2655 IPVS_CMD_NEW_SERVICE);
2656 if (!hdr)
2657 return -EMSGSIZE;
2658
2659 if (ip_vs_genl_fill_service(skb, svc) < 0)
2660 goto nla_put_failure;
2661
2662 return genlmsg_end(skb, hdr);
2663
2664nla_put_failure:
2665 genlmsg_cancel(skb, hdr);
2666 return -EMSGSIZE;
2667}
2668
2669static int ip_vs_genl_dump_services(struct sk_buff *skb,
2670 struct netlink_callback *cb)
2671{
2672 int idx = 0, i;
2673 int start = cb->args[0];
2674 struct ip_vs_service *svc;
2675
2676 mutex_lock(&__ip_vs_mutex);
2677 for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
2678 list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
2679 if (++idx <= start)
2680 continue;
2681 if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
2682 idx--;
2683 goto nla_put_failure;
2684 }
2685 }
2686 }
2687
2688 for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
2689 list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
2690 if (++idx <= start)
2691 continue;
2692 if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
2693 idx--;
2694 goto nla_put_failure;
2695 }
2696 }
2697 }
2698
2699nla_put_failure:
2700 mutex_unlock(&__ip_vs_mutex);
2701 cb->args[0] = idx;
2702
2703 return skb->len;
2704}
2705
2706static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
2707 struct nlattr *nla, int full_entry)
2708{
2709 struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1];
2710 struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr;
2711
2712 /* Parse mandatory identifying service fields first */
2713 if (nla == NULL ||
2714 nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy))
2715 return -EINVAL;
2716
2717 nla_af = attrs[IPVS_SVC_ATTR_AF];
2718 nla_protocol = attrs[IPVS_SVC_ATTR_PROTOCOL];
2719 nla_addr = attrs[IPVS_SVC_ATTR_ADDR];
2720 nla_port = attrs[IPVS_SVC_ATTR_PORT];
2721 nla_fwmark = attrs[IPVS_SVC_ATTR_FWMARK];
2722
2723 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
2724 return -EINVAL;
2725
2726 usvc->af = nla_get_u16(nla_af);
2727#ifdef CONFIG_IP_VS_IPV6
2728 if (usvc->af != AF_INET && usvc->af != AF_INET6)
2729#else
2730 if (usvc->af != AF_INET)
2731#endif
2732 return -EAFNOSUPPORT;
2733
2734 if (nla_fwmark) {
2735 usvc->protocol = IPPROTO_TCP;
2736 usvc->fwmark = nla_get_u32(nla_fwmark);
2737 } else {
2738 usvc->protocol = nla_get_u16(nla_protocol);
2739 nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr));
2740 usvc->port = nla_get_u16(nla_port);
2741 usvc->fwmark = 0;
2742 }
2743
2744 /* If a full entry was requested, check for the additional fields */
2745 if (full_entry) {
2746 struct nlattr *nla_sched, *nla_flags, *nla_timeout,
2747 *nla_netmask;
2748 struct ip_vs_flags flags;
2749 struct ip_vs_service *svc;
2750
2751 nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME];
2752 nla_flags = attrs[IPVS_SVC_ATTR_FLAGS];
2753 nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT];
2754 nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK];
2755
2756 if (!(nla_sched && nla_flags && nla_timeout && nla_netmask))
2757 return -EINVAL;
2758
2759 nla_memcpy(&flags, nla_flags, sizeof(flags));
2760
2761 /* prefill flags from service if it already exists */
2762 if (usvc->fwmark)
2763 svc = __ip_vs_svc_fwm_get(usvc->af, usvc->fwmark);
2764 else
2765 svc = __ip_vs_service_get(usvc->af, usvc->protocol,
2766 &usvc->addr, usvc->port);
2767 if (svc) {
2768 usvc->flags = svc->flags;
2769 ip_vs_service_put(svc);
2770 } else
2771 usvc->flags = 0;
2772
2773 /* set new flags from userland */
2774 usvc->flags = (usvc->flags & ~flags.mask) |
2775 (flags.flags & flags.mask);
2776 usvc->sched_name = nla_data(nla_sched);
2777 usvc->timeout = nla_get_u32(nla_timeout);
2778 usvc->netmask = nla_get_u32(nla_netmask);
2779 }
2780
2781 return 0;
2782}
2783
2784static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla)
2785{
2786 struct ip_vs_service_user_kern usvc;
2787 int ret;
2788
2789 ret = ip_vs_genl_parse_service(&usvc, nla, 0);
2790 if (ret)
2791 return ERR_PTR(ret);
2792
2793 if (usvc.fwmark)
2794 return __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark);
2795 else
2796 return __ip_vs_service_get(usvc.af, usvc.protocol,
2797 &usvc.addr, usvc.port);
2798}
2799
2800static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
2801{
2802 struct nlattr *nl_dest;
2803
2804 nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST);
2805 if (!nl_dest)
2806 return -EMSGSIZE;
2807
2808 NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr);
2809 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
2810
2811 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
2812 atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
2813 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
2814 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
2815 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
2816 NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
2817 atomic_read(&dest->activeconns));
2818 NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS,
2819 atomic_read(&dest->inactconns));
2820 NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
2821 atomic_read(&dest->persistconns));
2822
2823 if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
2824 goto nla_put_failure;
2825
2826 nla_nest_end(skb, nl_dest);
2827
2828 return 0;
2829
2830nla_put_failure:
2831 nla_nest_cancel(skb, nl_dest);
2832 return -EMSGSIZE;
2833}
2834
2835static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
2836 struct netlink_callback *cb)
2837{
2838 void *hdr;
2839
2840 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
2841 &ip_vs_genl_family, NLM_F_MULTI,
2842 IPVS_CMD_NEW_DEST);
2843 if (!hdr)
2844 return -EMSGSIZE;
2845
2846 if (ip_vs_genl_fill_dest(skb, dest) < 0)
2847 goto nla_put_failure;
2848
2849 return genlmsg_end(skb, hdr);
2850
2851nla_put_failure:
2852 genlmsg_cancel(skb, hdr);
2853 return -EMSGSIZE;
2854}
2855
2856static int ip_vs_genl_dump_dests(struct sk_buff *skb,
2857 struct netlink_callback *cb)
2858{
2859 int idx = 0;
2860 int start = cb->args[0];
2861 struct ip_vs_service *svc;
2862 struct ip_vs_dest *dest;
2863 struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
2864
2865 mutex_lock(&__ip_vs_mutex);
2866
2867 /* Try to find the service for which to dump destinations */
2868 if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs,
2869 IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
2870 goto out_err;
2871
2872 svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]);
2873 if (IS_ERR(svc) || svc == NULL)
2874 goto out_err;
2875
2876 /* Dump the destinations */
2877 list_for_each_entry(dest, &svc->destinations, n_list) {
2878 if (++idx <= start)
2879 continue;
2880 if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) {
2881 idx--;
2882 goto nla_put_failure;
2883 }
2884 }
2885
2886nla_put_failure:
2887 cb->args[0] = idx;
2888 ip_vs_service_put(svc);
2889
2890out_err:
2891 mutex_unlock(&__ip_vs_mutex);
2892
2893 return skb->len;
2894}
2895
2896static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
2897 struct nlattr *nla, int full_entry)
2898{
2899 struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1];
2900 struct nlattr *nla_addr, *nla_port;
2901
2902 /* Parse mandatory identifying destination fields first */
2903 if (nla == NULL ||
2904 nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy))
2905 return -EINVAL;
2906
2907 nla_addr = attrs[IPVS_DEST_ATTR_ADDR];
2908 nla_port = attrs[IPVS_DEST_ATTR_PORT];
2909
2910 if (!(nla_addr && nla_port))
2911 return -EINVAL;
2912
2913 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
2914 udest->port = nla_get_u16(nla_port);
2915
2916 /* If a full entry was requested, check for the additional fields */
2917 if (full_entry) {
2918 struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
2919 *nla_l_thresh;
2920
2921 nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD];
2922 nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT];
2923 nla_u_thresh = attrs[IPVS_DEST_ATTR_U_THRESH];
2924 nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH];
2925
2926 if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh))
2927 return -EINVAL;
2928
2929 udest->conn_flags = nla_get_u32(nla_fwd)
2930 & IP_VS_CONN_F_FWD_MASK;
2931 udest->weight = nla_get_u32(nla_weight);
2932 udest->u_threshold = nla_get_u32(nla_u_thresh);
2933 udest->l_threshold = nla_get_u32(nla_l_thresh);
2934 }
2935
2936 return 0;
2937}
2938
2939static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state,
2940 const char *mcast_ifn, __be32 syncid)
2941{
2942 struct nlattr *nl_daemon;
2943
2944 nl_daemon = nla_nest_start(skb, IPVS_CMD_ATTR_DAEMON);
2945 if (!nl_daemon)
2946 return -EMSGSIZE;
2947
2948 NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state);
2949 NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn);
2950 NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid);
2951
2952 nla_nest_end(skb, nl_daemon);
2953
2954 return 0;
2955
2956nla_put_failure:
2957 nla_nest_cancel(skb, nl_daemon);
2958 return -EMSGSIZE;
2959}
2960
2961static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state,
2962 const char *mcast_ifn, __be32 syncid,
2963 struct netlink_callback *cb)
2964{
2965 void *hdr;
2966 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
2967 &ip_vs_genl_family, NLM_F_MULTI,
2968 IPVS_CMD_NEW_DAEMON);
2969 if (!hdr)
2970 return -EMSGSIZE;
2971
2972 if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
2973 goto nla_put_failure;
2974
2975 return genlmsg_end(skb, hdr);
2976
2977nla_put_failure:
2978 genlmsg_cancel(skb, hdr);
2979 return -EMSGSIZE;
2980}
2981
2982static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
2983 struct netlink_callback *cb)
2984{
2985 mutex_lock(&__ip_vs_mutex);
2986 if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
2987 if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
2988 ip_vs_master_mcast_ifn,
2989 ip_vs_master_syncid, cb) < 0)
2990 goto nla_put_failure;
2991
2992 cb->args[0] = 1;
2993 }
2994
2995 if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
2996 if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
2997 ip_vs_backup_mcast_ifn,
2998 ip_vs_backup_syncid, cb) < 0)
2999 goto nla_put_failure;
3000
3001 cb->args[1] = 1;
3002 }
3003
3004nla_put_failure:
3005 mutex_unlock(&__ip_vs_mutex);
3006
3007 return skb->len;
3008}
3009
3010static int ip_vs_genl_new_daemon(struct nlattr **attrs)
3011{
3012 if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
3013 attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
3014 attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
3015 return -EINVAL;
3016
3017 return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
3018 nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
3019 nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
3020}
3021
3022static int ip_vs_genl_del_daemon(struct nlattr **attrs)
3023{
3024 if (!attrs[IPVS_DAEMON_ATTR_STATE])
3025 return -EINVAL;
3026
3027 return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
3028}
3029
3030static int ip_vs_genl_set_config(struct nlattr **attrs)
3031{
3032 struct ip_vs_timeout_user t;
3033
3034 __ip_vs_get_timeouts(&t);
3035
3036 if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
3037 t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
3038
3039 if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN])
3040 t.tcp_fin_timeout =
3041 nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]);
3042
3043 if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
3044 t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
3045
3046 return ip_vs_set_timeout(&t);
3047}
3048
3049static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3050{
3051 struct ip_vs_service *svc = NULL;
3052 struct ip_vs_service_user_kern usvc;
3053 struct ip_vs_dest_user_kern udest;
3054 int ret = 0, cmd;
3055 int need_full_svc = 0, need_full_dest = 0;
3056
3057 cmd = info->genlhdr->cmd;
3058
3059 mutex_lock(&__ip_vs_mutex);
3060
3061 if (cmd == IPVS_CMD_FLUSH) {
3062 ret = ip_vs_flush();
3063 goto out;
3064 } else if (cmd == IPVS_CMD_SET_CONFIG) {
3065 ret = ip_vs_genl_set_config(info->attrs);
3066 goto out;
3067 } else if (cmd == IPVS_CMD_NEW_DAEMON ||
3068 cmd == IPVS_CMD_DEL_DAEMON) {
3069
3070 struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
3071
3072 if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
3073 nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
3074 info->attrs[IPVS_CMD_ATTR_DAEMON],
3075 ip_vs_daemon_policy)) {
3076 ret = -EINVAL;
3077 goto out;
3078 }
3079
3080 if (cmd == IPVS_CMD_NEW_DAEMON)
3081 ret = ip_vs_genl_new_daemon(daemon_attrs);
3082 else
3083 ret = ip_vs_genl_del_daemon(daemon_attrs);
3084 goto out;
3085 } else if (cmd == IPVS_CMD_ZERO &&
3086 !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
3087 ret = ip_vs_zero_all();
3088 goto out;
3089 }
3090
3091 /* All following commands require a service argument, so check if we
3092 * received a valid one. We need a full service specification when
3093 * adding / editing a service. Only identifying members otherwise. */
3094 if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
3095 need_full_svc = 1;
3096
3097 ret = ip_vs_genl_parse_service(&usvc,
3098 info->attrs[IPVS_CMD_ATTR_SERVICE],
3099 need_full_svc);
3100 if (ret)
3101 goto out;
3102
3103 /* Lookup the exact service by <protocol, addr, port> or fwmark */
3104 if (usvc.fwmark == 0)
3105 svc = __ip_vs_service_get(usvc.af, usvc.protocol,
3106 &usvc.addr, usvc.port);
3107 else
3108 svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark);
3109
3110 /* Unless we're adding a new service, the service must already exist */
3111 if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) {
3112 ret = -ESRCH;
3113 goto out;
3114 }
3115
3116 /* Destination commands require a valid destination argument. For
3117 * adding / editing a destination, we need a full destination
3118 * specification. */
3119 if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST ||
3120 cmd == IPVS_CMD_DEL_DEST) {
3121 if (cmd != IPVS_CMD_DEL_DEST)
3122 need_full_dest = 1;
3123
3124 ret = ip_vs_genl_parse_dest(&udest,
3125 info->attrs[IPVS_CMD_ATTR_DEST],
3126 need_full_dest);
3127 if (ret)
3128 goto out;
3129 }
3130
3131 switch (cmd) {
3132 case IPVS_CMD_NEW_SERVICE:
3133 if (svc == NULL)
3134 ret = ip_vs_add_service(&usvc, &svc);
3135 else
3136 ret = -EEXIST;
3137 break;
3138 case IPVS_CMD_SET_SERVICE:
3139 ret = ip_vs_edit_service(svc, &usvc);
3140 break;
3141 case IPVS_CMD_DEL_SERVICE:
3142 ret = ip_vs_del_service(svc);
3143 break;
3144 case IPVS_CMD_NEW_DEST:
3145 ret = ip_vs_add_dest(svc, &udest);
3146 break;
3147 case IPVS_CMD_SET_DEST:
3148 ret = ip_vs_edit_dest(svc, &udest);
3149 break;
3150 case IPVS_CMD_DEL_DEST:
3151 ret = ip_vs_del_dest(svc, &udest);
3152 break;
3153 case IPVS_CMD_ZERO:
3154 ret = ip_vs_zero_service(svc);
3155 break;
3156 default:
3157 ret = -EINVAL;
3158 }
3159
3160out:
3161 if (svc)
3162 ip_vs_service_put(svc);
3163 mutex_unlock(&__ip_vs_mutex);
3164
3165 return ret;
3166}
3167
3168static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
3169{
3170 struct sk_buff *msg;
3171 void *reply;
3172 int ret, cmd, reply_cmd;
3173
3174 cmd = info->genlhdr->cmd;
3175
3176 if (cmd == IPVS_CMD_GET_SERVICE)
3177 reply_cmd = IPVS_CMD_NEW_SERVICE;
3178 else if (cmd == IPVS_CMD_GET_INFO)
3179 reply_cmd = IPVS_CMD_SET_INFO;
3180 else if (cmd == IPVS_CMD_GET_CONFIG)
3181 reply_cmd = IPVS_CMD_SET_CONFIG;
3182 else {
3183 IP_VS_ERR("unknown Generic Netlink command\n");
3184 return -EINVAL;
3185 }
3186
3187 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3188 if (!msg)
3189 return -ENOMEM;
3190
3191 mutex_lock(&__ip_vs_mutex);
3192
3193 reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd);
3194 if (reply == NULL)
3195 goto nla_put_failure;
3196
3197 switch (cmd) {
3198 case IPVS_CMD_GET_SERVICE:
3199 {
3200 struct ip_vs_service *svc;
3201
3202 svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]);
3203 if (IS_ERR(svc)) {
3204 ret = PTR_ERR(svc);
3205 goto out_err;
3206 } else if (svc) {
3207 ret = ip_vs_genl_fill_service(msg, svc);
3208 ip_vs_service_put(svc);
3209 if (ret)
3210 goto nla_put_failure;
3211 } else {
3212 ret = -ESRCH;
3213 goto out_err;
3214 }
3215
3216 break;
3217 }
3218
3219 case IPVS_CMD_GET_CONFIG:
3220 {
3221 struct ip_vs_timeout_user t;
3222
3223 __ip_vs_get_timeouts(&t);
3224#ifdef CONFIG_IP_VS_PROTO_TCP
3225 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
3226 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
3227 t.tcp_fin_timeout);
3228#endif
3229#ifdef CONFIG_IP_VS_PROTO_UDP
3230 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout);
3231#endif
3232
3233 break;
3234 }
3235
3236 case IPVS_CMD_GET_INFO:
3237 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
3238 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
3239 IP_VS_CONN_TAB_SIZE);
3240 break;
3241 }
3242
3243 genlmsg_end(msg, reply);
3244 ret = genlmsg_unicast(msg, info->snd_pid);
3245 goto out;
3246
3247nla_put_failure:
3248 IP_VS_ERR("not enough space in Netlink message\n");
3249 ret = -EMSGSIZE;
3250
3251out_err:
3252 nlmsg_free(msg);
3253out:
3254 mutex_unlock(&__ip_vs_mutex);
3255
3256 return ret;
3257}
3258
3259
3260static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
3261 {
3262 .cmd = IPVS_CMD_NEW_SERVICE,
3263 .flags = GENL_ADMIN_PERM,
3264 .policy = ip_vs_cmd_policy,
3265 .doit = ip_vs_genl_set_cmd,
3266 },
3267 {
3268 .cmd = IPVS_CMD_SET_SERVICE,
3269 .flags = GENL_ADMIN_PERM,
3270 .policy = ip_vs_cmd_policy,
3271 .doit = ip_vs_genl_set_cmd,
3272 },
3273 {
3274 .cmd = IPVS_CMD_DEL_SERVICE,
3275 .flags = GENL_ADMIN_PERM,
3276 .policy = ip_vs_cmd_policy,
3277 .doit = ip_vs_genl_set_cmd,
3278 },
3279 {
3280 .cmd = IPVS_CMD_GET_SERVICE,
3281 .flags = GENL_ADMIN_PERM,
3282 .doit = ip_vs_genl_get_cmd,
3283 .dumpit = ip_vs_genl_dump_services,
3284 .policy = ip_vs_cmd_policy,
3285 },
3286 {
3287 .cmd = IPVS_CMD_NEW_DEST,
3288 .flags = GENL_ADMIN_PERM,
3289 .policy = ip_vs_cmd_policy,
3290 .doit = ip_vs_genl_set_cmd,
3291 },
3292 {
3293 .cmd = IPVS_CMD_SET_DEST,
3294 .flags = GENL_ADMIN_PERM,
3295 .policy = ip_vs_cmd_policy,
3296 .doit = ip_vs_genl_set_cmd,
3297 },
3298 {
3299 .cmd = IPVS_CMD_DEL_DEST,
3300 .flags = GENL_ADMIN_PERM,
3301 .policy = ip_vs_cmd_policy,
3302 .doit = ip_vs_genl_set_cmd,
3303 },
3304 {
3305 .cmd = IPVS_CMD_GET_DEST,
3306 .flags = GENL_ADMIN_PERM,
3307 .policy = ip_vs_cmd_policy,
3308 .dumpit = ip_vs_genl_dump_dests,
3309 },
3310 {
3311 .cmd = IPVS_CMD_NEW_DAEMON,
3312 .flags = GENL_ADMIN_PERM,
3313 .policy = ip_vs_cmd_policy,
3314 .doit = ip_vs_genl_set_cmd,
3315 },
3316 {
3317 .cmd = IPVS_CMD_DEL_DAEMON,
3318 .flags = GENL_ADMIN_PERM,
3319 .policy = ip_vs_cmd_policy,
3320 .doit = ip_vs_genl_set_cmd,
3321 },
3322 {
3323 .cmd = IPVS_CMD_GET_DAEMON,
3324 .flags = GENL_ADMIN_PERM,
3325 .dumpit = ip_vs_genl_dump_daemons,
3326 },
3327 {
3328 .cmd = IPVS_CMD_SET_CONFIG,
3329 .flags = GENL_ADMIN_PERM,
3330 .policy = ip_vs_cmd_policy,
3331 .doit = ip_vs_genl_set_cmd,
3332 },
3333 {
3334 .cmd = IPVS_CMD_GET_CONFIG,
3335 .flags = GENL_ADMIN_PERM,
3336 .doit = ip_vs_genl_get_cmd,
3337 },
3338 {
3339 .cmd = IPVS_CMD_GET_INFO,
3340 .flags = GENL_ADMIN_PERM,
3341 .doit = ip_vs_genl_get_cmd,
3342 },
3343 {
3344 .cmd = IPVS_CMD_ZERO,
3345 .flags = GENL_ADMIN_PERM,
3346 .policy = ip_vs_cmd_policy,
3347 .doit = ip_vs_genl_set_cmd,
3348 },
3349 {
3350 .cmd = IPVS_CMD_FLUSH,
3351 .flags = GENL_ADMIN_PERM,
3352 .doit = ip_vs_genl_set_cmd,
3353 },
3354};
3355
3356static int __init ip_vs_genl_register(void)
3357{
3358 int ret, i;
3359
3360 ret = genl_register_family(&ip_vs_genl_family);
3361 if (ret)
3362 return ret;
3363
3364 for (i = 0; i < ARRAY_SIZE(ip_vs_genl_ops); i++) {
3365 ret = genl_register_ops(&ip_vs_genl_family, &ip_vs_genl_ops[i]);
3366 if (ret)
3367 goto err_out;
3368 }
3369 return 0;
3370
3371err_out:
3372 genl_unregister_family(&ip_vs_genl_family);
3373 return ret;
3374}
3375
3376static void ip_vs_genl_unregister(void)
3377{
3378 genl_unregister_family(&ip_vs_genl_family);
3379}
3380
3381/* End of Generic Netlink interface definitions */
3382
2323 3383
2324int __init ip_vs_control_init(void) 3384int __init ip_vs_control_init(void)
2325{ 3385{
@@ -2334,6 +3394,13 @@ int __init ip_vs_control_init(void)
2334 return ret; 3394 return ret;
2335 } 3395 }
2336 3396
3397 ret = ip_vs_genl_register();
3398 if (ret) {
3399 IP_VS_ERR("cannot register Generic Netlink interface.\n");
3400 nf_unregister_sockopt(&ip_vs_sockopts);
3401 return ret;
3402 }
3403
2337 proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops); 3404 proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
2338 proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops); 3405 proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
2339 3406
@@ -2368,6 +3435,7 @@ void ip_vs_control_cleanup(void)
2368 unregister_sysctl_table(sysctl_header); 3435 unregister_sysctl_table(sysctl_header);
2369 proc_net_remove(&init_net, "ip_vs_stats"); 3436 proc_net_remove(&init_net, "ip_vs_stats");
2370 proc_net_remove(&init_net, "ip_vs"); 3437 proc_net_remove(&init_net, "ip_vs");
3438 ip_vs_genl_unregister();
2371 nf_unregister_sockopt(&ip_vs_sockopts); 3439 nf_unregister_sockopt(&ip_vs_sockopts);
2372 LeaveFunction(2); 3440 LeaveFunction(2);
2373} 3441}
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index fa66824d264f..a16943fd72f1 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -218,7 +218,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
218 IP_VS_DBG(6, "DH: destination IP address %u.%u.%u.%u " 218 IP_VS_DBG(6, "DH: destination IP address %u.%u.%u.%u "
219 "--> server %u.%u.%u.%u:%d\n", 219 "--> server %u.%u.%u.%u:%d\n",
220 NIPQUAD(iph->daddr), 220 NIPQUAD(iph->daddr),
221 NIPQUAD(dest->addr), 221 NIPQUAD(dest->addr.ip),
222 ntohs(dest->port)); 222 ntohs(dest->port));
223 223
224 return dest; 224 return dest;
@@ -234,6 +234,9 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
234 .refcnt = ATOMIC_INIT(0), 234 .refcnt = ATOMIC_INIT(0),
235 .module = THIS_MODULE, 235 .module = THIS_MODULE,
236 .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list), 236 .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
237#ifdef CONFIG_IP_VS_IPV6
238 .supports_ipv6 = 0,
239#endif
237 .init_service = ip_vs_dh_init_svc, 240 .init_service = ip_vs_dh_init_svc,
238 .done_service = ip_vs_dh_done_svc, 241 .done_service = ip_vs_dh_done_svc,
239 .update_service = ip_vs_dh_update_svc, 242 .update_service = ip_vs_dh_update_svc,
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index 5a20f93bd7f9..2eb2860dabb5 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -65,37 +65,37 @@ static void estimation_timer(unsigned long arg)
65 s = container_of(e, struct ip_vs_stats, est); 65 s = container_of(e, struct ip_vs_stats, est);
66 66
67 spin_lock(&s->lock); 67 spin_lock(&s->lock);
68 n_conns = s->conns; 68 n_conns = s->ustats.conns;
69 n_inpkts = s->inpkts; 69 n_inpkts = s->ustats.inpkts;
70 n_outpkts = s->outpkts; 70 n_outpkts = s->ustats.outpkts;
71 n_inbytes = s->inbytes; 71 n_inbytes = s->ustats.inbytes;
72 n_outbytes = s->outbytes; 72 n_outbytes = s->ustats.outbytes;
73 73
74 /* scaled by 2^10, but divided 2 seconds */ 74 /* scaled by 2^10, but divided 2 seconds */
75 rate = (n_conns - e->last_conns)<<9; 75 rate = (n_conns - e->last_conns)<<9;
76 e->last_conns = n_conns; 76 e->last_conns = n_conns;
77 e->cps += ((long)rate - (long)e->cps)>>2; 77 e->cps += ((long)rate - (long)e->cps)>>2;
78 s->cps = (e->cps+0x1FF)>>10; 78 s->ustats.cps = (e->cps+0x1FF)>>10;
79 79
80 rate = (n_inpkts - e->last_inpkts)<<9; 80 rate = (n_inpkts - e->last_inpkts)<<9;
81 e->last_inpkts = n_inpkts; 81 e->last_inpkts = n_inpkts;
82 e->inpps += ((long)rate - (long)e->inpps)>>2; 82 e->inpps += ((long)rate - (long)e->inpps)>>2;
83 s->inpps = (e->inpps+0x1FF)>>10; 83 s->ustats.inpps = (e->inpps+0x1FF)>>10;
84 84
85 rate = (n_outpkts - e->last_outpkts)<<9; 85 rate = (n_outpkts - e->last_outpkts)<<9;
86 e->last_outpkts = n_outpkts; 86 e->last_outpkts = n_outpkts;
87 e->outpps += ((long)rate - (long)e->outpps)>>2; 87 e->outpps += ((long)rate - (long)e->outpps)>>2;
88 s->outpps = (e->outpps+0x1FF)>>10; 88 s->ustats.outpps = (e->outpps+0x1FF)>>10;
89 89
90 rate = (n_inbytes - e->last_inbytes)<<4; 90 rate = (n_inbytes - e->last_inbytes)<<4;
91 e->last_inbytes = n_inbytes; 91 e->last_inbytes = n_inbytes;
92 e->inbps += ((long)rate - (long)e->inbps)>>2; 92 e->inbps += ((long)rate - (long)e->inbps)>>2;
93 s->inbps = (e->inbps+0xF)>>5; 93 s->ustats.inbps = (e->inbps+0xF)>>5;
94 94
95 rate = (n_outbytes - e->last_outbytes)<<4; 95 rate = (n_outbytes - e->last_outbytes)<<4;
96 e->last_outbytes = n_outbytes; 96 e->last_outbytes = n_outbytes;
97 e->outbps += ((long)rate - (long)e->outbps)>>2; 97 e->outbps += ((long)rate - (long)e->outbps)>>2;
98 s->outbps = (e->outbps+0xF)>>5; 98 s->ustats.outbps = (e->outbps+0xF)>>5;
99 spin_unlock(&s->lock); 99 spin_unlock(&s->lock);
100 } 100 }
101 spin_unlock(&est_lock); 101 spin_unlock(&est_lock);
@@ -108,24 +108,22 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats)
108 108
109 INIT_LIST_HEAD(&est->list); 109 INIT_LIST_HEAD(&est->list);
110 110
111 est->last_conns = stats->conns; 111 est->last_conns = stats->ustats.conns;
112 est->cps = stats->cps<<10; 112 est->cps = stats->ustats.cps<<10;
113 113
114 est->last_inpkts = stats->inpkts; 114 est->last_inpkts = stats->ustats.inpkts;
115 est->inpps = stats->inpps<<10; 115 est->inpps = stats->ustats.inpps<<10;
116 116
117 est->last_outpkts = stats->outpkts; 117 est->last_outpkts = stats->ustats.outpkts;
118 est->outpps = stats->outpps<<10; 118 est->outpps = stats->ustats.outpps<<10;
119 119
120 est->last_inbytes = stats->inbytes; 120 est->last_inbytes = stats->ustats.inbytes;
121 est->inbps = stats->inbps<<5; 121 est->inbps = stats->ustats.inbps<<5;
122 122
123 est->last_outbytes = stats->outbytes; 123 est->last_outbytes = stats->ustats.outbytes;
124 est->outbps = stats->outbps<<5; 124 est->outbps = stats->ustats.outbps<<5;
125 125
126 spin_lock_bh(&est_lock); 126 spin_lock_bh(&est_lock);
127 if (list_empty(&est_list))
128 mod_timer(&est_timer, jiffies + 2 * HZ);
129 list_add(&est->list, &est_list); 127 list_add(&est->list, &est_list);
130 spin_unlock_bh(&est_lock); 128 spin_unlock_bh(&est_lock);
131} 129}
@@ -136,11 +134,6 @@ void ip_vs_kill_estimator(struct ip_vs_stats *stats)
136 134
137 spin_lock_bh(&est_lock); 135 spin_lock_bh(&est_lock);
138 list_del(&est->list); 136 list_del(&est->list);
139 while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
140 spin_unlock_bh(&est_lock);
141 cpu_relax();
142 spin_lock_bh(&est_lock);
143 }
144 spin_unlock_bh(&est_lock); 137 spin_unlock_bh(&est_lock);
145} 138}
146 139
@@ -160,3 +153,14 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
160 est->inbps = 0; 153 est->inbps = 0;
161 est->outbps = 0; 154 est->outbps = 0;
162} 155}
156
157int __init ip_vs_estimator_init(void)
158{
159 mod_timer(&est_timer, jiffies + 2 * HZ);
160 return 0;
161}
162
163void ip_vs_estimator_cleanup(void)
164{
165 del_timer_sync(&est_timer);
166}
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c
index c1c758e4f733..2e7dbd8b73a4 100644
--- a/net/ipv4/ipvs/ip_vs_ftp.c
+++ b/net/ipv4/ipvs/ip_vs_ftp.c
@@ -140,13 +140,21 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
140 struct tcphdr *th; 140 struct tcphdr *th;
141 char *data, *data_limit; 141 char *data, *data_limit;
142 char *start, *end; 142 char *start, *end;
143 __be32 from; 143 union nf_inet_addr from;
144 __be16 port; 144 __be16 port;
145 struct ip_vs_conn *n_cp; 145 struct ip_vs_conn *n_cp;
146 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ 146 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */
147 unsigned buf_len; 147 unsigned buf_len;
148 int ret; 148 int ret;
149 149
150#ifdef CONFIG_IP_VS_IPV6
151 /* This application helper doesn't work with IPv6 yet,
152 * so turn this into a no-op for IPv6 packets
153 */
154 if (cp->af == AF_INET6)
155 return 1;
156#endif
157
150 *diff = 0; 158 *diff = 0;
151 159
152 /* Only useful for established sessions */ 160 /* Only useful for established sessions */
@@ -166,24 +174,25 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
166 if (ip_vs_ftp_get_addrport(data, data_limit, 174 if (ip_vs_ftp_get_addrport(data, data_limit,
167 SERVER_STRING, 175 SERVER_STRING,
168 sizeof(SERVER_STRING)-1, ')', 176 sizeof(SERVER_STRING)-1, ')',
169 &from, &port, 177 &from.ip, &port,
170 &start, &end) != 1) 178 &start, &end) != 1)
171 return 1; 179 return 1;
172 180
173 IP_VS_DBG(7, "PASV response (%u.%u.%u.%u:%d) -> " 181 IP_VS_DBG(7, "PASV response (%u.%u.%u.%u:%d) -> "
174 "%u.%u.%u.%u:%d detected\n", 182 "%u.%u.%u.%u:%d detected\n",
175 NIPQUAD(from), ntohs(port), NIPQUAD(cp->caddr), 0); 183 NIPQUAD(from.ip), ntohs(port),
184 NIPQUAD(cp->caddr.ip), 0);
176 185
177 /* 186 /*
178 * Now update or create an connection entry for it 187 * Now update or create an connection entry for it
179 */ 188 */
180 n_cp = ip_vs_conn_out_get(iph->protocol, from, port, 189 n_cp = ip_vs_conn_out_get(AF_INET, iph->protocol, &from, port,
181 cp->caddr, 0); 190 &cp->caddr, 0);
182 if (!n_cp) { 191 if (!n_cp) {
183 n_cp = ip_vs_conn_new(IPPROTO_TCP, 192 n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP,
184 cp->caddr, 0, 193 &cp->caddr, 0,
185 cp->vaddr, port, 194 &cp->vaddr, port,
186 from, port, 195 &from, port,
187 IP_VS_CONN_F_NO_CPORT, 196 IP_VS_CONN_F_NO_CPORT,
188 cp->dest); 197 cp->dest);
189 if (!n_cp) 198 if (!n_cp)
@@ -196,9 +205,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
196 /* 205 /*
197 * Replace the old passive address with the new one 206 * Replace the old passive address with the new one
198 */ 207 */
199 from = n_cp->vaddr; 208 from.ip = n_cp->vaddr.ip;
200 port = n_cp->vport; 209 port = n_cp->vport;
201 sprintf(buf,"%d,%d,%d,%d,%d,%d", NIPQUAD(from), 210 sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip),
202 (ntohs(port)>>8)&255, ntohs(port)&255); 211 (ntohs(port)>>8)&255, ntohs(port)&255);
203 buf_len = strlen(buf); 212 buf_len = strlen(buf);
204 213
@@ -243,10 +252,18 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
243 struct tcphdr *th; 252 struct tcphdr *th;
244 char *data, *data_start, *data_limit; 253 char *data, *data_start, *data_limit;
245 char *start, *end; 254 char *start, *end;
246 __be32 to; 255 union nf_inet_addr to;
247 __be16 port; 256 __be16 port;
248 struct ip_vs_conn *n_cp; 257 struct ip_vs_conn *n_cp;
249 258
259#ifdef CONFIG_IP_VS_IPV6
260 /* This application helper doesn't work with IPv6 yet,
261 * so turn this into a no-op for IPv6 packets
262 */
263 if (cp->af == AF_INET6)
264 return 1;
265#endif
266
250 /* no diff required for incoming packets */ 267 /* no diff required for incoming packets */
251 *diff = 0; 268 *diff = 0;
252 269
@@ -291,12 +308,12 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
291 */ 308 */
292 if (ip_vs_ftp_get_addrport(data_start, data_limit, 309 if (ip_vs_ftp_get_addrport(data_start, data_limit,
293 CLIENT_STRING, sizeof(CLIENT_STRING)-1, 310 CLIENT_STRING, sizeof(CLIENT_STRING)-1,
294 '\r', &to, &port, 311 '\r', &to.ip, &port,
295 &start, &end) != 1) 312 &start, &end) != 1)
296 return 1; 313 return 1;
297 314
298 IP_VS_DBG(7, "PORT %u.%u.%u.%u:%d detected\n", 315 IP_VS_DBG(7, "PORT %u.%u.%u.%u:%d detected\n",
299 NIPQUAD(to), ntohs(port)); 316 NIPQUAD(to.ip), ntohs(port));
300 317
301 /* Passive mode off */ 318 /* Passive mode off */
302 cp->app_data = NULL; 319 cp->app_data = NULL;
@@ -306,16 +323,16 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
306 */ 323 */
307 IP_VS_DBG(7, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n", 324 IP_VS_DBG(7, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n",
308 ip_vs_proto_name(iph->protocol), 325 ip_vs_proto_name(iph->protocol),
309 NIPQUAD(to), ntohs(port), NIPQUAD(cp->vaddr), 0); 326 NIPQUAD(to.ip), ntohs(port), NIPQUAD(cp->vaddr.ip), 0);
310 327
311 n_cp = ip_vs_conn_in_get(iph->protocol, 328 n_cp = ip_vs_conn_in_get(AF_INET, iph->protocol,
312 to, port, 329 &to, port,
313 cp->vaddr, htons(ntohs(cp->vport)-1)); 330 &cp->vaddr, htons(ntohs(cp->vport)-1));
314 if (!n_cp) { 331 if (!n_cp) {
315 n_cp = ip_vs_conn_new(IPPROTO_TCP, 332 n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP,
316 to, port, 333 &to, port,
317 cp->vaddr, htons(ntohs(cp->vport)-1), 334 &cp->vaddr, htons(ntohs(cp->vport)-1),
318 cp->daddr, htons(ntohs(cp->dport)-1), 335 &cp->daddr, htons(ntohs(cp->dport)-1),
319 0, 336 0,
320 cp->dest); 337 cp->dest);
321 if (!n_cp) 338 if (!n_cp)
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 7a6a319f544a..6ecef3518cac 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -96,7 +96,6 @@ struct ip_vs_lblc_entry {
96 * IPVS lblc hash table 96 * IPVS lblc hash table
97 */ 97 */
98struct ip_vs_lblc_table { 98struct ip_vs_lblc_table {
99 rwlock_t lock; /* lock for this table */
100 struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ 99 struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */
101 atomic_t entries; /* number of entries */ 100 atomic_t entries; /* number of entries */
102 int max_size; /* maximum size of entries */ 101 int max_size; /* maximum size of entries */
@@ -123,31 +122,6 @@ static ctl_table vs_vars_table[] = {
123 122
124static struct ctl_table_header * sysctl_header; 123static struct ctl_table_header * sysctl_header;
125 124
126/*
127 * new/free a ip_vs_lblc_entry, which is a mapping of a destionation
128 * IP address to a server.
129 */
130static inline struct ip_vs_lblc_entry *
131ip_vs_lblc_new(__be32 daddr, struct ip_vs_dest *dest)
132{
133 struct ip_vs_lblc_entry *en;
134
135 en = kmalloc(sizeof(struct ip_vs_lblc_entry), GFP_ATOMIC);
136 if (en == NULL) {
137 IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
138 return NULL;
139 }
140
141 INIT_LIST_HEAD(&en->list);
142 en->addr = daddr;
143
144 atomic_inc(&dest->refcnt);
145 en->dest = dest;
146
147 return en;
148}
149
150
151static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) 125static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
152{ 126{
153 list_del(&en->list); 127 list_del(&en->list);
@@ -173,55 +147,66 @@ static inline unsigned ip_vs_lblc_hashkey(__be32 addr)
173 * Hash an entry in the ip_vs_lblc_table. 147 * Hash an entry in the ip_vs_lblc_table.
174 * returns bool success. 148 * returns bool success.
175 */ 149 */
176static int 150static void
177ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) 151ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
178{ 152{
179 unsigned hash; 153 unsigned hash = ip_vs_lblc_hashkey(en->addr);
180
181 if (!list_empty(&en->list)) {
182 IP_VS_ERR("ip_vs_lblc_hash(): request for already hashed, "
183 "called from %p\n", __builtin_return_address(0));
184 return 0;
185 }
186
187 /*
188 * Hash by destination IP address
189 */
190 hash = ip_vs_lblc_hashkey(en->addr);
191 154
192 write_lock(&tbl->lock);
193 list_add(&en->list, &tbl->bucket[hash]); 155 list_add(&en->list, &tbl->bucket[hash]);
194 atomic_inc(&tbl->entries); 156 atomic_inc(&tbl->entries);
195 write_unlock(&tbl->lock);
196
197 return 1;
198} 157}
199 158
200 159
201/* 160/*
202 * Get ip_vs_lblc_entry associated with supplied parameters. 161 * Get ip_vs_lblc_entry associated with supplied parameters. Called under read
162 * lock
203 */ 163 */
204static inline struct ip_vs_lblc_entry * 164static inline struct ip_vs_lblc_entry *
205ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) 165ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
206{ 166{
207 unsigned hash; 167 unsigned hash = ip_vs_lblc_hashkey(addr);
208 struct ip_vs_lblc_entry *en; 168 struct ip_vs_lblc_entry *en;
209 169
210 hash = ip_vs_lblc_hashkey(addr); 170 list_for_each_entry(en, &tbl->bucket[hash], list)
171 if (en->addr == addr)
172 return en;
211 173
212 read_lock(&tbl->lock); 174 return NULL;
175}
213 176
214 list_for_each_entry(en, &tbl->bucket[hash], list) { 177
215 if (en->addr == addr) { 178/*
216 /* HIT */ 179 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
217 read_unlock(&tbl->lock); 180 * address to a server. Called under write lock.
218 return en; 181 */
182static inline struct ip_vs_lblc_entry *
183ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, __be32 daddr,
184 struct ip_vs_dest *dest)
185{
186 struct ip_vs_lblc_entry *en;
187
188 en = ip_vs_lblc_get(tbl, daddr);
189 if (!en) {
190 en = kmalloc(sizeof(*en), GFP_ATOMIC);
191 if (!en) {
192 IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
193 return NULL;
219 } 194 }
220 }
221 195
222 read_unlock(&tbl->lock); 196 en->addr = daddr;
197 en->lastuse = jiffies;
223 198
224 return NULL; 199 atomic_inc(&dest->refcnt);
200 en->dest = dest;
201
202 ip_vs_lblc_hash(tbl, en);
203 } else if (en->dest != dest) {
204 atomic_dec(&en->dest->refcnt);
205 atomic_inc(&dest->refcnt);
206 en->dest = dest;
207 }
208
209 return en;
225} 210}
226 211
227 212
@@ -230,30 +215,29 @@ ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
230 */ 215 */
231static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl) 216static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
232{ 217{
233 int i;
234 struct ip_vs_lblc_entry *en, *nxt; 218 struct ip_vs_lblc_entry *en, *nxt;
219 int i;
235 220
236 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 221 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
237 write_lock(&tbl->lock);
238 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { 222 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
239 ip_vs_lblc_free(en); 223 ip_vs_lblc_free(en);
240 atomic_dec(&tbl->entries); 224 atomic_dec(&tbl->entries);
241 } 225 }
242 write_unlock(&tbl->lock);
243 } 226 }
244} 227}
245 228
246 229
247static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) 230static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
248{ 231{
232 struct ip_vs_lblc_table *tbl = svc->sched_data;
233 struct ip_vs_lblc_entry *en, *nxt;
249 unsigned long now = jiffies; 234 unsigned long now = jiffies;
250 int i, j; 235 int i, j;
251 struct ip_vs_lblc_entry *en, *nxt;
252 236
253 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { 237 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
254 j = (j + 1) & IP_VS_LBLC_TAB_MASK; 238 j = (j + 1) & IP_VS_LBLC_TAB_MASK;
255 239
256 write_lock(&tbl->lock); 240 write_lock(&svc->sched_lock);
257 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 241 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
258 if (time_before(now, 242 if (time_before(now,
259 en->lastuse + sysctl_ip_vs_lblc_expiration)) 243 en->lastuse + sysctl_ip_vs_lblc_expiration))
@@ -262,7 +246,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
262 ip_vs_lblc_free(en); 246 ip_vs_lblc_free(en);
263 atomic_dec(&tbl->entries); 247 atomic_dec(&tbl->entries);
264 } 248 }
265 write_unlock(&tbl->lock); 249 write_unlock(&svc->sched_lock);
266 } 250 }
267 tbl->rover = j; 251 tbl->rover = j;
268} 252}
@@ -281,17 +265,16 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
281 */ 265 */
282static void ip_vs_lblc_check_expire(unsigned long data) 266static void ip_vs_lblc_check_expire(unsigned long data)
283{ 267{
284 struct ip_vs_lblc_table *tbl; 268 struct ip_vs_service *svc = (struct ip_vs_service *) data;
269 struct ip_vs_lblc_table *tbl = svc->sched_data;
285 unsigned long now = jiffies; 270 unsigned long now = jiffies;
286 int goal; 271 int goal;
287 int i, j; 272 int i, j;
288 struct ip_vs_lblc_entry *en, *nxt; 273 struct ip_vs_lblc_entry *en, *nxt;
289 274
290 tbl = (struct ip_vs_lblc_table *)data;
291
292 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { 275 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
293 /* do full expiration check */ 276 /* do full expiration check */
294 ip_vs_lblc_full_check(tbl); 277 ip_vs_lblc_full_check(svc);
295 tbl->counter = 1; 278 tbl->counter = 1;
296 goto out; 279 goto out;
297 } 280 }
@@ -308,7 +291,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
308 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { 291 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
309 j = (j + 1) & IP_VS_LBLC_TAB_MASK; 292 j = (j + 1) & IP_VS_LBLC_TAB_MASK;
310 293
311 write_lock(&tbl->lock); 294 write_lock(&svc->sched_lock);
312 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 295 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
313 if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) 296 if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
314 continue; 297 continue;
@@ -317,7 +300,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
317 atomic_dec(&tbl->entries); 300 atomic_dec(&tbl->entries);
318 goal--; 301 goal--;
319 } 302 }
320 write_unlock(&tbl->lock); 303 write_unlock(&svc->sched_lock);
321 if (goal <= 0) 304 if (goal <= 0)
322 break; 305 break;
323 } 306 }
@@ -336,15 +319,14 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
336 /* 319 /*
337 * Allocate the ip_vs_lblc_table for this service 320 * Allocate the ip_vs_lblc_table for this service
338 */ 321 */
339 tbl = kmalloc(sizeof(struct ip_vs_lblc_table), GFP_ATOMIC); 322 tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
340 if (tbl == NULL) { 323 if (tbl == NULL) {
341 IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n"); 324 IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
342 return -ENOMEM; 325 return -ENOMEM;
343 } 326 }
344 svc->sched_data = tbl; 327 svc->sched_data = tbl;
345 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " 328 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
346 "current service\n", 329 "current service\n", sizeof(*tbl));
347 sizeof(struct ip_vs_lblc_table));
348 330
349 /* 331 /*
350 * Initialize the hash buckets 332 * Initialize the hash buckets
@@ -352,7 +334,6 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
352 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 334 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
353 INIT_LIST_HEAD(&tbl->bucket[i]); 335 INIT_LIST_HEAD(&tbl->bucket[i]);
354 } 336 }
355 rwlock_init(&tbl->lock);
356 tbl->max_size = IP_VS_LBLC_TAB_SIZE*16; 337 tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
357 tbl->rover = 0; 338 tbl->rover = 0;
358 tbl->counter = 1; 339 tbl->counter = 1;
@@ -361,9 +342,8 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
361 * Hook periodic timer for garbage collection 342 * Hook periodic timer for garbage collection
362 */ 343 */
363 setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire, 344 setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
364 (unsigned long)tbl); 345 (unsigned long)svc);
365 tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; 346 mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
366 add_timer(&tbl->periodic_timer);
367 347
368 return 0; 348 return 0;
369} 349}
@@ -380,22 +360,16 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
380 ip_vs_lblc_flush(tbl); 360 ip_vs_lblc_flush(tbl);
381 361
382 /* release the table itself */ 362 /* release the table itself */
383 kfree(svc->sched_data); 363 kfree(tbl);
384 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", 364 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
385 sizeof(struct ip_vs_lblc_table)); 365 sizeof(*tbl));
386 366
387 return 0; 367 return 0;
388} 368}
389 369
390 370
391static int ip_vs_lblc_update_svc(struct ip_vs_service *svc)
392{
393 return 0;
394}
395
396
397static inline struct ip_vs_dest * 371static inline struct ip_vs_dest *
398__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) 372__ip_vs_lblc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
399{ 373{
400 struct ip_vs_dest *dest, *least; 374 struct ip_vs_dest *dest, *least;
401 int loh, doh; 375 int loh, doh;
@@ -448,7 +422,7 @@ __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
448 422
449 IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d " 423 IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d "
450 "activeconns %d refcnt %d weight %d overhead %d\n", 424 "activeconns %d refcnt %d weight %d overhead %d\n",
451 NIPQUAD(least->addr), ntohs(least->port), 425 NIPQUAD(least->addr.ip), ntohs(least->port),
452 atomic_read(&least->activeconns), 426 atomic_read(&least->activeconns),
453 atomic_read(&least->refcnt), 427 atomic_read(&least->refcnt),
454 atomic_read(&least->weight), loh); 428 atomic_read(&least->weight), loh);
@@ -484,47 +458,55 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
484static struct ip_vs_dest * 458static struct ip_vs_dest *
485ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 459ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
486{ 460{
487 struct ip_vs_dest *dest; 461 struct ip_vs_lblc_table *tbl = svc->sched_data;
488 struct ip_vs_lblc_table *tbl;
489 struct ip_vs_lblc_entry *en;
490 struct iphdr *iph = ip_hdr(skb); 462 struct iphdr *iph = ip_hdr(skb);
463 struct ip_vs_dest *dest = NULL;
464 struct ip_vs_lblc_entry *en;
491 465
492 IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); 466 IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");
493 467
494 tbl = (struct ip_vs_lblc_table *)svc->sched_data; 468 /* First look in our cache */
469 read_lock(&svc->sched_lock);
495 en = ip_vs_lblc_get(tbl, iph->daddr); 470 en = ip_vs_lblc_get(tbl, iph->daddr);
496 if (en == NULL) { 471 if (en) {
497 dest = __ip_vs_wlc_schedule(svc, iph); 472 /* We only hold a read lock, but this is atomic */
498 if (dest == NULL) { 473 en->lastuse = jiffies;
499 IP_VS_DBG(1, "no destination available\n"); 474
500 return NULL; 475 /*
501 } 476 * If the destination is not available, i.e. it's in the trash,
502 en = ip_vs_lblc_new(iph->daddr, dest); 477 * we must ignore it, as it may be removed from under our feet,
503 if (en == NULL) { 478 * if someone drops our reference count. Our caller only makes
504 return NULL; 479 * sure that destinations, that are not in the trash, are not
505 } 480 * moved to the trash, while we are scheduling. But anyone can
506 ip_vs_lblc_hash(tbl, en); 481 * free up entries from the trash at any time.
507 } else { 482 */
508 dest = en->dest; 483
509 if (!(dest->flags & IP_VS_DEST_F_AVAILABLE) 484 if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
510 || atomic_read(&dest->weight) <= 0 485 dest = en->dest;
511 || is_overloaded(dest, svc)) { 486 }
512 dest = __ip_vs_wlc_schedule(svc, iph); 487 read_unlock(&svc->sched_lock);
513 if (dest == NULL) { 488
514 IP_VS_DBG(1, "no destination available\n"); 489 /* If the destination has a weight and is not overloaded, use it */
515 return NULL; 490 if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
516 } 491 goto out;
517 atomic_dec(&en->dest->refcnt); 492
518 atomic_inc(&dest->refcnt); 493 /* No cache entry or it is invalid, time to schedule */
519 en->dest = dest; 494 dest = __ip_vs_lblc_schedule(svc, iph);
520 } 495 if (!dest) {
496 IP_VS_DBG(1, "no destination available\n");
497 return NULL;
521 } 498 }
522 en->lastuse = jiffies;
523 499
500 /* If we fail to create a cache entry, we'll just use the valid dest */
501 write_lock(&svc->sched_lock);
502 ip_vs_lblc_new(tbl, iph->daddr, dest);
503 write_unlock(&svc->sched_lock);
504
505out:
524 IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u " 506 IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u "
525 "--> server %u.%u.%u.%u:%d\n", 507 "--> server %u.%u.%u.%u:%d\n",
526 NIPQUAD(en->addr), 508 NIPQUAD(iph->daddr),
527 NIPQUAD(dest->addr), 509 NIPQUAD(dest->addr.ip),
528 ntohs(dest->port)); 510 ntohs(dest->port));
529 511
530 return dest; 512 return dest;
@@ -540,9 +522,11 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
540 .refcnt = ATOMIC_INIT(0), 522 .refcnt = ATOMIC_INIT(0),
541 .module = THIS_MODULE, 523 .module = THIS_MODULE,
542 .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list), 524 .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
525#ifdef CONFIG_IP_VS_IPV6
526 .supports_ipv6 = 0,
527#endif
543 .init_service = ip_vs_lblc_init_svc, 528 .init_service = ip_vs_lblc_init_svc,
544 .done_service = ip_vs_lblc_done_svc, 529 .done_service = ip_vs_lblc_done_svc,
545 .update_service = ip_vs_lblc_update_svc,
546 .schedule = ip_vs_lblc_schedule, 530 .schedule = ip_vs_lblc_schedule,
547}; 531};
548 532
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index c234e73968a6..1f75ea83bcf8 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -106,7 +106,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
106 return NULL; 106 return NULL;
107 } 107 }
108 108
109 e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC); 109 e = kmalloc(sizeof(*e), GFP_ATOMIC);
110 if (e == NULL) { 110 if (e == NULL) {
111 IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n"); 111 IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
112 return NULL; 112 return NULL;
@@ -116,11 +116,9 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
116 e->dest = dest; 116 e->dest = dest;
117 117
118 /* link it to the list */ 118 /* link it to the list */
119 write_lock(&set->lock);
120 e->next = set->list; 119 e->next = set->list;
121 set->list = e; 120 set->list = e;
122 atomic_inc(&set->size); 121 atomic_inc(&set->size);
123 write_unlock(&set->lock);
124 122
125 set->lastmod = jiffies; 123 set->lastmod = jiffies;
126 return e; 124 return e;
@@ -131,7 +129,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
131{ 129{
132 struct ip_vs_dest_list *e, **ep; 130 struct ip_vs_dest_list *e, **ep;
133 131
134 write_lock(&set->lock);
135 for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { 132 for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
136 if (e->dest == dest) { 133 if (e->dest == dest) {
137 /* HIT */ 134 /* HIT */
@@ -144,7 +141,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
144 } 141 }
145 ep = &e->next; 142 ep = &e->next;
146 } 143 }
147 write_unlock(&set->lock);
148} 144}
149 145
150static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) 146static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
@@ -174,7 +170,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
174 if (set == NULL) 170 if (set == NULL)
175 return NULL; 171 return NULL;
176 172
177 read_lock(&set->lock);
178 /* select the first destination server, whose weight > 0 */ 173 /* select the first destination server, whose weight > 0 */
179 for (e=set->list; e!=NULL; e=e->next) { 174 for (e=set->list; e!=NULL; e=e->next) {
180 least = e->dest; 175 least = e->dest;
@@ -188,7 +183,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
188 goto nextstage; 183 goto nextstage;
189 } 184 }
190 } 185 }
191 read_unlock(&set->lock);
192 return NULL; 186 return NULL;
193 187
194 /* find the destination with the weighted least load */ 188 /* find the destination with the weighted least load */
@@ -207,11 +201,10 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
207 loh = doh; 201 loh = doh;
208 } 202 }
209 } 203 }
210 read_unlock(&set->lock);
211 204
212 IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d " 205 IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
213 "activeconns %d refcnt %d weight %d overhead %d\n", 206 "activeconns %d refcnt %d weight %d overhead %d\n",
214 NIPQUAD(least->addr), ntohs(least->port), 207 NIPQUAD(least->addr.ip), ntohs(least->port),
215 atomic_read(&least->activeconns), 208 atomic_read(&least->activeconns),
216 atomic_read(&least->refcnt), 209 atomic_read(&least->refcnt),
217 atomic_read(&least->weight), loh); 210 atomic_read(&least->weight), loh);
@@ -229,7 +222,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
229 if (set == NULL) 222 if (set == NULL)
230 return NULL; 223 return NULL;
231 224
232 read_lock(&set->lock);
233 /* select the first destination server, whose weight > 0 */ 225 /* select the first destination server, whose weight > 0 */
234 for (e=set->list; e!=NULL; e=e->next) { 226 for (e=set->list; e!=NULL; e=e->next) {
235 most = e->dest; 227 most = e->dest;
@@ -239,7 +231,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
239 goto nextstage; 231 goto nextstage;
240 } 232 }
241 } 233 }
242 read_unlock(&set->lock);
243 return NULL; 234 return NULL;
244 235
245 /* find the destination with the weighted most load */ 236 /* find the destination with the weighted most load */
@@ -256,11 +247,10 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
256 moh = doh; 247 moh = doh;
257 } 248 }
258 } 249 }
259 read_unlock(&set->lock);
260 250
261 IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d " 251 IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
262 "activeconns %d refcnt %d weight %d overhead %d\n", 252 "activeconns %d refcnt %d weight %d overhead %d\n",
263 NIPQUAD(most->addr), ntohs(most->port), 253 NIPQUAD(most->addr.ip), ntohs(most->port),
264 atomic_read(&most->activeconns), 254 atomic_read(&most->activeconns),
265 atomic_read(&most->refcnt), 255 atomic_read(&most->refcnt),
266 atomic_read(&most->weight), moh); 256 atomic_read(&most->weight), moh);
@@ -284,7 +274,6 @@ struct ip_vs_lblcr_entry {
284 * IPVS lblcr hash table 274 * IPVS lblcr hash table
285 */ 275 */
286struct ip_vs_lblcr_table { 276struct ip_vs_lblcr_table {
287 rwlock_t lock; /* lock for this table */
288 struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ 277 struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */
289 atomic_t entries; /* number of entries */ 278 atomic_t entries; /* number of entries */
290 int max_size; /* maximum size of entries */ 279 int max_size; /* maximum size of entries */
@@ -311,32 +300,6 @@ static ctl_table vs_vars_table[] = {
311 300
312static struct ctl_table_header * sysctl_header; 301static struct ctl_table_header * sysctl_header;
313 302
314/*
315 * new/free a ip_vs_lblcr_entry, which is a mapping of a destination
316 * IP address to a server.
317 */
318static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr)
319{
320 struct ip_vs_lblcr_entry *en;
321
322 en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC);
323 if (en == NULL) {
324 IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
325 return NULL;
326 }
327
328 INIT_LIST_HEAD(&en->list);
329 en->addr = daddr;
330
331 /* initilize its dest set */
332 atomic_set(&(en->set.size), 0);
333 en->set.list = NULL;
334 rwlock_init(&en->set.lock);
335
336 return en;
337}
338
339
340static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) 303static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
341{ 304{
342 list_del(&en->list); 305 list_del(&en->list);
@@ -358,55 +321,68 @@ static inline unsigned ip_vs_lblcr_hashkey(__be32 addr)
358 * Hash an entry in the ip_vs_lblcr_table. 321 * Hash an entry in the ip_vs_lblcr_table.
359 * returns bool success. 322 * returns bool success.
360 */ 323 */
361static int 324static void
362ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) 325ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
363{ 326{
364 unsigned hash; 327 unsigned hash = ip_vs_lblcr_hashkey(en->addr);
365 328
366 if (!list_empty(&en->list)) {
367 IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, "
368 "called from %p\n", __builtin_return_address(0));
369 return 0;
370 }
371
372 /*
373 * Hash by destination IP address
374 */
375 hash = ip_vs_lblcr_hashkey(en->addr);
376
377 write_lock(&tbl->lock);
378 list_add(&en->list, &tbl->bucket[hash]); 329 list_add(&en->list, &tbl->bucket[hash]);
379 atomic_inc(&tbl->entries); 330 atomic_inc(&tbl->entries);
380 write_unlock(&tbl->lock);
381
382 return 1;
383} 331}
384 332
385 333
386/* 334/*
387 * Get ip_vs_lblcr_entry associated with supplied parameters. 335 * Get ip_vs_lblcr_entry associated with supplied parameters. Called under
336 * read lock.
388 */ 337 */
389static inline struct ip_vs_lblcr_entry * 338static inline struct ip_vs_lblcr_entry *
390ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr) 339ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
391{ 340{
392 unsigned hash; 341 unsigned hash = ip_vs_lblcr_hashkey(addr);
393 struct ip_vs_lblcr_entry *en; 342 struct ip_vs_lblcr_entry *en;
394 343
395 hash = ip_vs_lblcr_hashkey(addr); 344 list_for_each_entry(en, &tbl->bucket[hash], list)
345 if (en->addr == addr)
346 return en;
396 347
397 read_lock(&tbl->lock); 348 return NULL;
349}
398 350
399 list_for_each_entry(en, &tbl->bucket[hash], list) { 351
400 if (en->addr == addr) { 352/*
401 /* HIT */ 353 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
402 read_unlock(&tbl->lock); 354 * IP address to a server. Called under write lock.
403 return en; 355 */
356static inline struct ip_vs_lblcr_entry *
357ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, __be32 daddr,
358 struct ip_vs_dest *dest)
359{
360 struct ip_vs_lblcr_entry *en;
361
362 en = ip_vs_lblcr_get(tbl, daddr);
363 if (!en) {
364 en = kmalloc(sizeof(*en), GFP_ATOMIC);
365 if (!en) {
366 IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
367 return NULL;
404 } 368 }
369
370 en->addr = daddr;
371 en->lastuse = jiffies;
372
373 /* initilize its dest set */
374 atomic_set(&(en->set.size), 0);
375 en->set.list = NULL;
376 rwlock_init(&en->set.lock);
377
378 ip_vs_lblcr_hash(tbl, en);
405 } 379 }
406 380
407 read_unlock(&tbl->lock); 381 write_lock(&en->set.lock);
382 ip_vs_dest_set_insert(&en->set, dest);
383 write_unlock(&en->set.lock);
408 384
409 return NULL; 385 return en;
410} 386}
411 387
412 388
@@ -418,19 +394,18 @@ static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
418 int i; 394 int i;
419 struct ip_vs_lblcr_entry *en, *nxt; 395 struct ip_vs_lblcr_entry *en, *nxt;
420 396
397 /* No locking required, only called during cleanup. */
421 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { 398 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
422 write_lock(&tbl->lock);
423 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { 399 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
424 ip_vs_lblcr_free(en); 400 ip_vs_lblcr_free(en);
425 atomic_dec(&tbl->entries);
426 } 401 }
427 write_unlock(&tbl->lock);
428 } 402 }
429} 403}
430 404
431 405
432static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) 406static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
433{ 407{
408 struct ip_vs_lblcr_table *tbl = svc->sched_data;
434 unsigned long now = jiffies; 409 unsigned long now = jiffies;
435 int i, j; 410 int i, j;
436 struct ip_vs_lblcr_entry *en, *nxt; 411 struct ip_vs_lblcr_entry *en, *nxt;
@@ -438,7 +413,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
438 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { 413 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
439 j = (j + 1) & IP_VS_LBLCR_TAB_MASK; 414 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
440 415
441 write_lock(&tbl->lock); 416 write_lock(&svc->sched_lock);
442 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 417 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
443 if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration, 418 if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
444 now)) 419 now))
@@ -447,7 +422,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
447 ip_vs_lblcr_free(en); 422 ip_vs_lblcr_free(en);
448 atomic_dec(&tbl->entries); 423 atomic_dec(&tbl->entries);
449 } 424 }
450 write_unlock(&tbl->lock); 425 write_unlock(&svc->sched_lock);
451 } 426 }
452 tbl->rover = j; 427 tbl->rover = j;
453} 428}
@@ -466,17 +441,16 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
466 */ 441 */
467static void ip_vs_lblcr_check_expire(unsigned long data) 442static void ip_vs_lblcr_check_expire(unsigned long data)
468{ 443{
469 struct ip_vs_lblcr_table *tbl; 444 struct ip_vs_service *svc = (struct ip_vs_service *) data;
445 struct ip_vs_lblcr_table *tbl = svc->sched_data;
470 unsigned long now = jiffies; 446 unsigned long now = jiffies;
471 int goal; 447 int goal;
472 int i, j; 448 int i, j;
473 struct ip_vs_lblcr_entry *en, *nxt; 449 struct ip_vs_lblcr_entry *en, *nxt;
474 450
475 tbl = (struct ip_vs_lblcr_table *)data;
476
477 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { 451 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
478 /* do full expiration check */ 452 /* do full expiration check */
479 ip_vs_lblcr_full_check(tbl); 453 ip_vs_lblcr_full_check(svc);
480 tbl->counter = 1; 454 tbl->counter = 1;
481 goto out; 455 goto out;
482 } 456 }
@@ -493,7 +467,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
493 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { 467 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
494 j = (j + 1) & IP_VS_LBLCR_TAB_MASK; 468 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
495 469
496 write_lock(&tbl->lock); 470 write_lock(&svc->sched_lock);
497 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 471 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
498 if (time_before(now, en->lastuse+ENTRY_TIMEOUT)) 472 if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
499 continue; 473 continue;
@@ -502,7 +476,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
502 atomic_dec(&tbl->entries); 476 atomic_dec(&tbl->entries);
503 goal--; 477 goal--;
504 } 478 }
505 write_unlock(&tbl->lock); 479 write_unlock(&svc->sched_lock);
506 if (goal <= 0) 480 if (goal <= 0)
507 break; 481 break;
508 } 482 }
@@ -520,15 +494,14 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
520 /* 494 /*
521 * Allocate the ip_vs_lblcr_table for this service 495 * Allocate the ip_vs_lblcr_table for this service
522 */ 496 */
523 tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC); 497 tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
524 if (tbl == NULL) { 498 if (tbl == NULL) {
525 IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n"); 499 IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
526 return -ENOMEM; 500 return -ENOMEM;
527 } 501 }
528 svc->sched_data = tbl; 502 svc->sched_data = tbl;
529 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for " 503 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
530 "current service\n", 504 "current service\n", sizeof(*tbl));
531 sizeof(struct ip_vs_lblcr_table));
532 505
533 /* 506 /*
534 * Initialize the hash buckets 507 * Initialize the hash buckets
@@ -536,7 +509,6 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
536 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { 509 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
537 INIT_LIST_HEAD(&tbl->bucket[i]); 510 INIT_LIST_HEAD(&tbl->bucket[i]);
538 } 511 }
539 rwlock_init(&tbl->lock);
540 tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; 512 tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
541 tbl->rover = 0; 513 tbl->rover = 0;
542 tbl->counter = 1; 514 tbl->counter = 1;
@@ -545,9 +517,8 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
545 * Hook periodic timer for garbage collection 517 * Hook periodic timer for garbage collection
546 */ 518 */
547 setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire, 519 setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
548 (unsigned long)tbl); 520 (unsigned long)svc);
549 tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; 521 mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
550 add_timer(&tbl->periodic_timer);
551 522
552 return 0; 523 return 0;
553} 524}
@@ -564,22 +535,16 @@ static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
564 ip_vs_lblcr_flush(tbl); 535 ip_vs_lblcr_flush(tbl);
565 536
566 /* release the table itself */ 537 /* release the table itself */
567 kfree(svc->sched_data); 538 kfree(tbl);
568 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n", 539 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
569 sizeof(struct ip_vs_lblcr_table)); 540 sizeof(*tbl));
570 541
571 return 0; 542 return 0;
572} 543}
573 544
574 545
575static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc)
576{
577 return 0;
578}
579
580
581static inline struct ip_vs_dest * 546static inline struct ip_vs_dest *
582__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) 547__ip_vs_lblcr_schedule(struct ip_vs_service *svc, struct iphdr *iph)
583{ 548{
584 struct ip_vs_dest *dest, *least; 549 struct ip_vs_dest *dest, *least;
585 int loh, doh; 550 int loh, doh;
@@ -633,7 +598,7 @@ __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
633 598
634 IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d " 599 IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d "
635 "activeconns %d refcnt %d weight %d overhead %d\n", 600 "activeconns %d refcnt %d weight %d overhead %d\n",
636 NIPQUAD(least->addr), ntohs(least->port), 601 NIPQUAD(least->addr.ip), ntohs(least->port),
637 atomic_read(&least->activeconns), 602 atomic_read(&least->activeconns),
638 atomic_read(&least->refcnt), 603 atomic_read(&least->refcnt),
639 atomic_read(&least->weight), loh); 604 atomic_read(&least->weight), loh);
@@ -669,51 +634,79 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
669static struct ip_vs_dest * 634static struct ip_vs_dest *
670ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 635ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
671{ 636{
672 struct ip_vs_dest *dest; 637 struct ip_vs_lblcr_table *tbl = svc->sched_data;
673 struct ip_vs_lblcr_table *tbl;
674 struct ip_vs_lblcr_entry *en;
675 struct iphdr *iph = ip_hdr(skb); 638 struct iphdr *iph = ip_hdr(skb);
639 struct ip_vs_dest *dest = NULL;
640 struct ip_vs_lblcr_entry *en;
676 641
677 IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n"); 642 IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");
678 643
679 tbl = (struct ip_vs_lblcr_table *)svc->sched_data; 644 /* First look in our cache */
645 read_lock(&svc->sched_lock);
680 en = ip_vs_lblcr_get(tbl, iph->daddr); 646 en = ip_vs_lblcr_get(tbl, iph->daddr);
681 if (en == NULL) { 647 if (en) {
682 dest = __ip_vs_wlc_schedule(svc, iph); 648 /* We only hold a read lock, but this is atomic */
683 if (dest == NULL) { 649 en->lastuse = jiffies;
684 IP_VS_DBG(1, "no destination available\n"); 650
685 return NULL; 651 /* Get the least loaded destination */
686 } 652 read_lock(&en->set.lock);
687 en = ip_vs_lblcr_new(iph->daddr);
688 if (en == NULL) {
689 return NULL;
690 }
691 ip_vs_dest_set_insert(&en->set, dest);
692 ip_vs_lblcr_hash(tbl, en);
693 } else {
694 dest = ip_vs_dest_set_min(&en->set); 653 dest = ip_vs_dest_set_min(&en->set);
695 if (!dest || is_overloaded(dest, svc)) { 654 read_unlock(&en->set.lock);
696 dest = __ip_vs_wlc_schedule(svc, iph); 655
697 if (dest == NULL) { 656 /* More than one destination + enough time passed by, cleanup */
698 IP_VS_DBG(1, "no destination available\n");
699 return NULL;
700 }
701 ip_vs_dest_set_insert(&en->set, dest);
702 }
703 if (atomic_read(&en->set.size) > 1 && 657 if (atomic_read(&en->set.size) > 1 &&
704 jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) { 658 time_after(jiffies, en->set.lastmod +
659 sysctl_ip_vs_lblcr_expiration)) {
705 struct ip_vs_dest *m; 660 struct ip_vs_dest *m;
661
662 write_lock(&en->set.lock);
706 m = ip_vs_dest_set_max(&en->set); 663 m = ip_vs_dest_set_max(&en->set);
707 if (m) 664 if (m)
708 ip_vs_dest_set_erase(&en->set, m); 665 ip_vs_dest_set_erase(&en->set, m);
666 write_unlock(&en->set.lock);
709 } 667 }
668
669 /* If the destination is not overloaded, use it */
670 if (dest && !is_overloaded(dest, svc)) {
671 read_unlock(&svc->sched_lock);
672 goto out;
673 }
674
675 /* The cache entry is invalid, time to schedule */
676 dest = __ip_vs_lblcr_schedule(svc, iph);
677 if (!dest) {
678 IP_VS_DBG(1, "no destination available\n");
679 read_unlock(&svc->sched_lock);
680 return NULL;
681 }
682
683 /* Update our cache entry */
684 write_lock(&en->set.lock);
685 ip_vs_dest_set_insert(&en->set, dest);
686 write_unlock(&en->set.lock);
687 }
688 read_unlock(&svc->sched_lock);
689
690 if (dest)
691 goto out;
692
693 /* No cache entry, time to schedule */
694 dest = __ip_vs_lblcr_schedule(svc, iph);
695 if (!dest) {
696 IP_VS_DBG(1, "no destination available\n");
697 return NULL;
710 } 698 }
711 en->lastuse = jiffies;
712 699
700 /* If we fail to create a cache entry, we'll just use the valid dest */
701 write_lock(&svc->sched_lock);
702 ip_vs_lblcr_new(tbl, iph->daddr, dest);
703 write_unlock(&svc->sched_lock);
704
705out:
713 IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u " 706 IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
714 "--> server %u.%u.%u.%u:%d\n", 707 "--> server %u.%u.%u.%u:%d\n",
715 NIPQUAD(en->addr), 708 NIPQUAD(iph->daddr),
716 NIPQUAD(dest->addr), 709 NIPQUAD(dest->addr.ip),
717 ntohs(dest->port)); 710 ntohs(dest->port));
718 711
719 return dest; 712 return dest;
@@ -729,9 +722,11 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
729 .refcnt = ATOMIC_INIT(0), 722 .refcnt = ATOMIC_INIT(0),
730 .module = THIS_MODULE, 723 .module = THIS_MODULE,
731 .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list), 724 .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
725#ifdef CONFIG_IP_VS_IPV6
726 .supports_ipv6 = 0,
727#endif
732 .init_service = ip_vs_lblcr_init_svc, 728 .init_service = ip_vs_lblcr_init_svc,
733 .done_service = ip_vs_lblcr_done_svc, 729 .done_service = ip_vs_lblcr_done_svc,
734 .update_service = ip_vs_lblcr_update_svc,
735 .schedule = ip_vs_lblcr_schedule, 730 .schedule = ip_vs_lblcr_schedule,
736}; 731};
737 732
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index ebcdbf75ac65..b69f808ac461 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -20,24 +20,6 @@
20#include <net/ip_vs.h> 20#include <net/ip_vs.h>
21 21
22 22
23static int ip_vs_lc_init_svc(struct ip_vs_service *svc)
24{
25 return 0;
26}
27
28
29static int ip_vs_lc_done_svc(struct ip_vs_service *svc)
30{
31 return 0;
32}
33
34
35static int ip_vs_lc_update_svc(struct ip_vs_service *svc)
36{
37 return 0;
38}
39
40
41static inline unsigned int 23static inline unsigned int
42ip_vs_lc_dest_overhead(struct ip_vs_dest *dest) 24ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
43{ 25{
@@ -85,10 +67,10 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
85 } 67 }
86 68
87 if (least) 69 if (least)
88 IP_VS_DBG(6, "LC: server %u.%u.%u.%u:%u activeconns %d inactconns %d\n", 70 IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n",
89 NIPQUAD(least->addr), ntohs(least->port), 71 IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
90 atomic_read(&least->activeconns), 72 atomic_read(&least->activeconns),
91 atomic_read(&least->inactconns)); 73 atomic_read(&least->inactconns));
92 74
93 return least; 75 return least;
94} 76}
@@ -99,9 +81,9 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
99 .refcnt = ATOMIC_INIT(0), 81 .refcnt = ATOMIC_INIT(0),
100 .module = THIS_MODULE, 82 .module = THIS_MODULE,
101 .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list), 83 .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
102 .init_service = ip_vs_lc_init_svc, 84#ifdef CONFIG_IP_VS_IPV6
103 .done_service = ip_vs_lc_done_svc, 85 .supports_ipv6 = 1,
104 .update_service = ip_vs_lc_update_svc, 86#endif
105 .schedule = ip_vs_lc_schedule, 87 .schedule = ip_vs_lc_schedule,
106}; 88};
107 89
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index 92f3a6770031..9a2d8033f08f 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -37,27 +37,6 @@
37#include <net/ip_vs.h> 37#include <net/ip_vs.h>
38 38
39 39
40static int
41ip_vs_nq_init_svc(struct ip_vs_service *svc)
42{
43 return 0;
44}
45
46
47static int
48ip_vs_nq_done_svc(struct ip_vs_service *svc)
49{
50 return 0;
51}
52
53
54static int
55ip_vs_nq_update_svc(struct ip_vs_service *svc)
56{
57 return 0;
58}
59
60
61static inline unsigned int 40static inline unsigned int
62ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) 41ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
63{ 42{
@@ -120,12 +99,12 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
120 return NULL; 99 return NULL;
121 100
122 out: 101 out:
123 IP_VS_DBG(6, "NQ: server %u.%u.%u.%u:%u " 102 IP_VS_DBG_BUF(6, "NQ: server %s:%u "
124 "activeconns %d refcnt %d weight %d overhead %d\n", 103 "activeconns %d refcnt %d weight %d overhead %d\n",
125 NIPQUAD(least->addr), ntohs(least->port), 104 IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
126 atomic_read(&least->activeconns), 105 atomic_read(&least->activeconns),
127 atomic_read(&least->refcnt), 106 atomic_read(&least->refcnt),
128 atomic_read(&least->weight), loh); 107 atomic_read(&least->weight), loh);
129 108
130 return least; 109 return least;
131} 110}
@@ -137,9 +116,9 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
137 .refcnt = ATOMIC_INIT(0), 116 .refcnt = ATOMIC_INIT(0),
138 .module = THIS_MODULE, 117 .module = THIS_MODULE,
139 .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list), 118 .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
140 .init_service = ip_vs_nq_init_svc, 119#ifdef CONFIG_IP_VS_IPV6
141 .done_service = ip_vs_nq_done_svc, 120 .supports_ipv6 = 1,
142 .update_service = ip_vs_nq_update_svc, 121#endif
143 .schedule = ip_vs_nq_schedule, 122 .schedule = ip_vs_nq_schedule,
144}; 123};
145 124
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 6099a88fc200..0791f9e08feb 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -151,11 +151,11 @@ const char * ip_vs_state_name(__u16 proto, int state)
151} 151}
152 152
153 153
154void 154static void
155ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, 155ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp,
156 const struct sk_buff *skb, 156 const struct sk_buff *skb,
157 int offset, 157 int offset,
158 const char *msg) 158 const char *msg)
159{ 159{
160 char buf[128]; 160 char buf[128];
161 struct iphdr _iph, *ih; 161 struct iphdr _iph, *ih;
@@ -189,6 +189,61 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
189 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); 189 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
190} 190}
191 191
192#ifdef CONFIG_IP_VS_IPV6
193static void
194ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
195 const struct sk_buff *skb,
196 int offset,
197 const char *msg)
198{
199 char buf[192];
200 struct ipv6hdr _iph, *ih;
201
202 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
203 if (ih == NULL)
204 sprintf(buf, "%s TRUNCATED", pp->name);
205 else if (ih->nexthdr == IPPROTO_FRAGMENT)
206 sprintf(buf, "%s " NIP6_FMT "->" NIP6_FMT " frag",
207 pp->name, NIP6(ih->saddr),
208 NIP6(ih->daddr));
209 else {
210 __be16 _ports[2], *pptr;
211
212 pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
213 sizeof(_ports), _ports);
214 if (pptr == NULL)
215 sprintf(buf, "%s TRUNCATED " NIP6_FMT "->" NIP6_FMT,
216 pp->name,
217 NIP6(ih->saddr),
218 NIP6(ih->daddr));
219 else
220 sprintf(buf, "%s " NIP6_FMT ":%u->" NIP6_FMT ":%u",
221 pp->name,
222 NIP6(ih->saddr),
223 ntohs(pptr[0]),
224 NIP6(ih->daddr),
225 ntohs(pptr[1]));
226 }
227
228 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
229}
230#endif
231
232
233void
234ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
235 const struct sk_buff *skb,
236 int offset,
237 const char *msg)
238{
239#ifdef CONFIG_IP_VS_IPV6
240 if (skb->protocol == htons(ETH_P_IPV6))
241 ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg);
242 else
243#endif
244 ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
245}
246
192 247
193int __init ip_vs_protocol_init(void) 248int __init ip_vs_protocol_init(void)
194{ 249{
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah.c
deleted file mode 100644
index 73e0ea87c1f5..000000000000
--- a/net/ipv4/ipvs/ip_vs_proto_ah.c
+++ /dev/null
@@ -1,178 +0,0 @@
1/*
2 * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS
3 *
4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
5 * Wensong Zhang <wensong@linuxvirtualserver.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation;
10 *
11 */
12
13#include <linux/in.h>
14#include <linux/ip.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netfilter.h>
18#include <linux/netfilter_ipv4.h>
19
20#include <net/ip_vs.h>
21
22
23/* TODO:
24
25struct isakmp_hdr {
26 __u8 icookie[8];
27 __u8 rcookie[8];
28 __u8 np;
29 __u8 version;
30 __u8 xchgtype;
31 __u8 flags;
32 __u32 msgid;
33 __u32 length;
34};
35
36*/
37
38#define PORT_ISAKMP 500
39
40
41static struct ip_vs_conn *
42ah_conn_in_get(const struct sk_buff *skb,
43 struct ip_vs_protocol *pp,
44 const struct iphdr *iph,
45 unsigned int proto_off,
46 int inverse)
47{
48 struct ip_vs_conn *cp;
49
50 if (likely(!inverse)) {
51 cp = ip_vs_conn_in_get(IPPROTO_UDP,
52 iph->saddr,
53 htons(PORT_ISAKMP),
54 iph->daddr,
55 htons(PORT_ISAKMP));
56 } else {
57 cp = ip_vs_conn_in_get(IPPROTO_UDP,
58 iph->daddr,
59 htons(PORT_ISAKMP),
60 iph->saddr,
61 htons(PORT_ISAKMP));
62 }
63
64 if (!cp) {
65 /*
66 * We are not sure if the packet is from our
67 * service, so our conn_schedule hook should return NF_ACCEPT
68 */
69 IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet "
70 "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
71 inverse ? "ICMP+" : "",
72 pp->name,
73 NIPQUAD(iph->saddr),
74 NIPQUAD(iph->daddr));
75 }
76
77 return cp;
78}
79
80
81static struct ip_vs_conn *
82ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
83 const struct iphdr *iph, unsigned int proto_off, int inverse)
84{
85 struct ip_vs_conn *cp;
86
87 if (likely(!inverse)) {
88 cp = ip_vs_conn_out_get(IPPROTO_UDP,
89 iph->saddr,
90 htons(PORT_ISAKMP),
91 iph->daddr,
92 htons(PORT_ISAKMP));
93 } else {
94 cp = ip_vs_conn_out_get(IPPROTO_UDP,
95 iph->daddr,
96 htons(PORT_ISAKMP),
97 iph->saddr,
98 htons(PORT_ISAKMP));
99 }
100
101 if (!cp) {
102 IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet "
103 "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
104 inverse ? "ICMP+" : "",
105 pp->name,
106 NIPQUAD(iph->saddr),
107 NIPQUAD(iph->daddr));
108 }
109
110 return cp;
111}
112
113
114static int
115ah_conn_schedule(struct sk_buff *skb,
116 struct ip_vs_protocol *pp,
117 int *verdict, struct ip_vs_conn **cpp)
118{
119 /*
120 * AH is only related traffic. Pass the packet to IP stack.
121 */
122 *verdict = NF_ACCEPT;
123 return 0;
124}
125
126
127static void
128ah_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
129 int offset, const char *msg)
130{
131 char buf[256];
132 struct iphdr _iph, *ih;
133
134 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
135 if (ih == NULL)
136 sprintf(buf, "%s TRUNCATED", pp->name);
137 else
138 sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
139 pp->name, NIPQUAD(ih->saddr),
140 NIPQUAD(ih->daddr));
141
142 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
143}
144
145
146static void ah_init(struct ip_vs_protocol *pp)
147{
148 /* nothing to do now */
149}
150
151
152static void ah_exit(struct ip_vs_protocol *pp)
153{
154 /* nothing to do now */
155}
156
157
158struct ip_vs_protocol ip_vs_protocol_ah = {
159 .name = "AH",
160 .protocol = IPPROTO_AH,
161 .num_states = 1,
162 .dont_defrag = 1,
163 .init = ah_init,
164 .exit = ah_exit,
165 .conn_schedule = ah_conn_schedule,
166 .conn_in_get = ah_conn_in_get,
167 .conn_out_get = ah_conn_out_get,
168 .snat_handler = NULL,
169 .dnat_handler = NULL,
170 .csum_check = NULL,
171 .state_transition = NULL,
172 .register_app = NULL,
173 .unregister_app = NULL,
174 .app_conn_bind = NULL,
175 .debug_packet = ah_debug_packet,
176 .timeout_change = NULL, /* ISAKMP */
177 .set_state_timeout = NULL,
178};
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah_esp.c b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c
new file mode 100644
index 000000000000..80ab0c8e5b4a
--- /dev/null
+++ b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c
@@ -0,0 +1,235 @@
1/*
2 * ip_vs_proto_ah_esp.c: AH/ESP IPSec load balancing support for IPVS
3 *
4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
5 * Wensong Zhang <wensong@linuxvirtualserver.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation;
10 *
11 */
12
13#include <linux/in.h>
14#include <linux/ip.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netfilter.h>
18#include <linux/netfilter_ipv4.h>
19
20#include <net/ip_vs.h>
21
22
23/* TODO:
24
25struct isakmp_hdr {
26 __u8 icookie[8];
27 __u8 rcookie[8];
28 __u8 np;
29 __u8 version;
30 __u8 xchgtype;
31 __u8 flags;
32 __u32 msgid;
33 __u32 length;
34};
35
36*/
37
38#define PORT_ISAKMP 500
39
40
41static struct ip_vs_conn *
42ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
43 const struct ip_vs_iphdr *iph, unsigned int proto_off,
44 int inverse)
45{
46 struct ip_vs_conn *cp;
47
48 if (likely(!inverse)) {
49 cp = ip_vs_conn_in_get(af, IPPROTO_UDP,
50 &iph->saddr,
51 htons(PORT_ISAKMP),
52 &iph->daddr,
53 htons(PORT_ISAKMP));
54 } else {
55 cp = ip_vs_conn_in_get(af, IPPROTO_UDP,
56 &iph->daddr,
57 htons(PORT_ISAKMP),
58 &iph->saddr,
59 htons(PORT_ISAKMP));
60 }
61
62 if (!cp) {
63 /*
64 * We are not sure if the packet is from our
65 * service, so our conn_schedule hook should return NF_ACCEPT
66 */
67 IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
68 "%s%s %s->%s\n",
69 inverse ? "ICMP+" : "",
70 pp->name,
71 IP_VS_DBG_ADDR(af, &iph->saddr),
72 IP_VS_DBG_ADDR(af, &iph->daddr));
73 }
74
75 return cp;
76}
77
78
79static struct ip_vs_conn *
80ah_esp_conn_out_get(int af, const struct sk_buff *skb,
81 struct ip_vs_protocol *pp,
82 const struct ip_vs_iphdr *iph,
83 unsigned int proto_off,
84 int inverse)
85{
86 struct ip_vs_conn *cp;
87
88 if (likely(!inverse)) {
89 cp = ip_vs_conn_out_get(af, IPPROTO_UDP,
90 &iph->saddr,
91 htons(PORT_ISAKMP),
92 &iph->daddr,
93 htons(PORT_ISAKMP));
94 } else {
95 cp = ip_vs_conn_out_get(af, IPPROTO_UDP,
96 &iph->daddr,
97 htons(PORT_ISAKMP),
98 &iph->saddr,
99 htons(PORT_ISAKMP));
100 }
101
102 if (!cp) {
103 IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
104 "%s%s %s->%s\n",
105 inverse ? "ICMP+" : "",
106 pp->name,
107 IP_VS_DBG_ADDR(af, &iph->saddr),
108 IP_VS_DBG_ADDR(af, &iph->daddr));
109 }
110
111 return cp;
112}
113
114
115static int
116ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
117 int *verdict, struct ip_vs_conn **cpp)
118{
119 /*
120 * AH/ESP is only related traffic. Pass the packet to IP stack.
121 */
122 *verdict = NF_ACCEPT;
123 return 0;
124}
125
126
127static void
128ah_esp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb,
129 int offset, const char *msg)
130{
131 char buf[256];
132 struct iphdr _iph, *ih;
133
134 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
135 if (ih == NULL)
136 sprintf(buf, "%s TRUNCATED", pp->name);
137 else
138 sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
139 pp->name, NIPQUAD(ih->saddr),
140 NIPQUAD(ih->daddr));
141
142 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
143}
144
145#ifdef CONFIG_IP_VS_IPV6
146static void
147ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb,
148 int offset, const char *msg)
149{
150 char buf[256];
151 struct ipv6hdr _iph, *ih;
152
153 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
154 if (ih == NULL)
155 sprintf(buf, "%s TRUNCATED", pp->name);
156 else
157 sprintf(buf, "%s " NIP6_FMT "->" NIP6_FMT,
158 pp->name, NIP6(ih->saddr),
159 NIP6(ih->daddr));
160
161 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
162}
163#endif
164
165static void
166ah_esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
167 int offset, const char *msg)
168{
169#ifdef CONFIG_IP_VS_IPV6
170 if (skb->protocol == htons(ETH_P_IPV6))
171 ah_esp_debug_packet_v6(pp, skb, offset, msg);
172 else
173#endif
174 ah_esp_debug_packet_v4(pp, skb, offset, msg);
175}
176
177
178static void ah_esp_init(struct ip_vs_protocol *pp)
179{
180 /* nothing to do now */
181}
182
183
184static void ah_esp_exit(struct ip_vs_protocol *pp)
185{
186 /* nothing to do now */
187}
188
189
190#ifdef CONFIG_IP_VS_PROTO_AH
191struct ip_vs_protocol ip_vs_protocol_ah = {
192 .name = "AH",
193 .protocol = IPPROTO_AH,
194 .num_states = 1,
195 .dont_defrag = 1,
196 .init = ah_esp_init,
197 .exit = ah_esp_exit,
198 .conn_schedule = ah_esp_conn_schedule,
199 .conn_in_get = ah_esp_conn_in_get,
200 .conn_out_get = ah_esp_conn_out_get,
201 .snat_handler = NULL,
202 .dnat_handler = NULL,
203 .csum_check = NULL,
204 .state_transition = NULL,
205 .register_app = NULL,
206 .unregister_app = NULL,
207 .app_conn_bind = NULL,
208 .debug_packet = ah_esp_debug_packet,
209 .timeout_change = NULL, /* ISAKMP */
210 .set_state_timeout = NULL,
211};
212#endif
213
214#ifdef CONFIG_IP_VS_PROTO_ESP
215struct ip_vs_protocol ip_vs_protocol_esp = {
216 .name = "ESP",
217 .protocol = IPPROTO_ESP,
218 .num_states = 1,
219 .dont_defrag = 1,
220 .init = ah_esp_init,
221 .exit = ah_esp_exit,
222 .conn_schedule = ah_esp_conn_schedule,
223 .conn_in_get = ah_esp_conn_in_get,
224 .conn_out_get = ah_esp_conn_out_get,
225 .snat_handler = NULL,
226 .dnat_handler = NULL,
227 .csum_check = NULL,
228 .state_transition = NULL,
229 .register_app = NULL,
230 .unregister_app = NULL,
231 .app_conn_bind = NULL,
232 .debug_packet = ah_esp_debug_packet,
233 .timeout_change = NULL, /* ISAKMP */
234};
235#endif
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c
deleted file mode 100644
index 21d70c8ffa54..000000000000
--- a/net/ipv4/ipvs/ip_vs_proto_esp.c
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS
3 *
4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
5 * Wensong Zhang <wensong@linuxvirtualserver.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation;
10 *
11 */
12
13#include <linux/in.h>
14#include <linux/ip.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netfilter.h>
18#include <linux/netfilter_ipv4.h>
19
20#include <net/ip_vs.h>
21
22
23/* TODO:
24
25struct isakmp_hdr {
26 __u8 icookie[8];
27 __u8 rcookie[8];
28 __u8 np;
29 __u8 version;
30 __u8 xchgtype;
31 __u8 flags;
32 __u32 msgid;
33 __u32 length;
34};
35
36*/
37
38#define PORT_ISAKMP 500
39
40
41static struct ip_vs_conn *
42esp_conn_in_get(const struct sk_buff *skb,
43 struct ip_vs_protocol *pp,
44 const struct iphdr *iph,
45 unsigned int proto_off,
46 int inverse)
47{
48 struct ip_vs_conn *cp;
49
50 if (likely(!inverse)) {
51 cp = ip_vs_conn_in_get(IPPROTO_UDP,
52 iph->saddr,
53 htons(PORT_ISAKMP),
54 iph->daddr,
55 htons(PORT_ISAKMP));
56 } else {
57 cp = ip_vs_conn_in_get(IPPROTO_UDP,
58 iph->daddr,
59 htons(PORT_ISAKMP),
60 iph->saddr,
61 htons(PORT_ISAKMP));
62 }
63
64 if (!cp) {
65 /*
66 * We are not sure if the packet is from our
67 * service, so our conn_schedule hook should return NF_ACCEPT
68 */
69 IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet "
70 "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
71 inverse ? "ICMP+" : "",
72 pp->name,
73 NIPQUAD(iph->saddr),
74 NIPQUAD(iph->daddr));
75 }
76
77 return cp;
78}
79
80
81static struct ip_vs_conn *
82esp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
83 const struct iphdr *iph, unsigned int proto_off, int inverse)
84{
85 struct ip_vs_conn *cp;
86
87 if (likely(!inverse)) {
88 cp = ip_vs_conn_out_get(IPPROTO_UDP,
89 iph->saddr,
90 htons(PORT_ISAKMP),
91 iph->daddr,
92 htons(PORT_ISAKMP));
93 } else {
94 cp = ip_vs_conn_out_get(IPPROTO_UDP,
95 iph->daddr,
96 htons(PORT_ISAKMP),
97 iph->saddr,
98 htons(PORT_ISAKMP));
99 }
100
101 if (!cp) {
102 IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet "
103 "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
104 inverse ? "ICMP+" : "",
105 pp->name,
106 NIPQUAD(iph->saddr),
107 NIPQUAD(iph->daddr));
108 }
109
110 return cp;
111}
112
113
114static int
115esp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
116 int *verdict, struct ip_vs_conn **cpp)
117{
118 /*
119 * ESP is only related traffic. Pass the packet to IP stack.
120 */
121 *verdict = NF_ACCEPT;
122 return 0;
123}
124
125
126static void
127esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
128 int offset, const char *msg)
129{
130 char buf[256];
131 struct iphdr _iph, *ih;
132
133 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
134 if (ih == NULL)
135 sprintf(buf, "%s TRUNCATED", pp->name);
136 else
137 sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
138 pp->name, NIPQUAD(ih->saddr),
139 NIPQUAD(ih->daddr));
140
141 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
142}
143
144
145static void esp_init(struct ip_vs_protocol *pp)
146{
147 /* nothing to do now */
148}
149
150
151static void esp_exit(struct ip_vs_protocol *pp)
152{
153 /* nothing to do now */
154}
155
156
157struct ip_vs_protocol ip_vs_protocol_esp = {
158 .name = "ESP",
159 .protocol = IPPROTO_ESP,
160 .num_states = 1,
161 .dont_defrag = 1,
162 .init = esp_init,
163 .exit = esp_exit,
164 .conn_schedule = esp_conn_schedule,
165 .conn_in_get = esp_conn_in_get,
166 .conn_out_get = esp_conn_out_get,
167 .snat_handler = NULL,
168 .dnat_handler = NULL,
169 .csum_check = NULL,
170 .state_transition = NULL,
171 .register_app = NULL,
172 .unregister_app = NULL,
173 .app_conn_bind = NULL,
174 .debug_packet = esp_debug_packet,
175 .timeout_change = NULL, /* ISAKMP */
176};
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index d0ea467986a0..dd4566ea2bff 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -18,6 +18,7 @@
18#include <linux/tcp.h> /* for tcphdr */ 18#include <linux/tcp.h> /* for tcphdr */
19#include <net/ip.h> 19#include <net/ip.h>
20#include <net/tcp.h> /* for csum_tcpudp_magic */ 20#include <net/tcp.h> /* for csum_tcpudp_magic */
21#include <net/ip6_checksum.h>
21#include <linux/netfilter.h> 22#include <linux/netfilter.h>
22#include <linux/netfilter_ipv4.h> 23#include <linux/netfilter_ipv4.h>
23 24
@@ -25,8 +26,9 @@
25 26
26 27
27static struct ip_vs_conn * 28static struct ip_vs_conn *
28tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 29tcp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
29 const struct iphdr *iph, unsigned int proto_off, int inverse) 30 const struct ip_vs_iphdr *iph, unsigned int proto_off,
31 int inverse)
30{ 32{
31 __be16 _ports[2], *pptr; 33 __be16 _ports[2], *pptr;
32 34
@@ -35,19 +37,20 @@ tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
35 return NULL; 37 return NULL;
36 38
37 if (likely(!inverse)) { 39 if (likely(!inverse)) {
38 return ip_vs_conn_in_get(iph->protocol, 40 return ip_vs_conn_in_get(af, iph->protocol,
39 iph->saddr, pptr[0], 41 &iph->saddr, pptr[0],
40 iph->daddr, pptr[1]); 42 &iph->daddr, pptr[1]);
41 } else { 43 } else {
42 return ip_vs_conn_in_get(iph->protocol, 44 return ip_vs_conn_in_get(af, iph->protocol,
43 iph->daddr, pptr[1], 45 &iph->daddr, pptr[1],
44 iph->saddr, pptr[0]); 46 &iph->saddr, pptr[0]);
45 } 47 }
46} 48}
47 49
48static struct ip_vs_conn * 50static struct ip_vs_conn *
49tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 51tcp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
50 const struct iphdr *iph, unsigned int proto_off, int inverse) 52 const struct ip_vs_iphdr *iph, unsigned int proto_off,
53 int inverse)
51{ 54{
52 __be16 _ports[2], *pptr; 55 __be16 _ports[2], *pptr;
53 56
@@ -56,34 +59,36 @@ tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
56 return NULL; 59 return NULL;
57 60
58 if (likely(!inverse)) { 61 if (likely(!inverse)) {
59 return ip_vs_conn_out_get(iph->protocol, 62 return ip_vs_conn_out_get(af, iph->protocol,
60 iph->saddr, pptr[0], 63 &iph->saddr, pptr[0],
61 iph->daddr, pptr[1]); 64 &iph->daddr, pptr[1]);
62 } else { 65 } else {
63 return ip_vs_conn_out_get(iph->protocol, 66 return ip_vs_conn_out_get(af, iph->protocol,
64 iph->daddr, pptr[1], 67 &iph->daddr, pptr[1],
65 iph->saddr, pptr[0]); 68 &iph->saddr, pptr[0]);
66 } 69 }
67} 70}
68 71
69 72
70static int 73static int
71tcp_conn_schedule(struct sk_buff *skb, 74tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
72 struct ip_vs_protocol *pp,
73 int *verdict, struct ip_vs_conn **cpp) 75 int *verdict, struct ip_vs_conn **cpp)
74{ 76{
75 struct ip_vs_service *svc; 77 struct ip_vs_service *svc;
76 struct tcphdr _tcph, *th; 78 struct tcphdr _tcph, *th;
79 struct ip_vs_iphdr iph;
77 80
78 th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); 81 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
82
83 th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph);
79 if (th == NULL) { 84 if (th == NULL) {
80 *verdict = NF_DROP; 85 *verdict = NF_DROP;
81 return 0; 86 return 0;
82 } 87 }
83 88
84 if (th->syn && 89 if (th->syn &&
85 (svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol, 90 (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr,
86 ip_hdr(skb)->daddr, th->dest))) { 91 th->dest))) {
87 if (ip_vs_todrop()) { 92 if (ip_vs_todrop()) {
88 /* 93 /*
89 * It seems that we are very loaded. 94 * It seems that we are very loaded.
@@ -110,22 +115,62 @@ tcp_conn_schedule(struct sk_buff *skb,
110 115
111 116
112static inline void 117static inline void
113tcp_fast_csum_update(struct tcphdr *tcph, __be32 oldip, __be32 newip, 118tcp_fast_csum_update(int af, struct tcphdr *tcph,
119 const union nf_inet_addr *oldip,
120 const union nf_inet_addr *newip,
114 __be16 oldport, __be16 newport) 121 __be16 oldport, __be16 newport)
115{ 122{
123#ifdef CONFIG_IP_VS_IPV6
124 if (af == AF_INET6)
125 tcph->check =
126 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
127 ip_vs_check_diff2(oldport, newport,
128 ~csum_unfold(tcph->check))));
129 else
130#endif
116 tcph->check = 131 tcph->check =
117 csum_fold(ip_vs_check_diff4(oldip, newip, 132 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
118 ip_vs_check_diff2(oldport, newport, 133 ip_vs_check_diff2(oldport, newport,
119 ~csum_unfold(tcph->check)))); 134 ~csum_unfold(tcph->check))));
120} 135}
121 136
122 137
138static inline void
139tcp_partial_csum_update(int af, struct tcphdr *tcph,
140 const union nf_inet_addr *oldip,
141 const union nf_inet_addr *newip,
142 __be16 oldlen, __be16 newlen)
143{
144#ifdef CONFIG_IP_VS_IPV6
145 if (af == AF_INET6)
146 tcph->check =
147 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
148 ip_vs_check_diff2(oldlen, newlen,
149 ~csum_unfold(tcph->check))));
150 else
151#endif
152 tcph->check =
153 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
154 ip_vs_check_diff2(oldlen, newlen,
155 ~csum_unfold(tcph->check))));
156}
157
158
123static int 159static int
124tcp_snat_handler(struct sk_buff *skb, 160tcp_snat_handler(struct sk_buff *skb,
125 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 161 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
126{ 162{
127 struct tcphdr *tcph; 163 struct tcphdr *tcph;
128 const unsigned int tcphoff = ip_hdrlen(skb); 164 unsigned int tcphoff;
165 int oldlen;
166
167#ifdef CONFIG_IP_VS_IPV6
168 if (cp->af == AF_INET6)
169 tcphoff = sizeof(struct ipv6hdr);
170 else
171#endif
172 tcphoff = ip_hdrlen(skb);
173 oldlen = skb->len - tcphoff;
129 174
130 /* csum_check requires unshared skb */ 175 /* csum_check requires unshared skb */
131 if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) 176 if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
@@ -133,7 +178,7 @@ tcp_snat_handler(struct sk_buff *skb,
133 178
134 if (unlikely(cp->app != NULL)) { 179 if (unlikely(cp->app != NULL)) {
135 /* Some checks before mangling */ 180 /* Some checks before mangling */
136 if (pp->csum_check && !pp->csum_check(skb, pp)) 181 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
137 return 0; 182 return 0;
138 183
139 /* Call application helper if needed */ 184 /* Call application helper if needed */
@@ -141,13 +186,17 @@ tcp_snat_handler(struct sk_buff *skb,
141 return 0; 186 return 0;
142 } 187 }
143 188
144 tcph = (void *)ip_hdr(skb) + tcphoff; 189 tcph = (void *)skb_network_header(skb) + tcphoff;
145 tcph->source = cp->vport; 190 tcph->source = cp->vport;
146 191
147 /* Adjust TCP checksums */ 192 /* Adjust TCP checksums */
148 if (!cp->app) { 193 if (skb->ip_summed == CHECKSUM_PARTIAL) {
194 tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
195 htonl(oldlen),
196 htonl(skb->len - tcphoff));
197 } else if (!cp->app) {
149 /* Only port and addr are changed, do fast csum update */ 198 /* Only port and addr are changed, do fast csum update */
150 tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr, 199 tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
151 cp->dport, cp->vport); 200 cp->dport, cp->vport);
152 if (skb->ip_summed == CHECKSUM_COMPLETE) 201 if (skb->ip_summed == CHECKSUM_COMPLETE)
153 skb->ip_summed = CHECKSUM_NONE; 202 skb->ip_summed = CHECKSUM_NONE;
@@ -155,9 +204,20 @@ tcp_snat_handler(struct sk_buff *skb,
155 /* full checksum calculation */ 204 /* full checksum calculation */
156 tcph->check = 0; 205 tcph->check = 0;
157 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); 206 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
158 tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, 207#ifdef CONFIG_IP_VS_IPV6
159 skb->len - tcphoff, 208 if (cp->af == AF_INET6)
160 cp->protocol, skb->csum); 209 tcph->check = csum_ipv6_magic(&cp->vaddr.in6,
210 &cp->caddr.in6,
211 skb->len - tcphoff,
212 cp->protocol, skb->csum);
213 else
214#endif
215 tcph->check = csum_tcpudp_magic(cp->vaddr.ip,
216 cp->caddr.ip,
217 skb->len - tcphoff,
218 cp->protocol,
219 skb->csum);
220
161 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", 221 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
162 pp->name, tcph->check, 222 pp->name, tcph->check,
163 (char*)&(tcph->check) - (char*)tcph); 223 (char*)&(tcph->check) - (char*)tcph);
@@ -171,7 +231,16 @@ tcp_dnat_handler(struct sk_buff *skb,
171 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 231 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
172{ 232{
173 struct tcphdr *tcph; 233 struct tcphdr *tcph;
174 const unsigned int tcphoff = ip_hdrlen(skb); 234 unsigned int tcphoff;
235 int oldlen;
236
237#ifdef CONFIG_IP_VS_IPV6
238 if (cp->af == AF_INET6)
239 tcphoff = sizeof(struct ipv6hdr);
240 else
241#endif
242 tcphoff = ip_hdrlen(skb);
243 oldlen = skb->len - tcphoff;
175 244
176 /* csum_check requires unshared skb */ 245 /* csum_check requires unshared skb */
177 if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) 246 if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
@@ -179,7 +248,7 @@ tcp_dnat_handler(struct sk_buff *skb,
179 248
180 if (unlikely(cp->app != NULL)) { 249 if (unlikely(cp->app != NULL)) {
181 /* Some checks before mangling */ 250 /* Some checks before mangling */
182 if (pp->csum_check && !pp->csum_check(skb, pp)) 251 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
183 return 0; 252 return 0;
184 253
185 /* 254 /*
@@ -190,15 +259,19 @@ tcp_dnat_handler(struct sk_buff *skb,
190 return 0; 259 return 0;
191 } 260 }
192 261
193 tcph = (void *)ip_hdr(skb) + tcphoff; 262 tcph = (void *)skb_network_header(skb) + tcphoff;
194 tcph->dest = cp->dport; 263 tcph->dest = cp->dport;
195 264
196 /* 265 /*
197 * Adjust TCP checksums 266 * Adjust TCP checksums
198 */ 267 */
199 if (!cp->app) { 268 if (skb->ip_summed == CHECKSUM_PARTIAL) {
269 tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
270 htonl(oldlen),
271 htonl(skb->len - tcphoff));
272 } else if (!cp->app) {
200 /* Only port and addr are changed, do fast csum update */ 273 /* Only port and addr are changed, do fast csum update */
201 tcp_fast_csum_update(tcph, cp->vaddr, cp->daddr, 274 tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
202 cp->vport, cp->dport); 275 cp->vport, cp->dport);
203 if (skb->ip_summed == CHECKSUM_COMPLETE) 276 if (skb->ip_summed == CHECKSUM_COMPLETE)
204 skb->ip_summed = CHECKSUM_NONE; 277 skb->ip_summed = CHECKSUM_NONE;
@@ -206,9 +279,19 @@ tcp_dnat_handler(struct sk_buff *skb,
206 /* full checksum calculation */ 279 /* full checksum calculation */
207 tcph->check = 0; 280 tcph->check = 0;
208 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); 281 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
209 tcph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, 282#ifdef CONFIG_IP_VS_IPV6
210 skb->len - tcphoff, 283 if (cp->af == AF_INET6)
211 cp->protocol, skb->csum); 284 tcph->check = csum_ipv6_magic(&cp->caddr.in6,
285 &cp->daddr.in6,
286 skb->len - tcphoff,
287 cp->protocol, skb->csum);
288 else
289#endif
290 tcph->check = csum_tcpudp_magic(cp->caddr.ip,
291 cp->daddr.ip,
292 skb->len - tcphoff,
293 cp->protocol,
294 skb->csum);
212 skb->ip_summed = CHECKSUM_UNNECESSARY; 295 skb->ip_summed = CHECKSUM_UNNECESSARY;
213 } 296 }
214 return 1; 297 return 1;
@@ -216,21 +299,43 @@ tcp_dnat_handler(struct sk_buff *skb,
216 299
217 300
218static int 301static int
219tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) 302tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
220{ 303{
221 const unsigned int tcphoff = ip_hdrlen(skb); 304 unsigned int tcphoff;
305
306#ifdef CONFIG_IP_VS_IPV6
307 if (af == AF_INET6)
308 tcphoff = sizeof(struct ipv6hdr);
309 else
310#endif
311 tcphoff = ip_hdrlen(skb);
222 312
223 switch (skb->ip_summed) { 313 switch (skb->ip_summed) {
224 case CHECKSUM_NONE: 314 case CHECKSUM_NONE:
225 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); 315 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
226 case CHECKSUM_COMPLETE: 316 case CHECKSUM_COMPLETE:
227 if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 317#ifdef CONFIG_IP_VS_IPV6
228 skb->len - tcphoff, 318 if (af == AF_INET6) {
229 ip_hdr(skb)->protocol, skb->csum)) { 319 if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
230 IP_VS_DBG_RL_PKT(0, pp, skb, 0, 320 &ipv6_hdr(skb)->daddr,
231 "Failed checksum for"); 321 skb->len - tcphoff,
232 return 0; 322 ipv6_hdr(skb)->nexthdr,
233 } 323 skb->csum)) {
324 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
325 "Failed checksum for");
326 return 0;
327 }
328 } else
329#endif
330 if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
331 ip_hdr(skb)->daddr,
332 skb->len - tcphoff,
333 ip_hdr(skb)->protocol,
334 skb->csum)) {
335 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
336 "Failed checksum for");
337 return 0;
338 }
234 break; 339 break;
235 default: 340 default:
236 /* No need to checksum. */ 341 /* No need to checksum. */
@@ -419,19 +524,23 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
419 if (new_state != cp->state) { 524 if (new_state != cp->state) {
420 struct ip_vs_dest *dest = cp->dest; 525 struct ip_vs_dest *dest = cp->dest;
421 526
422 IP_VS_DBG(8, "%s %s [%c%c%c%c] %u.%u.%u.%u:%d->" 527 IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
423 "%u.%u.%u.%u:%d state: %s->%s conn->refcnt:%d\n", 528 "%s:%d state: %s->%s conn->refcnt:%d\n",
424 pp->name, 529 pp->name,
425 (state_off==TCP_DIR_OUTPUT)?"output ":"input ", 530 ((state_off == TCP_DIR_OUTPUT) ?
426 th->syn? 'S' : '.', 531 "output " : "input "),
427 th->fin? 'F' : '.', 532 th->syn ? 'S' : '.',
428 th->ack? 'A' : '.', 533 th->fin ? 'F' : '.',
429 th->rst? 'R' : '.', 534 th->ack ? 'A' : '.',
430 NIPQUAD(cp->daddr), ntohs(cp->dport), 535 th->rst ? 'R' : '.',
431 NIPQUAD(cp->caddr), ntohs(cp->cport), 536 IP_VS_DBG_ADDR(cp->af, &cp->daddr),
432 tcp_state_name(cp->state), 537 ntohs(cp->dport),
433 tcp_state_name(new_state), 538 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
434 atomic_read(&cp->refcnt)); 539 ntohs(cp->cport),
540 tcp_state_name(cp->state),
541 tcp_state_name(new_state),
542 atomic_read(&cp->refcnt));
543
435 if (dest) { 544 if (dest) {
436 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && 545 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
437 (new_state != IP_VS_TCP_S_ESTABLISHED)) { 546 (new_state != IP_VS_TCP_S_ESTABLISHED)) {
@@ -461,7 +570,13 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
461{ 570{
462 struct tcphdr _tcph, *th; 571 struct tcphdr _tcph, *th;
463 572
464 th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); 573#ifdef CONFIG_IP_VS_IPV6
574 int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
575#else
576 int ihl = ip_hdrlen(skb);
577#endif
578
579 th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
465 if (th == NULL) 580 if (th == NULL)
466 return 0; 581 return 0;
467 582
@@ -546,12 +661,15 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
546 break; 661 break;
547 spin_unlock(&tcp_app_lock); 662 spin_unlock(&tcp_app_lock);
548 663
549 IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" 664 IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
550 "%u.%u.%u.%u:%u to app %s on port %u\n", 665 "%s:%u to app %s on port %u\n",
551 __func__, 666 __func__,
552 NIPQUAD(cp->caddr), ntohs(cp->cport), 667 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
553 NIPQUAD(cp->vaddr), ntohs(cp->vport), 668 ntohs(cp->cport),
554 inc->name, ntohs(inc->port)); 669 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
670 ntohs(cp->vport),
671 inc->name, ntohs(inc->port));
672
555 cp->app = inc; 673 cp->app = inc;
556 if (inc->init_conn) 674 if (inc->init_conn)
557 result = inc->init_conn(inc, cp); 675 result = inc->init_conn(inc, cp);
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c
index c6be5d56823f..6eb6039d6343 100644
--- a/net/ipv4/ipvs/ip_vs_proto_udp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_udp.c
@@ -22,10 +22,12 @@
22 22
23#include <net/ip_vs.h> 23#include <net/ip_vs.h>
24#include <net/ip.h> 24#include <net/ip.h>
25#include <net/ip6_checksum.h>
25 26
26static struct ip_vs_conn * 27static struct ip_vs_conn *
27udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 28udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
28 const struct iphdr *iph, unsigned int proto_off, int inverse) 29 const struct ip_vs_iphdr *iph, unsigned int proto_off,
30 int inverse)
29{ 31{
30 struct ip_vs_conn *cp; 32 struct ip_vs_conn *cp;
31 __be16 _ports[2], *pptr; 33 __be16 _ports[2], *pptr;
@@ -35,13 +37,13 @@ udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
35 return NULL; 37 return NULL;
36 38
37 if (likely(!inverse)) { 39 if (likely(!inverse)) {
38 cp = ip_vs_conn_in_get(iph->protocol, 40 cp = ip_vs_conn_in_get(af, iph->protocol,
39 iph->saddr, pptr[0], 41 &iph->saddr, pptr[0],
40 iph->daddr, pptr[1]); 42 &iph->daddr, pptr[1]);
41 } else { 43 } else {
42 cp = ip_vs_conn_in_get(iph->protocol, 44 cp = ip_vs_conn_in_get(af, iph->protocol,
43 iph->daddr, pptr[1], 45 &iph->daddr, pptr[1],
44 iph->saddr, pptr[0]); 46 &iph->saddr, pptr[0]);
45 } 47 }
46 48
47 return cp; 49 return cp;
@@ -49,25 +51,25 @@ udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
49 51
50 52
51static struct ip_vs_conn * 53static struct ip_vs_conn *
52udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 54udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
53 const struct iphdr *iph, unsigned int proto_off, int inverse) 55 const struct ip_vs_iphdr *iph, unsigned int proto_off,
56 int inverse)
54{ 57{
55 struct ip_vs_conn *cp; 58 struct ip_vs_conn *cp;
56 __be16 _ports[2], *pptr; 59 __be16 _ports[2], *pptr;
57 60
58 pptr = skb_header_pointer(skb, ip_hdrlen(skb), 61 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
59 sizeof(_ports), _ports);
60 if (pptr == NULL) 62 if (pptr == NULL)
61 return NULL; 63 return NULL;
62 64
63 if (likely(!inverse)) { 65 if (likely(!inverse)) {
64 cp = ip_vs_conn_out_get(iph->protocol, 66 cp = ip_vs_conn_out_get(af, iph->protocol,
65 iph->saddr, pptr[0], 67 &iph->saddr, pptr[0],
66 iph->daddr, pptr[1]); 68 &iph->daddr, pptr[1]);
67 } else { 69 } else {
68 cp = ip_vs_conn_out_get(iph->protocol, 70 cp = ip_vs_conn_out_get(af, iph->protocol,
69 iph->daddr, pptr[1], 71 &iph->daddr, pptr[1],
70 iph->saddr, pptr[0]); 72 &iph->saddr, pptr[0]);
71 } 73 }
72 74
73 return cp; 75 return cp;
@@ -75,21 +77,24 @@ udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
75 77
76 78
77static int 79static int
78udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, 80udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
79 int *verdict, struct ip_vs_conn **cpp) 81 int *verdict, struct ip_vs_conn **cpp)
80{ 82{
81 struct ip_vs_service *svc; 83 struct ip_vs_service *svc;
82 struct udphdr _udph, *uh; 84 struct udphdr _udph, *uh;
85 struct ip_vs_iphdr iph;
86
87 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
83 88
84 uh = skb_header_pointer(skb, ip_hdrlen(skb), 89 uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph);
85 sizeof(_udph), &_udph);
86 if (uh == NULL) { 90 if (uh == NULL) {
87 *verdict = NF_DROP; 91 *verdict = NF_DROP;
88 return 0; 92 return 0;
89 } 93 }
90 94
91 if ((svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol, 95 svc = ip_vs_service_get(af, skb->mark, iph.protocol,
92 ip_hdr(skb)->daddr, uh->dest))) { 96 &iph.daddr, uh->dest);
97 if (svc) {
93 if (ip_vs_todrop()) { 98 if (ip_vs_todrop()) {
94 /* 99 /*
95 * It seems that we are very loaded. 100 * It seems that we are very loaded.
@@ -116,23 +121,63 @@ udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
116 121
117 122
118static inline void 123static inline void
119udp_fast_csum_update(struct udphdr *uhdr, __be32 oldip, __be32 newip, 124udp_fast_csum_update(int af, struct udphdr *uhdr,
125 const union nf_inet_addr *oldip,
126 const union nf_inet_addr *newip,
120 __be16 oldport, __be16 newport) 127 __be16 oldport, __be16 newport)
121{ 128{
122 uhdr->check = 129#ifdef CONFIG_IP_VS_IPV6
123 csum_fold(ip_vs_check_diff4(oldip, newip, 130 if (af == AF_INET6)
124 ip_vs_check_diff2(oldport, newport, 131 uhdr->check =
125 ~csum_unfold(uhdr->check)))); 132 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
133 ip_vs_check_diff2(oldport, newport,
134 ~csum_unfold(uhdr->check))));
135 else
136#endif
137 uhdr->check =
138 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
139 ip_vs_check_diff2(oldport, newport,
140 ~csum_unfold(uhdr->check))));
126 if (!uhdr->check) 141 if (!uhdr->check)
127 uhdr->check = CSUM_MANGLED_0; 142 uhdr->check = CSUM_MANGLED_0;
128} 143}
129 144
145static inline void
146udp_partial_csum_update(int af, struct udphdr *uhdr,
147 const union nf_inet_addr *oldip,
148 const union nf_inet_addr *newip,
149 __be16 oldlen, __be16 newlen)
150{
151#ifdef CONFIG_IP_VS_IPV6
152 if (af == AF_INET6)
153 uhdr->check =
154 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
155 ip_vs_check_diff2(oldlen, newlen,
156 ~csum_unfold(uhdr->check))));
157 else
158#endif
159 uhdr->check =
160 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
161 ip_vs_check_diff2(oldlen, newlen,
162 ~csum_unfold(uhdr->check))));
163}
164
165
130static int 166static int
131udp_snat_handler(struct sk_buff *skb, 167udp_snat_handler(struct sk_buff *skb,
132 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 168 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
133{ 169{
134 struct udphdr *udph; 170 struct udphdr *udph;
135 const unsigned int udphoff = ip_hdrlen(skb); 171 unsigned int udphoff;
172 int oldlen;
173
174#ifdef CONFIG_IP_VS_IPV6
175 if (cp->af == AF_INET6)
176 udphoff = sizeof(struct ipv6hdr);
177 else
178#endif
179 udphoff = ip_hdrlen(skb);
180 oldlen = skb->len - udphoff;
136 181
137 /* csum_check requires unshared skb */ 182 /* csum_check requires unshared skb */
138 if (!skb_make_writable(skb, udphoff+sizeof(*udph))) 183 if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
@@ -140,7 +185,7 @@ udp_snat_handler(struct sk_buff *skb,
140 185
141 if (unlikely(cp->app != NULL)) { 186 if (unlikely(cp->app != NULL)) {
142 /* Some checks before mangling */ 187 /* Some checks before mangling */
143 if (pp->csum_check && !pp->csum_check(skb, pp)) 188 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
144 return 0; 189 return 0;
145 190
146 /* 191 /*
@@ -150,15 +195,19 @@ udp_snat_handler(struct sk_buff *skb,
150 return 0; 195 return 0;
151 } 196 }
152 197
153 udph = (void *)ip_hdr(skb) + udphoff; 198 udph = (void *)skb_network_header(skb) + udphoff;
154 udph->source = cp->vport; 199 udph->source = cp->vport;
155 200
156 /* 201 /*
157 * Adjust UDP checksums 202 * Adjust UDP checksums
158 */ 203 */
159 if (!cp->app && (udph->check != 0)) { 204 if (skb->ip_summed == CHECKSUM_PARTIAL) {
205 udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
206 htonl(oldlen),
207 htonl(skb->len - udphoff));
208 } else if (!cp->app && (udph->check != 0)) {
160 /* Only port and addr are changed, do fast csum update */ 209 /* Only port and addr are changed, do fast csum update */
161 udp_fast_csum_update(udph, cp->daddr, cp->vaddr, 210 udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
162 cp->dport, cp->vport); 211 cp->dport, cp->vport);
163 if (skb->ip_summed == CHECKSUM_COMPLETE) 212 if (skb->ip_summed == CHECKSUM_COMPLETE)
164 skb->ip_summed = CHECKSUM_NONE; 213 skb->ip_summed = CHECKSUM_NONE;
@@ -166,9 +215,19 @@ udp_snat_handler(struct sk_buff *skb,
166 /* full checksum calculation */ 215 /* full checksum calculation */
167 udph->check = 0; 216 udph->check = 0;
168 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); 217 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
169 udph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, 218#ifdef CONFIG_IP_VS_IPV6
170 skb->len - udphoff, 219 if (cp->af == AF_INET6)
171 cp->protocol, skb->csum); 220 udph->check = csum_ipv6_magic(&cp->vaddr.in6,
221 &cp->caddr.in6,
222 skb->len - udphoff,
223 cp->protocol, skb->csum);
224 else
225#endif
226 udph->check = csum_tcpudp_magic(cp->vaddr.ip,
227 cp->caddr.ip,
228 skb->len - udphoff,
229 cp->protocol,
230 skb->csum);
172 if (udph->check == 0) 231 if (udph->check == 0)
173 udph->check = CSUM_MANGLED_0; 232 udph->check = CSUM_MANGLED_0;
174 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", 233 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
@@ -184,7 +243,16 @@ udp_dnat_handler(struct sk_buff *skb,
184 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 243 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
185{ 244{
186 struct udphdr *udph; 245 struct udphdr *udph;
187 unsigned int udphoff = ip_hdrlen(skb); 246 unsigned int udphoff;
247 int oldlen;
248
249#ifdef CONFIG_IP_VS_IPV6
250 if (cp->af == AF_INET6)
251 udphoff = sizeof(struct ipv6hdr);
252 else
253#endif
254 udphoff = ip_hdrlen(skb);
255 oldlen = skb->len - udphoff;
188 256
189 /* csum_check requires unshared skb */ 257 /* csum_check requires unshared skb */
190 if (!skb_make_writable(skb, udphoff+sizeof(*udph))) 258 if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
@@ -192,7 +260,7 @@ udp_dnat_handler(struct sk_buff *skb,
192 260
193 if (unlikely(cp->app != NULL)) { 261 if (unlikely(cp->app != NULL)) {
194 /* Some checks before mangling */ 262 /* Some checks before mangling */
195 if (pp->csum_check && !pp->csum_check(skb, pp)) 263 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
196 return 0; 264 return 0;
197 265
198 /* 266 /*
@@ -203,15 +271,19 @@ udp_dnat_handler(struct sk_buff *skb,
203 return 0; 271 return 0;
204 } 272 }
205 273
206 udph = (void *)ip_hdr(skb) + udphoff; 274 udph = (void *)skb_network_header(skb) + udphoff;
207 udph->dest = cp->dport; 275 udph->dest = cp->dport;
208 276
209 /* 277 /*
210 * Adjust UDP checksums 278 * Adjust UDP checksums
211 */ 279 */
212 if (!cp->app && (udph->check != 0)) { 280 if (skb->ip_summed == CHECKSUM_PARTIAL) {
281 udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
282 htonl(oldlen),
283 htonl(skb->len - udphoff));
284 } else if (!cp->app && (udph->check != 0)) {
213 /* Only port and addr are changed, do fast csum update */ 285 /* Only port and addr are changed, do fast csum update */
214 udp_fast_csum_update(udph, cp->vaddr, cp->daddr, 286 udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
215 cp->vport, cp->dport); 287 cp->vport, cp->dport);
216 if (skb->ip_summed == CHECKSUM_COMPLETE) 288 if (skb->ip_summed == CHECKSUM_COMPLETE)
217 skb->ip_summed = CHECKSUM_NONE; 289 skb->ip_summed = CHECKSUM_NONE;
@@ -219,9 +291,19 @@ udp_dnat_handler(struct sk_buff *skb,
219 /* full checksum calculation */ 291 /* full checksum calculation */
220 udph->check = 0; 292 udph->check = 0;
221 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); 293 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
222 udph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, 294#ifdef CONFIG_IP_VS_IPV6
223 skb->len - udphoff, 295 if (cp->af == AF_INET6)
224 cp->protocol, skb->csum); 296 udph->check = csum_ipv6_magic(&cp->caddr.in6,
297 &cp->daddr.in6,
298 skb->len - udphoff,
299 cp->protocol, skb->csum);
300 else
301#endif
302 udph->check = csum_tcpudp_magic(cp->caddr.ip,
303 cp->daddr.ip,
304 skb->len - udphoff,
305 cp->protocol,
306 skb->csum);
225 if (udph->check == 0) 307 if (udph->check == 0)
226 udph->check = CSUM_MANGLED_0; 308 udph->check = CSUM_MANGLED_0;
227 skb->ip_summed = CHECKSUM_UNNECESSARY; 309 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -231,10 +313,17 @@ udp_dnat_handler(struct sk_buff *skb,
231 313
232 314
233static int 315static int
234udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) 316udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
235{ 317{
236 struct udphdr _udph, *uh; 318 struct udphdr _udph, *uh;
237 const unsigned int udphoff = ip_hdrlen(skb); 319 unsigned int udphoff;
320
321#ifdef CONFIG_IP_VS_IPV6
322 if (af == AF_INET6)
323 udphoff = sizeof(struct ipv6hdr);
324 else
325#endif
326 udphoff = ip_hdrlen(skb);
238 327
239 uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph); 328 uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
240 if (uh == NULL) 329 if (uh == NULL)
@@ -246,15 +335,28 @@ udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
246 skb->csum = skb_checksum(skb, udphoff, 335 skb->csum = skb_checksum(skb, udphoff,
247 skb->len - udphoff, 0); 336 skb->len - udphoff, 0);
248 case CHECKSUM_COMPLETE: 337 case CHECKSUM_COMPLETE:
249 if (csum_tcpudp_magic(ip_hdr(skb)->saddr, 338#ifdef CONFIG_IP_VS_IPV6
250 ip_hdr(skb)->daddr, 339 if (af == AF_INET6) {
251 skb->len - udphoff, 340 if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
252 ip_hdr(skb)->protocol, 341 &ipv6_hdr(skb)->daddr,
253 skb->csum)) { 342 skb->len - udphoff,
254 IP_VS_DBG_RL_PKT(0, pp, skb, 0, 343 ipv6_hdr(skb)->nexthdr,
255 "Failed checksum for"); 344 skb->csum)) {
256 return 0; 345 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
257 } 346 "Failed checksum for");
347 return 0;
348 }
349 } else
350#endif
351 if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
352 ip_hdr(skb)->daddr,
353 skb->len - udphoff,
354 ip_hdr(skb)->protocol,
355 skb->csum)) {
356 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
357 "Failed checksum for");
358 return 0;
359 }
258 break; 360 break;
259 default: 361 default:
260 /* No need to checksum. */ 362 /* No need to checksum. */
@@ -340,12 +442,15 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
340 break; 442 break;
341 spin_unlock(&udp_app_lock); 443 spin_unlock(&udp_app_lock);
342 444
343 IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" 445 IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
344 "%u.%u.%u.%u:%u to app %s on port %u\n", 446 "%s:%u to app %s on port %u\n",
345 __func__, 447 __func__,
346 NIPQUAD(cp->caddr), ntohs(cp->cport), 448 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
347 NIPQUAD(cp->vaddr), ntohs(cp->vport), 449 ntohs(cp->cport),
348 inc->name, ntohs(inc->port)); 450 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
451 ntohs(cp->vport),
452 inc->name, ntohs(inc->port));
453
349 cp->app = inc; 454 cp->app = inc;
350 if (inc->init_conn) 455 if (inc->init_conn)
351 result = inc->init_conn(inc, cp); 456 result = inc->init_conn(inc, cp);
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index 358110d17e59..a22195f68ac4 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -32,12 +32,6 @@ static int ip_vs_rr_init_svc(struct ip_vs_service *svc)
32} 32}
33 33
34 34
35static int ip_vs_rr_done_svc(struct ip_vs_service *svc)
36{
37 return 0;
38}
39
40
41static int ip_vs_rr_update_svc(struct ip_vs_service *svc) 35static int ip_vs_rr_update_svc(struct ip_vs_service *svc)
42{ 36{
43 svc->sched_data = &svc->destinations; 37 svc->sched_data = &svc->destinations;
@@ -80,11 +74,11 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
80 out: 74 out:
81 svc->sched_data = q; 75 svc->sched_data = q;
82 write_unlock(&svc->sched_lock); 76 write_unlock(&svc->sched_lock);
83 IP_VS_DBG(6, "RR: server %u.%u.%u.%u:%u " 77 IP_VS_DBG_BUF(6, "RR: server %s:%u "
84 "activeconns %d refcnt %d weight %d\n", 78 "activeconns %d refcnt %d weight %d\n",
85 NIPQUAD(dest->addr), ntohs(dest->port), 79 IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
86 atomic_read(&dest->activeconns), 80 atomic_read(&dest->activeconns),
87 atomic_read(&dest->refcnt), atomic_read(&dest->weight)); 81 atomic_read(&dest->refcnt), atomic_read(&dest->weight));
88 82
89 return dest; 83 return dest;
90} 84}
@@ -95,8 +89,10 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
95 .refcnt = ATOMIC_INIT(0), 89 .refcnt = ATOMIC_INIT(0),
96 .module = THIS_MODULE, 90 .module = THIS_MODULE,
97 .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), 91 .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
92#ifdef CONFIG_IP_VS_IPV6
93 .supports_ipv6 = 1,
94#endif
98 .init_service = ip_vs_rr_init_svc, 95 .init_service = ip_vs_rr_init_svc,
99 .done_service = ip_vs_rr_done_svc,
100 .update_service = ip_vs_rr_update_svc, 96 .update_service = ip_vs_rr_update_svc,
101 .schedule = ip_vs_rr_schedule, 97 .schedule = ip_vs_rr_schedule,
102}; 98};
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index 77663d84cbd1..7d2f22f04b83 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -41,27 +41,6 @@
41#include <net/ip_vs.h> 41#include <net/ip_vs.h>
42 42
43 43
44static int
45ip_vs_sed_init_svc(struct ip_vs_service *svc)
46{
47 return 0;
48}
49
50
51static int
52ip_vs_sed_done_svc(struct ip_vs_service *svc)
53{
54 return 0;
55}
56
57
58static int
59ip_vs_sed_update_svc(struct ip_vs_service *svc)
60{
61 return 0;
62}
63
64
65static inline unsigned int 44static inline unsigned int
66ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) 45ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
67{ 46{
@@ -122,12 +101,12 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
122 } 101 }
123 } 102 }
124 103
125 IP_VS_DBG(6, "SED: server %u.%u.%u.%u:%u " 104 IP_VS_DBG_BUF(6, "SED: server %s:%u "
126 "activeconns %d refcnt %d weight %d overhead %d\n", 105 "activeconns %d refcnt %d weight %d overhead %d\n",
127 NIPQUAD(least->addr), ntohs(least->port), 106 IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
128 atomic_read(&least->activeconns), 107 atomic_read(&least->activeconns),
129 atomic_read(&least->refcnt), 108 atomic_read(&least->refcnt),
130 atomic_read(&least->weight), loh); 109 atomic_read(&least->weight), loh);
131 110
132 return least; 111 return least;
133} 112}
@@ -139,9 +118,9 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
139 .refcnt = ATOMIC_INIT(0), 118 .refcnt = ATOMIC_INIT(0),
140 .module = THIS_MODULE, 119 .module = THIS_MODULE,
141 .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list), 120 .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
142 .init_service = ip_vs_sed_init_svc, 121#ifdef CONFIG_IP_VS_IPV6
143 .done_service = ip_vs_sed_done_svc, 122 .supports_ipv6 = 1,
144 .update_service = ip_vs_sed_update_svc, 123#endif
145 .schedule = ip_vs_sed_schedule, 124 .schedule = ip_vs_sed_schedule,
146}; 125};
147 126
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index 7b979e228056..1d96de27fefd 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -215,7 +215,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
215 IP_VS_DBG(6, "SH: source IP address %u.%u.%u.%u " 215 IP_VS_DBG(6, "SH: source IP address %u.%u.%u.%u "
216 "--> server %u.%u.%u.%u:%d\n", 216 "--> server %u.%u.%u.%u:%d\n",
217 NIPQUAD(iph->saddr), 217 NIPQUAD(iph->saddr),
218 NIPQUAD(dest->addr), 218 NIPQUAD(dest->addr.ip),
219 ntohs(dest->port)); 219 ntohs(dest->port));
220 220
221 return dest; 221 return dest;
@@ -231,6 +231,9 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
231 .refcnt = ATOMIC_INIT(0), 231 .refcnt = ATOMIC_INIT(0),
232 .module = THIS_MODULE, 232 .module = THIS_MODULE,
233 .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list), 233 .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
234#ifdef CONFIG_IP_VS_IPV6
235 .supports_ipv6 = 0,
236#endif
234 .init_service = ip_vs_sh_init_svc, 237 .init_service = ip_vs_sh_init_svc,
235 .done_service = ip_vs_sh_done_svc, 238 .done_service = ip_vs_sh_done_svc,
236 .update_service = ip_vs_sh_update_svc, 239 .update_service = ip_vs_sh_update_svc,
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index a652da2c3200..28237a5f62e2 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -256,9 +256,9 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
256 s->cport = cp->cport; 256 s->cport = cp->cport;
257 s->vport = cp->vport; 257 s->vport = cp->vport;
258 s->dport = cp->dport; 258 s->dport = cp->dport;
259 s->caddr = cp->caddr; 259 s->caddr = cp->caddr.ip;
260 s->vaddr = cp->vaddr; 260 s->vaddr = cp->vaddr.ip;
261 s->daddr = cp->daddr; 261 s->daddr = cp->daddr.ip;
262 s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); 262 s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED);
263 s->state = htons(cp->state); 263 s->state = htons(cp->state);
264 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { 264 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
@@ -366,21 +366,28 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
366 } 366 }
367 367
368 if (!(flags & IP_VS_CONN_F_TEMPLATE)) 368 if (!(flags & IP_VS_CONN_F_TEMPLATE))
369 cp = ip_vs_conn_in_get(s->protocol, 369 cp = ip_vs_conn_in_get(AF_INET, s->protocol,
370 s->caddr, s->cport, 370 (union nf_inet_addr *)&s->caddr,
371 s->vaddr, s->vport); 371 s->cport,
372 (union nf_inet_addr *)&s->vaddr,
373 s->vport);
372 else 374 else
373 cp = ip_vs_ct_in_get(s->protocol, 375 cp = ip_vs_ct_in_get(AF_INET, s->protocol,
374 s->caddr, s->cport, 376 (union nf_inet_addr *)&s->caddr,
375 s->vaddr, s->vport); 377 s->cport,
378 (union nf_inet_addr *)&s->vaddr,
379 s->vport);
376 if (!cp) { 380 if (!cp) {
377 /* 381 /*
378 * Find the appropriate destination for the connection. 382 * Find the appropriate destination for the connection.
379 * If it is not found the connection will remain unbound 383 * If it is not found the connection will remain unbound
380 * but still handled. 384 * but still handled.
381 */ 385 */
382 dest = ip_vs_find_dest(s->daddr, s->dport, 386 dest = ip_vs_find_dest(AF_INET,
383 s->vaddr, s->vport, 387 (union nf_inet_addr *)&s->daddr,
388 s->dport,
389 (union nf_inet_addr *)&s->vaddr,
390 s->vport,
384 s->protocol); 391 s->protocol);
385 /* Set the approprite ativity flag */ 392 /* Set the approprite ativity flag */
386 if (s->protocol == IPPROTO_TCP) { 393 if (s->protocol == IPPROTO_TCP) {
@@ -389,10 +396,13 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
389 else 396 else
390 flags &= ~IP_VS_CONN_F_INACTIVE; 397 flags &= ~IP_VS_CONN_F_INACTIVE;
391 } 398 }
392 cp = ip_vs_conn_new(s->protocol, 399 cp = ip_vs_conn_new(AF_INET, s->protocol,
393 s->caddr, s->cport, 400 (union nf_inet_addr *)&s->caddr,
394 s->vaddr, s->vport, 401 s->cport,
395 s->daddr, s->dport, 402 (union nf_inet_addr *)&s->vaddr,
403 s->vport,
404 (union nf_inet_addr *)&s->daddr,
405 s->dport,
396 flags, dest); 406 flags, dest);
397 if (dest) 407 if (dest)
398 atomic_dec(&dest->refcnt); 408 atomic_dec(&dest->refcnt);
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 9b0ef86bb1f7..8c596e712599 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -25,27 +25,6 @@
25#include <net/ip_vs.h> 25#include <net/ip_vs.h>
26 26
27 27
28static int
29ip_vs_wlc_init_svc(struct ip_vs_service *svc)
30{
31 return 0;
32}
33
34
35static int
36ip_vs_wlc_done_svc(struct ip_vs_service *svc)
37{
38 return 0;
39}
40
41
42static int
43ip_vs_wlc_update_svc(struct ip_vs_service *svc)
44{
45 return 0;
46}
47
48
49static inline unsigned int 28static inline unsigned int
50ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) 29ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
51{ 30{
@@ -110,12 +89,12 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
110 } 89 }
111 } 90 }
112 91
113 IP_VS_DBG(6, "WLC: server %u.%u.%u.%u:%u " 92 IP_VS_DBG_BUF(6, "WLC: server %s:%u "
114 "activeconns %d refcnt %d weight %d overhead %d\n", 93 "activeconns %d refcnt %d weight %d overhead %d\n",
115 NIPQUAD(least->addr), ntohs(least->port), 94 IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
116 atomic_read(&least->activeconns), 95 atomic_read(&least->activeconns),
117 atomic_read(&least->refcnt), 96 atomic_read(&least->refcnt),
118 atomic_read(&least->weight), loh); 97 atomic_read(&least->weight), loh);
119 98
120 return least; 99 return least;
121} 100}
@@ -127,9 +106,9 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
127 .refcnt = ATOMIC_INIT(0), 106 .refcnt = ATOMIC_INIT(0),
128 .module = THIS_MODULE, 107 .module = THIS_MODULE,
129 .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list), 108 .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
130 .init_service = ip_vs_wlc_init_svc, 109#ifdef CONFIG_IP_VS_IPV6
131 .done_service = ip_vs_wlc_done_svc, 110 .supports_ipv6 = 1,
132 .update_service = ip_vs_wlc_update_svc, 111#endif
133 .schedule = ip_vs_wlc_schedule, 112 .schedule = ip_vs_wlc_schedule,
134}; 113};
135 114
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 0d86a79b87b5..7ea92fed50bf 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -195,12 +195,12 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
195 } 195 }
196 } 196 }
197 197
198 IP_VS_DBG(6, "WRR: server %u.%u.%u.%u:%u " 198 IP_VS_DBG_BUF(6, "WRR: server %s:%u "
199 "activeconns %d refcnt %d weight %d\n", 199 "activeconns %d refcnt %d weight %d\n",
200 NIPQUAD(dest->addr), ntohs(dest->port), 200 IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
201 atomic_read(&dest->activeconns), 201 atomic_read(&dest->activeconns),
202 atomic_read(&dest->refcnt), 202 atomic_read(&dest->refcnt),
203 atomic_read(&dest->weight)); 203 atomic_read(&dest->weight));
204 204
205 out: 205 out:
206 write_unlock(&svc->sched_lock); 206 write_unlock(&svc->sched_lock);
@@ -213,6 +213,9 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
213 .refcnt = ATOMIC_INIT(0), 213 .refcnt = ATOMIC_INIT(0),
214 .module = THIS_MODULE, 214 .module = THIS_MODULE,
215 .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list), 215 .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
216#ifdef CONFIG_IP_VS_IPV6
217 .supports_ipv6 = 1,
218#endif
216 .init_service = ip_vs_wrr_init_svc, 219 .init_service = ip_vs_wrr_init_svc,
217 .done_service = ip_vs_wrr_done_svc, 220 .done_service = ip_vs_wrr_done_svc,
218 .update_service = ip_vs_wrr_update_svc, 221 .update_service = ip_vs_wrr_update_svc,
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index 9892d4aca42e..02ddc2b3ce2e 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -20,6 +20,9 @@
20#include <net/udp.h> 20#include <net/udp.h>
21#include <net/icmp.h> /* for icmp_send */ 21#include <net/icmp.h> /* for icmp_send */
22#include <net/route.h> /* for ip_route_output */ 22#include <net/route.h> /* for ip_route_output */
23#include <net/ipv6.h>
24#include <net/ip6_route.h>
25#include <linux/icmpv6.h>
23#include <linux/netfilter.h> 26#include <linux/netfilter.h>
24#include <linux/netfilter_ipv4.h> 27#include <linux/netfilter_ipv4.h>
25 28
@@ -47,7 +50,8 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos, u32 cookie)
47 50
48 if (!dst) 51 if (!dst)
49 return NULL; 52 return NULL;
50 if ((dst->obsolete || rtos != dest->dst_rtos) && 53 if ((dst->obsolete
54 || (dest->af == AF_INET && rtos != dest->dst_rtos)) &&
51 dst->ops->check(dst, cookie) == NULL) { 55 dst->ops->check(dst, cookie) == NULL) {
52 dest->dst_cache = NULL; 56 dest->dst_cache = NULL;
53 dst_release(dst); 57 dst_release(dst);
@@ -71,7 +75,7 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
71 .oif = 0, 75 .oif = 0,
72 .nl_u = { 76 .nl_u = {
73 .ip4_u = { 77 .ip4_u = {
74 .daddr = dest->addr, 78 .daddr = dest->addr.ip,
75 .saddr = 0, 79 .saddr = 0,
76 .tos = rtos, } }, 80 .tos = rtos, } },
77 }; 81 };
@@ -80,12 +84,12 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
80 spin_unlock(&dest->dst_lock); 84 spin_unlock(&dest->dst_lock);
81 IP_VS_DBG_RL("ip_route_output error, " 85 IP_VS_DBG_RL("ip_route_output error, "
82 "dest: %u.%u.%u.%u\n", 86 "dest: %u.%u.%u.%u\n",
83 NIPQUAD(dest->addr)); 87 NIPQUAD(dest->addr.ip));
84 return NULL; 88 return NULL;
85 } 89 }
86 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst)); 90 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst));
87 IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n", 91 IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n",
88 NIPQUAD(dest->addr), 92 NIPQUAD(dest->addr.ip),
89 atomic_read(&rt->u.dst.__refcnt), rtos); 93 atomic_read(&rt->u.dst.__refcnt), rtos);
90 } 94 }
91 spin_unlock(&dest->dst_lock); 95 spin_unlock(&dest->dst_lock);
@@ -94,14 +98,14 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
94 .oif = 0, 98 .oif = 0,
95 .nl_u = { 99 .nl_u = {
96 .ip4_u = { 100 .ip4_u = {
97 .daddr = cp->daddr, 101 .daddr = cp->daddr.ip,
98 .saddr = 0, 102 .saddr = 0,
99 .tos = rtos, } }, 103 .tos = rtos, } },
100 }; 104 };
101 105
102 if (ip_route_output_key(&init_net, &rt, &fl)) { 106 if (ip_route_output_key(&init_net, &rt, &fl)) {
103 IP_VS_DBG_RL("ip_route_output error, dest: " 107 IP_VS_DBG_RL("ip_route_output error, dest: "
104 "%u.%u.%u.%u\n", NIPQUAD(cp->daddr)); 108 "%u.%u.%u.%u\n", NIPQUAD(cp->daddr.ip));
105 return NULL; 109 return NULL;
106 } 110 }
107 } 111 }
@@ -109,6 +113,70 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
109 return rt; 113 return rt;
110} 114}
111 115
116#ifdef CONFIG_IP_VS_IPV6
117static struct rt6_info *
118__ip_vs_get_out_rt_v6(struct ip_vs_conn *cp)
119{
120 struct rt6_info *rt; /* Route to the other host */
121 struct ip_vs_dest *dest = cp->dest;
122
123 if (dest) {
124 spin_lock(&dest->dst_lock);
125 rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0, 0);
126 if (!rt) {
127 struct flowi fl = {
128 .oif = 0,
129 .nl_u = {
130 .ip6_u = {
131 .daddr = dest->addr.in6,
132 .saddr = {
133 .s6_addr32 =
134 { 0, 0, 0, 0 },
135 },
136 },
137 },
138 };
139
140 rt = (struct rt6_info *)ip6_route_output(&init_net,
141 NULL, &fl);
142 if (!rt) {
143 spin_unlock(&dest->dst_lock);
144 IP_VS_DBG_RL("ip6_route_output error, "
145 "dest: " NIP6_FMT "\n",
146 NIP6(dest->addr.in6));
147 return NULL;
148 }
149 __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst));
150 IP_VS_DBG(10, "new dst " NIP6_FMT ", refcnt=%d\n",
151 NIP6(dest->addr.in6),
152 atomic_read(&rt->u.dst.__refcnt));
153 }
154 spin_unlock(&dest->dst_lock);
155 } else {
156 struct flowi fl = {
157 .oif = 0,
158 .nl_u = {
159 .ip6_u = {
160 .daddr = cp->daddr.in6,
161 .saddr = {
162 .s6_addr32 = { 0, 0, 0, 0 },
163 },
164 },
165 },
166 };
167
168 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
169 if (!rt) {
170 IP_VS_DBG_RL("ip6_route_output error, dest: "
171 NIP6_FMT "\n", NIP6(cp->daddr.in6));
172 return NULL;
173 }
174 }
175
176 return rt;
177}
178#endif
179
112 180
113/* 181/*
114 * Release dest->dst_cache before a dest is removed 182 * Release dest->dst_cache before a dest is removed
@@ -123,11 +191,11 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
123 dst_release(old_dst); 191 dst_release(old_dst);
124} 192}
125 193
126#define IP_VS_XMIT(skb, rt) \ 194#define IP_VS_XMIT(pf, skb, rt) \
127do { \ 195do { \
128 (skb)->ipvs_property = 1; \ 196 (skb)->ipvs_property = 1; \
129 skb_forward_csum(skb); \ 197 skb_forward_csum(skb); \
130 NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, (skb), NULL, \ 198 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
131 (rt)->u.dst.dev, dst_output); \ 199 (rt)->u.dst.dev, dst_output); \
132} while (0) 200} while (0)
133 201
@@ -200,7 +268,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
200 /* Another hack: avoid icmp_send in ip_fragment */ 268 /* Another hack: avoid icmp_send in ip_fragment */
201 skb->local_df = 1; 269 skb->local_df = 1;
202 270
203 IP_VS_XMIT(skb, rt); 271 IP_VS_XMIT(PF_INET, skb, rt);
204 272
205 LeaveFunction(10); 273 LeaveFunction(10);
206 return NF_STOLEN; 274 return NF_STOLEN;
@@ -213,6 +281,70 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
213 return NF_STOLEN; 281 return NF_STOLEN;
214} 282}
215 283
284#ifdef CONFIG_IP_VS_IPV6
285int
286ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
287 struct ip_vs_protocol *pp)
288{
289 struct rt6_info *rt; /* Route to the other host */
290 struct ipv6hdr *iph = ipv6_hdr(skb);
291 int mtu;
292 struct flowi fl = {
293 .oif = 0,
294 .nl_u = {
295 .ip6_u = {
296 .daddr = iph->daddr,
297 .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
298 };
299
300 EnterFunction(10);
301
302 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
303 if (!rt) {
304 IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): ip6_route_output error, "
305 "dest: " NIP6_FMT "\n", NIP6(iph->daddr));
306 goto tx_error_icmp;
307 }
308
309 /* MTU checking */
310 mtu = dst_mtu(&rt->u.dst);
311 if (skb->len > mtu) {
312 dst_release(&rt->u.dst);
313 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
314 IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): frag needed\n");
315 goto tx_error;
316 }
317
318 /*
319 * Call ip_send_check because we are not sure it is called
320 * after ip_defrag. Is copy-on-write needed?
321 */
322 skb = skb_share_check(skb, GFP_ATOMIC);
323 if (unlikely(skb == NULL)) {
324 dst_release(&rt->u.dst);
325 return NF_STOLEN;
326 }
327
328 /* drop old route */
329 dst_release(skb->dst);
330 skb->dst = &rt->u.dst;
331
332 /* Another hack: avoid icmp_send in ip_fragment */
333 skb->local_df = 1;
334
335 IP_VS_XMIT(PF_INET6, skb, rt);
336
337 LeaveFunction(10);
338 return NF_STOLEN;
339
340 tx_error_icmp:
341 dst_link_failure(skb);
342 tx_error:
343 kfree_skb(skb);
344 LeaveFunction(10);
345 return NF_STOLEN;
346}
347#endif
216 348
217/* 349/*
218 * NAT transmitter (only for outside-to-inside nat forwarding) 350 * NAT transmitter (only for outside-to-inside nat forwarding)
@@ -264,7 +396,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
264 /* mangle the packet */ 396 /* mangle the packet */
265 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) 397 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
266 goto tx_error; 398 goto tx_error;
267 ip_hdr(skb)->daddr = cp->daddr; 399 ip_hdr(skb)->daddr = cp->daddr.ip;
268 ip_send_check(ip_hdr(skb)); 400 ip_send_check(ip_hdr(skb));
269 401
270 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); 402 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
@@ -276,7 +408,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
276 /* Another hack: avoid icmp_send in ip_fragment */ 408 /* Another hack: avoid icmp_send in ip_fragment */
277 skb->local_df = 1; 409 skb->local_df = 1;
278 410
279 IP_VS_XMIT(skb, rt); 411 IP_VS_XMIT(PF_INET, skb, rt);
280 412
281 LeaveFunction(10); 413 LeaveFunction(10);
282 return NF_STOLEN; 414 return NF_STOLEN;
@@ -292,6 +424,83 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
292 goto tx_error; 424 goto tx_error;
293} 425}
294 426
427#ifdef CONFIG_IP_VS_IPV6
428int
429ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
430 struct ip_vs_protocol *pp)
431{
432 struct rt6_info *rt; /* Route to the other host */
433 int mtu;
434
435 EnterFunction(10);
436
437 /* check if it is a connection of no-client-port */
438 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
439 __be16 _pt, *p;
440 p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
441 sizeof(_pt), &_pt);
442 if (p == NULL)
443 goto tx_error;
444 ip_vs_conn_fill_cport(cp, *p);
445 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
446 }
447
448 rt = __ip_vs_get_out_rt_v6(cp);
449 if (!rt)
450 goto tx_error_icmp;
451
452 /* MTU checking */
453 mtu = dst_mtu(&rt->u.dst);
454 if (skb->len > mtu) {
455 dst_release(&rt->u.dst);
456 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
457 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
458 "ip_vs_nat_xmit_v6(): frag needed for");
459 goto tx_error;
460 }
461
462 /* copy-on-write the packet before mangling it */
463 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
464 goto tx_error_put;
465
466 if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
467 goto tx_error_put;
468
469 /* drop old route */
470 dst_release(skb->dst);
471 skb->dst = &rt->u.dst;
472
473 /* mangle the packet */
474 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
475 goto tx_error;
476 ipv6_hdr(skb)->daddr = cp->daddr.in6;
477
478 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
479
480 /* FIXME: when application helper enlarges the packet and the length
481 is larger than the MTU of outgoing device, there will be still
482 MTU problem. */
483
484 /* Another hack: avoid icmp_send in ip_fragment */
485 skb->local_df = 1;
486
487 IP_VS_XMIT(PF_INET6, skb, rt);
488
489 LeaveFunction(10);
490 return NF_STOLEN;
491
492tx_error_icmp:
493 dst_link_failure(skb);
494tx_error:
495 LeaveFunction(10);
496 kfree_skb(skb);
497 return NF_STOLEN;
498tx_error_put:
499 dst_release(&rt->u.dst);
500 goto tx_error;
501}
502#endif
503
295 504
296/* 505/*
297 * IP Tunneling transmitter 506 * IP Tunneling transmitter
@@ -423,6 +632,112 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
423 return NF_STOLEN; 632 return NF_STOLEN;
424} 633}
425 634
635#ifdef CONFIG_IP_VS_IPV6
636int
637ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
638 struct ip_vs_protocol *pp)
639{
640 struct rt6_info *rt; /* Route to the other host */
641 struct net_device *tdev; /* Device to other host */
642 struct ipv6hdr *old_iph = ipv6_hdr(skb);
643 sk_buff_data_t old_transport_header = skb->transport_header;
644 struct ipv6hdr *iph; /* Our new IP header */
645 unsigned int max_headroom; /* The extra header space needed */
646 int mtu;
647
648 EnterFunction(10);
649
650 if (skb->protocol != htons(ETH_P_IPV6)) {
651 IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): protocol error, "
652 "ETH_P_IPV6: %d, skb protocol: %d\n",
653 htons(ETH_P_IPV6), skb->protocol);
654 goto tx_error;
655 }
656
657 rt = __ip_vs_get_out_rt_v6(cp);
658 if (!rt)
659 goto tx_error_icmp;
660
661 tdev = rt->u.dst.dev;
662
663 mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr);
664 /* TODO IPv6: do we need this check in IPv6? */
665 if (mtu < 1280) {
666 dst_release(&rt->u.dst);
667 IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n");
668 goto tx_error;
669 }
670 if (skb->dst)
671 skb->dst->ops->update_pmtu(skb->dst, mtu);
672
673 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
674 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
675 dst_release(&rt->u.dst);
676 IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): frag needed\n");
677 goto tx_error;
678 }
679
680 /*
681 * Okay, now see if we can stuff it in the buffer as-is.
682 */
683 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
684
685 if (skb_headroom(skb) < max_headroom
686 || skb_cloned(skb) || skb_shared(skb)) {
687 struct sk_buff *new_skb =
688 skb_realloc_headroom(skb, max_headroom);
689 if (!new_skb) {
690 dst_release(&rt->u.dst);
691 kfree_skb(skb);
692 IP_VS_ERR_RL("ip_vs_tunnel_xmit_v6(): no memory\n");
693 return NF_STOLEN;
694 }
695 kfree_skb(skb);
696 skb = new_skb;
697 old_iph = ipv6_hdr(skb);
698 }
699
700 skb->transport_header = old_transport_header;
701
702 skb_push(skb, sizeof(struct ipv6hdr));
703 skb_reset_network_header(skb);
704 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
705
706 /* drop old route */
707 dst_release(skb->dst);
708 skb->dst = &rt->u.dst;
709
710 /*
711 * Push down and install the IPIP header.
712 */
713 iph = ipv6_hdr(skb);
714 iph->version = 6;
715 iph->nexthdr = IPPROTO_IPV6;
716 iph->payload_len = old_iph->payload_len + sizeof(old_iph);
717 iph->priority = old_iph->priority;
718 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
719 iph->daddr = rt->rt6i_dst.addr;
720 iph->saddr = cp->vaddr.in6; /* rt->rt6i_src.addr; */
721 iph->hop_limit = old_iph->hop_limit;
722
723 /* Another hack: avoid icmp_send in ip_fragment */
724 skb->local_df = 1;
725
726 ip6_local_out(skb);
727
728 LeaveFunction(10);
729
730 return NF_STOLEN;
731
732tx_error_icmp:
733 dst_link_failure(skb);
734tx_error:
735 kfree_skb(skb);
736 LeaveFunction(10);
737 return NF_STOLEN;
738}
739#endif
740
426 741
427/* 742/*
428 * Direct Routing transmitter 743 * Direct Routing transmitter
@@ -467,7 +782,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
467 /* Another hack: avoid icmp_send in ip_fragment */ 782 /* Another hack: avoid icmp_send in ip_fragment */
468 skb->local_df = 1; 783 skb->local_df = 1;
469 784
470 IP_VS_XMIT(skb, rt); 785 IP_VS_XMIT(PF_INET, skb, rt);
471 786
472 LeaveFunction(10); 787 LeaveFunction(10);
473 return NF_STOLEN; 788 return NF_STOLEN;
@@ -480,6 +795,60 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
480 return NF_STOLEN; 795 return NF_STOLEN;
481} 796}
482 797
798#ifdef CONFIG_IP_VS_IPV6
799int
800ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
801 struct ip_vs_protocol *pp)
802{
803 struct rt6_info *rt; /* Route to the other host */
804 int mtu;
805
806 EnterFunction(10);
807
808 rt = __ip_vs_get_out_rt_v6(cp);
809 if (!rt)
810 goto tx_error_icmp;
811
812 /* MTU checking */
813 mtu = dst_mtu(&rt->u.dst);
814 if (skb->len > mtu) {
815 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
816 dst_release(&rt->u.dst);
817 IP_VS_DBG_RL("ip_vs_dr_xmit_v6(): frag needed\n");
818 goto tx_error;
819 }
820
821 /*
822 * Call ip_send_check because we are not sure it is called
823 * after ip_defrag. Is copy-on-write needed?
824 */
825 skb = skb_share_check(skb, GFP_ATOMIC);
826 if (unlikely(skb == NULL)) {
827 dst_release(&rt->u.dst);
828 return NF_STOLEN;
829 }
830
831 /* drop old route */
832 dst_release(skb->dst);
833 skb->dst = &rt->u.dst;
834
835 /* Another hack: avoid icmp_send in ip_fragment */
836 skb->local_df = 1;
837
838 IP_VS_XMIT(PF_INET6, skb, rt);
839
840 LeaveFunction(10);
841 return NF_STOLEN;
842
843tx_error_icmp:
844 dst_link_failure(skb);
845tx_error:
846 kfree_skb(skb);
847 LeaveFunction(10);
848 return NF_STOLEN;
849}
850#endif
851
483 852
484/* 853/*
485 * ICMP packet transmitter 854 * ICMP packet transmitter
@@ -540,7 +909,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
540 /* Another hack: avoid icmp_send in ip_fragment */ 909 /* Another hack: avoid icmp_send in ip_fragment */
541 skb->local_df = 1; 910 skb->local_df = 1;
542 911
543 IP_VS_XMIT(skb, rt); 912 IP_VS_XMIT(PF_INET, skb, rt);
544 913
545 rc = NF_STOLEN; 914 rc = NF_STOLEN;
546 goto out; 915 goto out;
@@ -557,3 +926,79 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
557 ip_rt_put(rt); 926 ip_rt_put(rt);
558 goto tx_error; 927 goto tx_error;
559} 928}
929
930#ifdef CONFIG_IP_VS_IPV6
931int
932ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
933 struct ip_vs_protocol *pp, int offset)
934{
935 struct rt6_info *rt; /* Route to the other host */
936 int mtu;
937 int rc;
938
939 EnterFunction(10);
940
941 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
942 forwarded directly here, because there is no need to
943 translate address/port back */
944 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
945 if (cp->packet_xmit)
946 rc = cp->packet_xmit(skb, cp, pp);
947 else
948 rc = NF_ACCEPT;
949 /* do not touch skb anymore */
950 atomic_inc(&cp->in_pkts);
951 goto out;
952 }
953
954 /*
955 * mangle and send the packet here (only for VS/NAT)
956 */
957
958 rt = __ip_vs_get_out_rt_v6(cp);
959 if (!rt)
960 goto tx_error_icmp;
961
962 /* MTU checking */
963 mtu = dst_mtu(&rt->u.dst);
964 if (skb->len > mtu) {
965 dst_release(&rt->u.dst);
966 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
967 IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n");
968 goto tx_error;
969 }
970
971 /* copy-on-write the packet before mangling it */
972 if (!skb_make_writable(skb, offset))
973 goto tx_error_put;
974
975 if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
976 goto tx_error_put;
977
978 /* drop the old route when skb is not shared */
979 dst_release(skb->dst);
980 skb->dst = &rt->u.dst;
981
982 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
983
984 /* Another hack: avoid icmp_send in ip_fragment */
985 skb->local_df = 1;
986
987 IP_VS_XMIT(PF_INET6, skb, rt);
988
989 rc = NF_STOLEN;
990 goto out;
991
992tx_error_icmp:
993 dst_link_failure(skb);
994tx_error:
995 dev_kfree_skb(skb);
996 rc = NF_STOLEN;
997out:
998 LeaveFunction(10);
999 return rc;
1000tx_error_put:
1001 dst_release(&rt->u.dst);
1002 goto tx_error;
1003}
1004#endif
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6ee5354c9aa1..f62187bb6d08 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -282,6 +282,8 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
282 struct rtable *r = NULL; 282 struct rtable *r = NULL;
283 283
284 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { 284 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
285 if (!rt_hash_table[st->bucket].chain)
286 continue;
285 rcu_read_lock_bh(); 287 rcu_read_lock_bh();
286 r = rcu_dereference(rt_hash_table[st->bucket].chain); 288 r = rcu_dereference(rt_hash_table[st->bucket].chain);
287 while (r) { 289 while (r) {
@@ -299,11 +301,14 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
299 struct rtable *r) 301 struct rtable *r)
300{ 302{
301 struct rt_cache_iter_state *st = seq->private; 303 struct rt_cache_iter_state *st = seq->private;
304
302 r = r->u.dst.rt_next; 305 r = r->u.dst.rt_next;
303 while (!r) { 306 while (!r) {
304 rcu_read_unlock_bh(); 307 rcu_read_unlock_bh();
305 if (--st->bucket < 0) 308 do {
306 break; 309 if (--st->bucket < 0)
310 return NULL;
311 } while (!rt_hash_table[st->bucket].chain);
307 rcu_read_lock_bh(); 312 rcu_read_lock_bh();
308 r = rt_hash_table[st->bucket].chain; 313 r = rt_hash_table[st->bucket].chain;
309 } 314 }
@@ -2840,7 +2845,9 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2840 if (s_h < 0) 2845 if (s_h < 0)
2841 s_h = 0; 2846 s_h = 0;
2842 s_idx = idx = cb->args[1]; 2847 s_idx = idx = cb->args[1];
2843 for (h = s_h; h <= rt_hash_mask; h++) { 2848 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2849 if (!rt_hash_table[h].chain)
2850 continue;
2844 rcu_read_lock_bh(); 2851 rcu_read_lock_bh();
2845 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; 2852 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2846 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 2853 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
@@ -2859,7 +2866,6 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2859 dst_release(xchg(&skb->dst, NULL)); 2866 dst_release(xchg(&skb->dst, NULL));
2860 } 2867 }
2861 rcu_read_unlock_bh(); 2868 rcu_read_unlock_bh();
2862 s_idx = 0;
2863 } 2869 }
2864 2870
2865done: 2871done:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 67ccce2a96bd..3b76bce769dd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -979,6 +979,39 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
979 } 979 }
980} 980}
981 981
982/* This must be called before lost_out is incremented */
983static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
984{
985 if ((tp->retransmit_skb_hint == NULL) ||
986 before(TCP_SKB_CB(skb)->seq,
987 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
988 tp->retransmit_skb_hint = skb;
989
990 if (!tp->lost_out ||
991 after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
992 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
993}
994
995static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
996{
997 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
998 tcp_verify_retransmit_hint(tp, skb);
999
1000 tp->lost_out += tcp_skb_pcount(skb);
1001 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1002 }
1003}
1004
1005void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
1006{
1007 tcp_verify_retransmit_hint(tp, skb);
1008
1009 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
1010 tp->lost_out += tcp_skb_pcount(skb);
1011 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1012 }
1013}
1014
982/* This procedure tags the retransmission queue when SACKs arrive. 1015/* This procedure tags the retransmission queue when SACKs arrive.
983 * 1016 *
984 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). 1017 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
@@ -1155,13 +1188,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
1155 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1188 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1156 tp->retrans_out -= tcp_skb_pcount(skb); 1189 tp->retrans_out -= tcp_skb_pcount(skb);
1157 1190
1158 /* clear lost hint */ 1191 tcp_skb_mark_lost_uncond_verify(tp, skb);
1159 tp->retransmit_skb_hint = NULL;
1160
1161 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
1162 tp->lost_out += tcp_skb_pcount(skb);
1163 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1164 }
1165 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); 1192 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
1166 } else { 1193 } else {
1167 if (before(ack_seq, new_low_seq)) 1194 if (before(ack_seq, new_low_seq))
@@ -1271,9 +1298,6 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1271 ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); 1298 ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
1272 tp->lost_out -= tcp_skb_pcount(skb); 1299 tp->lost_out -= tcp_skb_pcount(skb);
1273 tp->retrans_out -= tcp_skb_pcount(skb); 1300 tp->retrans_out -= tcp_skb_pcount(skb);
1274
1275 /* clear lost hint */
1276 tp->retransmit_skb_hint = NULL;
1277 } 1301 }
1278 } else { 1302 } else {
1279 if (!(sacked & TCPCB_RETRANS)) { 1303 if (!(sacked & TCPCB_RETRANS)) {
@@ -1292,9 +1316,6 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1292 if (sacked & TCPCB_LOST) { 1316 if (sacked & TCPCB_LOST) {
1293 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 1317 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
1294 tp->lost_out -= tcp_skb_pcount(skb); 1318 tp->lost_out -= tcp_skb_pcount(skb);
1295
1296 /* clear lost hint */
1297 tp->retransmit_skb_hint = NULL;
1298 } 1319 }
1299 } 1320 }
1300 1321
@@ -1324,7 +1345,6 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1324 if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) { 1345 if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) {
1325 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1346 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1326 tp->retrans_out -= tcp_skb_pcount(skb); 1347 tp->retrans_out -= tcp_skb_pcount(skb);
1327 tp->retransmit_skb_hint = NULL;
1328 } 1348 }
1329 1349
1330 return flag; 1350 return flag;
@@ -1726,6 +1746,8 @@ int tcp_use_frto(struct sock *sk)
1726 return 0; 1746 return 0;
1727 1747
1728 skb = tcp_write_queue_head(sk); 1748 skb = tcp_write_queue_head(sk);
1749 if (tcp_skb_is_last(sk, skb))
1750 return 1;
1729 skb = tcp_write_queue_next(sk, skb); /* Skips head */ 1751 skb = tcp_write_queue_next(sk, skb); /* Skips head */
1730 tcp_for_write_queue_from(skb, sk) { 1752 tcp_for_write_queue_from(skb, sk) {
1731 if (skb == tcp_send_head(sk)) 1753 if (skb == tcp_send_head(sk))
@@ -1867,6 +1889,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
1867 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 1889 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
1868 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1890 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1869 tp->lost_out += tcp_skb_pcount(skb); 1891 tp->lost_out += tcp_skb_pcount(skb);
1892 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
1870 } 1893 }
1871 } 1894 }
1872 tcp_verify_left_out(tp); 1895 tcp_verify_left_out(tp);
@@ -1883,7 +1906,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
1883 tp->high_seq = tp->snd_nxt; 1906 tp->high_seq = tp->snd_nxt;
1884 TCP_ECN_queue_cwr(tp); 1907 TCP_ECN_queue_cwr(tp);
1885 1908
1886 tcp_clear_retrans_hints_partial(tp); 1909 tcp_clear_all_retrans_hints(tp);
1887} 1910}
1888 1911
1889static void tcp_clear_retrans_partial(struct tcp_sock *tp) 1912static void tcp_clear_retrans_partial(struct tcp_sock *tp)
@@ -1934,12 +1957,11 @@ void tcp_enter_loss(struct sock *sk, int how)
1934 /* Push undo marker, if it was plain RTO and nothing 1957 /* Push undo marker, if it was plain RTO and nothing
1935 * was retransmitted. */ 1958 * was retransmitted. */
1936 tp->undo_marker = tp->snd_una; 1959 tp->undo_marker = tp->snd_una;
1937 tcp_clear_retrans_hints_partial(tp);
1938 } else { 1960 } else {
1939 tp->sacked_out = 0; 1961 tp->sacked_out = 0;
1940 tp->fackets_out = 0; 1962 tp->fackets_out = 0;
1941 tcp_clear_all_retrans_hints(tp);
1942 } 1963 }
1964 tcp_clear_all_retrans_hints(tp);
1943 1965
1944 tcp_for_write_queue(skb, sk) { 1966 tcp_for_write_queue(skb, sk) {
1945 if (skb == tcp_send_head(sk)) 1967 if (skb == tcp_send_head(sk))
@@ -1952,6 +1974,7 @@ void tcp_enter_loss(struct sock *sk, int how)
1952 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; 1974 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
1953 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1975 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1954 tp->lost_out += tcp_skb_pcount(skb); 1976 tp->lost_out += tcp_skb_pcount(skb);
1977 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
1955 } 1978 }
1956 } 1979 }
1957 tcp_verify_left_out(tp); 1980 tcp_verify_left_out(tp);
@@ -2157,19 +2180,6 @@ static int tcp_time_to_recover(struct sock *sk)
2157 return 0; 2180 return 0;
2158} 2181}
2159 2182
2160/* RFC: This is from the original, I doubt that this is necessary at all:
2161 * clear xmit_retrans hint if seq of this skb is beyond hint. How could we
2162 * retransmitted past LOST markings in the first place? I'm not fully sure
2163 * about undo and end of connection cases, which can cause R without L?
2164 */
2165static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
2166{
2167 if ((tp->retransmit_skb_hint != NULL) &&
2168 before(TCP_SKB_CB(skb)->seq,
2169 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
2170 tp->retransmit_skb_hint = NULL;
2171}
2172
2173/* Mark head of queue up as lost. With RFC3517 SACK, the packets is 2183/* Mark head of queue up as lost. With RFC3517 SACK, the packets is
2174 * is against sacked "cnt", otherwise it's against facked "cnt" 2184 * is against sacked "cnt", otherwise it's against facked "cnt"
2175 */ 2185 */
@@ -2217,11 +2227,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2217 cnt = packets; 2227 cnt = packets;
2218 } 2228 }
2219 2229
2220 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { 2230 tcp_skb_mark_lost(tp, skb);
2221 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
2222 tp->lost_out += tcp_skb_pcount(skb);
2223 tcp_verify_retransmit_hint(tp, skb);
2224 }
2225 } 2231 }
2226 tcp_verify_left_out(tp); 2232 tcp_verify_left_out(tp);
2227} 2233}
@@ -2263,11 +2269,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2263 if (!tcp_skb_timedout(sk, skb)) 2269 if (!tcp_skb_timedout(sk, skb))
2264 break; 2270 break;
2265 2271
2266 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { 2272 tcp_skb_mark_lost(tp, skb);
2267 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
2268 tp->lost_out += tcp_skb_pcount(skb);
2269 tcp_verify_retransmit_hint(tp, skb);
2270 }
2271 } 2273 }
2272 2274
2273 tp->scoreboard_skb_hint = skb; 2275 tp->scoreboard_skb_hint = skb;
@@ -2378,10 +2380,6 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
2378 } 2380 }
2379 tcp_moderate_cwnd(tp); 2381 tcp_moderate_cwnd(tp);
2380 tp->snd_cwnd_stamp = tcp_time_stamp; 2382 tp->snd_cwnd_stamp = tcp_time_stamp;
2381
2382 /* There is something screwy going on with the retrans hints after
2383 an undo */
2384 tcp_clear_all_retrans_hints(tp);
2385} 2383}
2386 2384
2387static inline int tcp_may_undo(struct tcp_sock *tp) 2385static inline int tcp_may_undo(struct tcp_sock *tp)
@@ -2848,6 +2846,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
2848 int flag = 0; 2846 int flag = 0;
2849 u32 pkts_acked = 0; 2847 u32 pkts_acked = 0;
2850 u32 reord = tp->packets_out; 2848 u32 reord = tp->packets_out;
2849 u32 prior_sacked = tp->sacked_out;
2851 s32 seq_rtt = -1; 2850 s32 seq_rtt = -1;
2852 s32 ca_seq_rtt = -1; 2851 s32 ca_seq_rtt = -1;
2853 ktime_t last_ackt = net_invalid_timestamp(); 2852 ktime_t last_ackt = net_invalid_timestamp();
@@ -2929,7 +2928,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
2929 2928
2930 tcp_unlink_write_queue(skb, sk); 2929 tcp_unlink_write_queue(skb, sk);
2931 sk_wmem_free_skb(sk, skb); 2930 sk_wmem_free_skb(sk, skb);
2932 tcp_clear_all_retrans_hints(tp); 2931 tp->scoreboard_skb_hint = NULL;
2932 if (skb == tp->retransmit_skb_hint)
2933 tp->retransmit_skb_hint = NULL;
2934 if (skb == tp->lost_skb_hint)
2935 tp->lost_skb_hint = NULL;
2933 } 2936 }
2934 2937
2935 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 2938 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
@@ -2948,6 +2951,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
2948 /* Non-retransmitted hole got filled? That's reordering */ 2951 /* Non-retransmitted hole got filled? That's reordering */
2949 if (reord < prior_fackets) 2952 if (reord < prior_fackets)
2950 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 2953 tcp_update_reordering(sk, tp->fackets_out - reord, 0);
2954
2955 /* No need to care for underflows here because
2956 * the lost_skb_hint gets NULLed if we're past it
2957 * (or something non-trivial happened)
2958 */
2959 if (tcp_is_fack(tp))
2960 tp->lost_cnt_hint -= pkts_acked;
2961 else
2962 tp->lost_cnt_hint -= prior_sacked - tp->sacked_out;
2951 } 2963 }
2952 2964
2953 tp->fackets_out -= min(pkts_acked, tp->fackets_out); 2965 tp->fackets_out -= min(pkts_acked, tp->fackets_out);
@@ -3442,6 +3454,22 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3442 } 3454 }
3443} 3455}
3444 3456
3457static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
3458{
3459 __be32 *ptr = (__be32 *)(th + 1);
3460
3461 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3462 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
3463 tp->rx_opt.saw_tstamp = 1;
3464 ++ptr;
3465 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3466 ++ptr;
3467 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3468 return 1;
3469 }
3470 return 0;
3471}
3472
3445/* Fast parse options. This hopes to only see timestamps. 3473/* Fast parse options. This hopes to only see timestamps.
3446 * If it is wrong it falls back on tcp_parse_options(). 3474 * If it is wrong it falls back on tcp_parse_options().
3447 */ 3475 */
@@ -3453,16 +3481,8 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3453 return 0; 3481 return 0;
3454 } else if (tp->rx_opt.tstamp_ok && 3482 } else if (tp->rx_opt.tstamp_ok &&
3455 th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { 3483 th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
3456 __be32 *ptr = (__be32 *)(th + 1); 3484 if (tcp_parse_aligned_timestamp(tp, th))
3457 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3458 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
3459 tp->rx_opt.saw_tstamp = 1;
3460 ++ptr;
3461 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3462 ++ptr;
3463 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3464 return 1; 3485 return 1;
3465 }
3466 } 3486 }
3467 tcp_parse_options(skb, &tp->rx_opt, 1); 3487 tcp_parse_options(skb, &tp->rx_opt, 1);
3468 return 1; 3488 return 1;
@@ -4138,7 +4158,7 @@ drop:
4138 skb1 = skb1->prev; 4158 skb1 = skb1->prev;
4139 } 4159 }
4140 } 4160 }
4141 __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue); 4161 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4142 4162
4143 /* And clean segments covered by new one as whole. */ 4163 /* And clean segments covered by new one as whole. */
4144 while ((skb1 = skb->next) != 4164 while ((skb1 = skb->next) !=
@@ -4161,6 +4181,18 @@ add_sack:
4161 } 4181 }
4162} 4182}
4163 4183
4184static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4185 struct sk_buff_head *list)
4186{
4187 struct sk_buff *next = skb->next;
4188
4189 __skb_unlink(skb, list);
4190 __kfree_skb(skb);
4191 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4192
4193 return next;
4194}
4195
4164/* Collapse contiguous sequence of skbs head..tail with 4196/* Collapse contiguous sequence of skbs head..tail with
4165 * sequence numbers start..end. 4197 * sequence numbers start..end.
4166 * Segments with FIN/SYN are not collapsed (only because this 4198 * Segments with FIN/SYN are not collapsed (only because this
@@ -4178,11 +4210,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4178 for (skb = head; skb != tail;) { 4210 for (skb = head; skb != tail;) {
4179 /* No new bits? It is possible on ofo queue. */ 4211 /* No new bits? It is possible on ofo queue. */
4180 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4212 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4181 struct sk_buff *next = skb->next; 4213 skb = tcp_collapse_one(sk, skb, list);
4182 __skb_unlink(skb, list);
4183 __kfree_skb(skb);
4184 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4185 skb = next;
4186 continue; 4214 continue;
4187 } 4215 }
4188 4216
@@ -4228,7 +4256,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4228 memcpy(nskb->head, skb->head, header); 4256 memcpy(nskb->head, skb->head, header);
4229 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 4257 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
4230 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 4258 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
4231 __skb_insert(nskb, skb->prev, skb, list); 4259 __skb_queue_before(list, skb, nskb);
4232 skb_set_owner_r(nskb, sk); 4260 skb_set_owner_r(nskb, sk);
4233 4261
4234 /* Copy data, releasing collapsed skbs. */ 4262 /* Copy data, releasing collapsed skbs. */
@@ -4246,11 +4274,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4246 start += size; 4274 start += size;
4247 } 4275 }
4248 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4276 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4249 struct sk_buff *next = skb->next; 4277 skb = tcp_collapse_one(sk, skb, list);
4250 __skb_unlink(skb, list);
4251 __kfree_skb(skb);
4252 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4253 skb = next;
4254 if (skb == tail || 4278 if (skb == tail ||
4255 tcp_hdr(skb)->syn || 4279 tcp_hdr(skb)->syn ||
4256 tcp_hdr(skb)->fin) 4280 tcp_hdr(skb)->fin)
@@ -4691,6 +4715,67 @@ out:
4691} 4715}
4692#endif /* CONFIG_NET_DMA */ 4716#endif /* CONFIG_NET_DMA */
4693 4717
4718/* Does PAWS and seqno based validation of an incoming segment, flags will
4719 * play significant role here.
4720 */
4721static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
4722 struct tcphdr *th, int syn_inerr)
4723{
4724 struct tcp_sock *tp = tcp_sk(sk);
4725
4726 /* RFC1323: H1. Apply PAWS check first. */
4727 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4728 tcp_paws_discard(sk, skb)) {
4729 if (!th->rst) {
4730 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
4731 tcp_send_dupack(sk, skb);
4732 goto discard;
4733 }
4734 /* Reset is accepted even if it did not pass PAWS. */
4735 }
4736
4737 /* Step 1: check sequence number */
4738 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
4739 /* RFC793, page 37: "In all states except SYN-SENT, all reset
4740 * (RST) segments are validated by checking their SEQ-fields."
4741 * And page 69: "If an incoming segment is not acceptable,
4742 * an acknowledgment should be sent in reply (unless the RST
4743 * bit is set, if so drop the segment and return)".
4744 */
4745 if (!th->rst)
4746 tcp_send_dupack(sk, skb);
4747 goto discard;
4748 }
4749
4750 /* Step 2: check RST bit */
4751 if (th->rst) {
4752 tcp_reset(sk);
4753 goto discard;
4754 }
4755
4756 /* ts_recent update must be made after we are sure that the packet
4757 * is in window.
4758 */
4759 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4760
4761 /* step 3: check security and precedence [ignored] */
4762
4763 /* step 4: Check for a SYN in window. */
4764 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4765 if (syn_inerr)
4766 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4767 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
4768 tcp_reset(sk);
4769 return -1;
4770 }
4771
4772 return 1;
4773
4774discard:
4775 __kfree_skb(skb);
4776 return 0;
4777}
4778
4694/* 4779/*
4695 * TCP receive function for the ESTABLISHED state. 4780 * TCP receive function for the ESTABLISHED state.
4696 * 4781 *
@@ -4718,6 +4803,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4718 struct tcphdr *th, unsigned len) 4803 struct tcphdr *th, unsigned len)
4719{ 4804{
4720 struct tcp_sock *tp = tcp_sk(sk); 4805 struct tcp_sock *tp = tcp_sk(sk);
4806 int res;
4721 4807
4722 /* 4808 /*
4723 * Header prediction. 4809 * Header prediction.
@@ -4756,19 +4842,10 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4756 4842
4757 /* Check timestamp */ 4843 /* Check timestamp */
4758 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 4844 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
4759 __be32 *ptr = (__be32 *)(th + 1);
4760
4761 /* No? Slow path! */ 4845 /* No? Slow path! */
4762 if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 4846 if (!tcp_parse_aligned_timestamp(tp, th))
4763 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
4764 goto slow_path; 4847 goto slow_path;
4765 4848
4766 tp->rx_opt.saw_tstamp = 1;
4767 ++ptr;
4768 tp->rx_opt.rcv_tsval = ntohl(*ptr);
4769 ++ptr;
4770 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
4771
4772 /* If PAWS failed, check it more carefully in slow path */ 4849 /* If PAWS failed, check it more carefully in slow path */
4773 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 4850 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
4774 goto slow_path; 4851 goto slow_path;
@@ -4899,51 +4976,12 @@ slow_path:
4899 goto csum_error; 4976 goto csum_error;
4900 4977
4901 /* 4978 /*
4902 * RFC1323: H1. Apply PAWS check first.
4903 */
4904 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4905 tcp_paws_discard(sk, skb)) {
4906 if (!th->rst) {
4907 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
4908 tcp_send_dupack(sk, skb);
4909 goto discard;
4910 }
4911 /* Resets are accepted even if PAWS failed.
4912
4913 ts_recent update must be made after we are sure
4914 that the packet is in window.
4915 */
4916 }
4917
4918 /*
4919 * Standard slow path. 4979 * Standard slow path.
4920 */ 4980 */
4921 4981
4922 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 4982 res = tcp_validate_incoming(sk, skb, th, 1);
4923 /* RFC793, page 37: "In all states except SYN-SENT, all reset 4983 if (res <= 0)
4924 * (RST) segments are validated by checking their SEQ-fields." 4984 return -res;
4925 * And page 69: "If an incoming segment is not acceptable,
4926 * an acknowledgment should be sent in reply (unless the RST bit
4927 * is set, if so drop the segment and return)".
4928 */
4929 if (!th->rst)
4930 tcp_send_dupack(sk, skb);
4931 goto discard;
4932 }
4933
4934 if (th->rst) {
4935 tcp_reset(sk);
4936 goto discard;
4937 }
4938
4939 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4940
4941 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4942 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4943 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
4944 tcp_reset(sk);
4945 return 1;
4946 }
4947 4985
4948step5: 4986step5:
4949 if (th->ack) 4987 if (th->ack)
@@ -5225,6 +5263,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5225 struct tcp_sock *tp = tcp_sk(sk); 5263 struct tcp_sock *tp = tcp_sk(sk);
5226 struct inet_connection_sock *icsk = inet_csk(sk); 5264 struct inet_connection_sock *icsk = inet_csk(sk);
5227 int queued = 0; 5265 int queued = 0;
5266 int res;
5228 5267
5229 tp->rx_opt.saw_tstamp = 0; 5268 tp->rx_opt.saw_tstamp = 0;
5230 5269
@@ -5277,42 +5316,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5277 return 0; 5316 return 0;
5278 } 5317 }
5279 5318
5280 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 5319 res = tcp_validate_incoming(sk, skb, th, 0);
5281 tcp_paws_discard(sk, skb)) { 5320 if (res <= 0)
5282 if (!th->rst) { 5321 return -res;
5283 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
5284 tcp_send_dupack(sk, skb);
5285 goto discard;
5286 }
5287 /* Reset is accepted even if it did not pass PAWS. */
5288 }
5289
5290 /* step 1: check sequence number */
5291 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
5292 if (!th->rst)
5293 tcp_send_dupack(sk, skb);
5294 goto discard;
5295 }
5296
5297 /* step 2: check RST bit */
5298 if (th->rst) {
5299 tcp_reset(sk);
5300 goto discard;
5301 }
5302
5303 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
5304
5305 /* step 3: check security and precedence [ignored] */
5306
5307 /* step 4:
5308 *
5309 * Check for a SYN in window.
5310 */
5311 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5312 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
5313 tcp_reset(sk);
5314 return 1;
5315 }
5316 5322
5317 /* step 5: check the ACK field */ 5323 /* step 5: check the ACK field */
5318 if (th->ack) { 5324 if (th->ack) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 44c1e934824b..44aef1c1f373 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1364,6 +1364,10 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1364 tcp_mtup_init(newsk); 1364 tcp_mtup_init(newsk);
1365 tcp_sync_mss(newsk, dst_mtu(dst)); 1365 tcp_sync_mss(newsk, dst_mtu(dst));
1366 newtp->advmss = dst_metric(dst, RTAX_ADVMSS); 1366 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1367 if (tcp_sk(sk)->rx_opt.user_mss &&
1368 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1369 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1370
1367 tcp_initialize_rcv_mss(newsk); 1371 tcp_initialize_rcv_mss(newsk);
1368 1372
1369#ifdef CONFIG_TCP_MD5SIG 1373#ifdef CONFIG_TCP_MD5SIG
@@ -1946,6 +1950,12 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1946 return rc; 1950 return rc;
1947} 1951}
1948 1952
1953static inline int empty_bucket(struct tcp_iter_state *st)
1954{
1955 return hlist_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
1956 hlist_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
1957}
1958
1949static void *established_get_first(struct seq_file *seq) 1959static void *established_get_first(struct seq_file *seq)
1950{ 1960{
1951 struct tcp_iter_state* st = seq->private; 1961 struct tcp_iter_state* st = seq->private;
@@ -1958,6 +1968,10 @@ static void *established_get_first(struct seq_file *seq)
1958 struct inet_timewait_sock *tw; 1968 struct inet_timewait_sock *tw;
1959 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); 1969 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1960 1970
1971 /* Lockless fast path for the common case of empty buckets */
1972 if (empty_bucket(st))
1973 continue;
1974
1961 read_lock_bh(lock); 1975 read_lock_bh(lock);
1962 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { 1976 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1963 if (sk->sk_family != st->family || 1977 if (sk->sk_family != st->family ||
@@ -2008,13 +2022,15 @@ get_tw:
2008 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); 2022 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2009 st->state = TCP_SEQ_STATE_ESTABLISHED; 2023 st->state = TCP_SEQ_STATE_ESTABLISHED;
2010 2024
2011 if (++st->bucket < tcp_hashinfo.ehash_size) { 2025 /* Look for next non empty bucket */
2012 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); 2026 while (++st->bucket < tcp_hashinfo.ehash_size &&
2013 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); 2027 empty_bucket(st))
2014 } else { 2028 ;
2015 cur = NULL; 2029 if (st->bucket >= tcp_hashinfo.ehash_size)
2016 goto out; 2030 return NULL;
2017 } 2031
2032 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2033 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
2018 } else 2034 } else
2019 sk = sk_next(sk); 2035 sk = sk_next(sk);
2020 2036
@@ -2376,6 +2392,7 @@ static int __net_init tcp_sk_init(struct net *net)
2376static void __net_exit tcp_sk_exit(struct net *net) 2392static void __net_exit tcp_sk_exit(struct net *net)
2377{ 2393{
2378 inet_ctl_sock_destroy(net->ipv4.tcp_sock); 2394 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2395 inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
2379} 2396}
2380 2397
2381static struct pernet_operations __net_initdata tcp_sk_ops = { 2398static struct pernet_operations __net_initdata tcp_sk_ops = {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8165f5aa8c71..a8499ef3234a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1824,6 +1824,8 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb,
1824 1824
1825 /* changed transmit queue under us so clear hints */ 1825 /* changed transmit queue under us so clear hints */
1826 tcp_clear_retrans_hints_partial(tp); 1826 tcp_clear_retrans_hints_partial(tp);
1827 if (next_skb == tp->retransmit_skb_hint)
1828 tp->retransmit_skb_hint = skb;
1827 1829
1828 sk_wmem_free_skb(sk, next_skb); 1830 sk_wmem_free_skb(sk, next_skb);
1829} 1831}
@@ -1838,7 +1840,7 @@ void tcp_simple_retransmit(struct sock *sk)
1838 struct tcp_sock *tp = tcp_sk(sk); 1840 struct tcp_sock *tp = tcp_sk(sk);
1839 struct sk_buff *skb; 1841 struct sk_buff *skb;
1840 unsigned int mss = tcp_current_mss(sk, 0); 1842 unsigned int mss = tcp_current_mss(sk, 0);
1841 int lost = 0; 1843 u32 prior_lost = tp->lost_out;
1842 1844
1843 tcp_for_write_queue(skb, sk) { 1845 tcp_for_write_queue(skb, sk) {
1844 if (skb == tcp_send_head(sk)) 1846 if (skb == tcp_send_head(sk))
@@ -1849,17 +1851,13 @@ void tcp_simple_retransmit(struct sock *sk)
1849 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1851 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1850 tp->retrans_out -= tcp_skb_pcount(skb); 1852 tp->retrans_out -= tcp_skb_pcount(skb);
1851 } 1853 }
1852 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST)) { 1854 tcp_skb_mark_lost_uncond_verify(tp, skb);
1853 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1854 tp->lost_out += tcp_skb_pcount(skb);
1855 lost = 1;
1856 }
1857 } 1855 }
1858 } 1856 }
1859 1857
1860 tcp_clear_all_retrans_hints(tp); 1858 tcp_clear_retrans_hints_partial(tp);
1861 1859
1862 if (!lost) 1860 if (prior_lost == tp->lost_out)
1863 return; 1861 return;
1864 1862
1865 if (tcp_is_reno(tp)) 1863 if (tcp_is_reno(tp))
@@ -1934,8 +1932,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1934 /* Collapse two adjacent packets if worthwhile and we can. */ 1932 /* Collapse two adjacent packets if worthwhile and we can. */
1935 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && 1933 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
1936 (skb->len < (cur_mss >> 1)) && 1934 (skb->len < (cur_mss >> 1)) &&
1937 (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
1938 (!tcp_skb_is_last(sk, skb)) && 1935 (!tcp_skb_is_last(sk, skb)) &&
1936 (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
1939 (skb_shinfo(skb)->nr_frags == 0 && 1937 (skb_shinfo(skb)->nr_frags == 0 &&
1940 skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && 1938 skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
1941 (tcp_skb_pcount(skb) == 1 && 1939 (tcp_skb_pcount(skb) == 1 &&
@@ -1996,86 +1994,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1996 return err; 1994 return err;
1997} 1995}
1998 1996
1999/* This gets called after a retransmit timeout, and the initially 1997static int tcp_can_forward_retransmit(struct sock *sk)
2000 * retransmitted data is acknowledged. It tries to continue
2001 * resending the rest of the retransmit queue, until either
2002 * we've sent it all or the congestion window limit is reached.
2003 * If doing SACK, the first ACK which comes back for a timeout
2004 * based retransmit packet might feed us FACK information again.
2005 * If so, we use it to avoid unnecessarily retransmissions.
2006 */
2007void tcp_xmit_retransmit_queue(struct sock *sk)
2008{ 1998{
2009 const struct inet_connection_sock *icsk = inet_csk(sk); 1999 const struct inet_connection_sock *icsk = inet_csk(sk);
2010 struct tcp_sock *tp = tcp_sk(sk); 2000 struct tcp_sock *tp = tcp_sk(sk);
2011 struct sk_buff *skb;
2012 int packet_cnt;
2013
2014 if (tp->retransmit_skb_hint) {
2015 skb = tp->retransmit_skb_hint;
2016 packet_cnt = tp->retransmit_cnt_hint;
2017 } else {
2018 skb = tcp_write_queue_head(sk);
2019 packet_cnt = 0;
2020 }
2021
2022 /* First pass: retransmit lost packets. */
2023 if (tp->lost_out) {
2024 tcp_for_write_queue_from(skb, sk) {
2025 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2026
2027 if (skb == tcp_send_head(sk))
2028 break;
2029 /* we could do better than to assign each time */
2030 tp->retransmit_skb_hint = skb;
2031 tp->retransmit_cnt_hint = packet_cnt;
2032
2033 /* Assume this retransmit will generate
2034 * only one packet for congestion window
2035 * calculation purposes. This works because
2036 * tcp_retransmit_skb() will chop up the
2037 * packet to be MSS sized and all the
2038 * packet counting works out.
2039 */
2040 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2041 return;
2042
2043 if (sacked & TCPCB_LOST) {
2044 if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
2045 int mib_idx;
2046
2047 if (tcp_retransmit_skb(sk, skb)) {
2048 tp->retransmit_skb_hint = NULL;
2049 return;
2050 }
2051 if (icsk->icsk_ca_state != TCP_CA_Loss)
2052 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2053 else
2054 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2055 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2056
2057 if (skb == tcp_write_queue_head(sk))
2058 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2059 inet_csk(sk)->icsk_rto,
2060 TCP_RTO_MAX);
2061 }
2062
2063 packet_cnt += tcp_skb_pcount(skb);
2064 if (packet_cnt >= tp->lost_out)
2065 break;
2066 }
2067 }
2068 }
2069
2070 /* OK, demanded retransmission is finished. */
2071 2001
2072 /* Forward retransmissions are possible only during Recovery. */ 2002 /* Forward retransmissions are possible only during Recovery. */
2073 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2003 if (icsk->icsk_ca_state != TCP_CA_Recovery)
2074 return; 2004 return 0;
2075 2005
2076 /* No forward retransmissions in Reno are possible. */ 2006 /* No forward retransmissions in Reno are possible. */
2077 if (tcp_is_reno(tp)) 2007 if (tcp_is_reno(tp))
2078 return; 2008 return 0;
2079 2009
2080 /* Yeah, we have to make difficult choice between forward transmission 2010 /* Yeah, we have to make difficult choice between forward transmission
2081 * and retransmission... Both ways have their merits... 2011 * and retransmission... Both ways have their merits...
@@ -2086,43 +2016,104 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2086 */ 2016 */
2087 2017
2088 if (tcp_may_send_now(sk)) 2018 if (tcp_may_send_now(sk))
2089 return; 2019 return 0;
2090 2020
2091 /* If nothing is SACKed, highest_sack in the loop won't be valid */ 2021 return 1;
2092 if (!tp->sacked_out) 2022}
2093 return;
2094 2023
2095 if (tp->forward_skb_hint) 2024/* This gets called after a retransmit timeout, and the initially
2096 skb = tp->forward_skb_hint; 2025 * retransmitted data is acknowledged. It tries to continue
2097 else 2026 * resending the rest of the retransmit queue, until either
2027 * we've sent it all or the congestion window limit is reached.
2028 * If doing SACK, the first ACK which comes back for a timeout
2029 * based retransmit packet might feed us FACK information again.
2030 * If so, we use it to avoid unnecessarily retransmissions.
2031 */
2032void tcp_xmit_retransmit_queue(struct sock *sk)
2033{
2034 const struct inet_connection_sock *icsk = inet_csk(sk);
2035 struct tcp_sock *tp = tcp_sk(sk);
2036 struct sk_buff *skb;
2037 struct sk_buff *hole = NULL;
2038 u32 last_lost;
2039 int mib_idx;
2040 int fwd_rexmitting = 0;
2041
2042 if (!tp->lost_out)
2043 tp->retransmit_high = tp->snd_una;
2044
2045 if (tp->retransmit_skb_hint) {
2046 skb = tp->retransmit_skb_hint;
2047 last_lost = TCP_SKB_CB(skb)->end_seq;
2048 if (after(last_lost, tp->retransmit_high))
2049 last_lost = tp->retransmit_high;
2050 } else {
2098 skb = tcp_write_queue_head(sk); 2051 skb = tcp_write_queue_head(sk);
2052 last_lost = tp->snd_una;
2053 }
2099 2054
2055 /* First pass: retransmit lost packets. */
2100 tcp_for_write_queue_from(skb, sk) { 2056 tcp_for_write_queue_from(skb, sk) {
2101 if (skb == tcp_send_head(sk)) 2057 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2102 break;
2103 tp->forward_skb_hint = skb;
2104 2058
2105 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2059 if (skb == tcp_send_head(sk))
2106 break; 2060 break;
2061 /* we could do better than to assign each time */
2062 if (hole == NULL)
2063 tp->retransmit_skb_hint = skb;
2107 2064
2065 /* Assume this retransmit will generate
2066 * only one packet for congestion window
2067 * calculation purposes. This works because
2068 * tcp_retransmit_skb() will chop up the
2069 * packet to be MSS sized and all the
2070 * packet counting works out.
2071 */
2108 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2072 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2109 break; 2073 return;
2074
2075 if (fwd_rexmitting) {
2076begin_fwd:
2077 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2078 break;
2079 mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2080
2081 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2082 tp->retransmit_high = last_lost;
2083 if (!tcp_can_forward_retransmit(sk))
2084 break;
2085 /* Backtrack if necessary to non-L'ed skb */
2086 if (hole != NULL) {
2087 skb = hole;
2088 hole = NULL;
2089 }
2090 fwd_rexmitting = 1;
2091 goto begin_fwd;
2110 2092
2111 if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) 2093 } else if (!(sacked & TCPCB_LOST)) {
2094 if (hole == NULL && !(sacked & TCPCB_SACKED_RETRANS))
2095 hole = skb;
2112 continue; 2096 continue;
2113 2097
2114 /* Ok, retransmit it. */ 2098 } else {
2115 if (tcp_retransmit_skb(sk, skb)) { 2099 last_lost = TCP_SKB_CB(skb)->end_seq;
2116 tp->forward_skb_hint = NULL; 2100 if (icsk->icsk_ca_state != TCP_CA_Loss)
2117 break; 2101 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2102 else
2103 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2118 } 2104 }
2119 2105
2106 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2107 continue;
2108
2109 if (tcp_retransmit_skb(sk, skb))
2110 return;
2111 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2112
2120 if (skb == tcp_write_queue_head(sk)) 2113 if (skb == tcp_write_queue_head(sk))
2121 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2114 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2122 inet_csk(sk)->icsk_rto, 2115 inet_csk(sk)->icsk_rto,
2123 TCP_RTO_MAX); 2116 TCP_RTO_MAX);
2124
2125 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFORWARDRETRANS);
2126 } 2117 }
2127} 2118}
2128 2119
@@ -2241,6 +2232,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2241 struct sk_buff *skb; 2232 struct sk_buff *skb;
2242 struct tcp_md5sig_key *md5; 2233 struct tcp_md5sig_key *md5;
2243 __u8 *md5_hash_location; 2234 __u8 *md5_hash_location;
2235 int mss;
2244 2236
2245 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 2237 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2246 if (skb == NULL) 2238 if (skb == NULL)
@@ -2251,13 +2243,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2251 2243
2252 skb->dst = dst_clone(dst); 2244 skb->dst = dst_clone(dst);
2253 2245
2246 mss = dst_metric(dst, RTAX_ADVMSS);
2247 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2248 mss = tp->rx_opt.user_mss;
2249
2254 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 2250 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2255 __u8 rcv_wscale; 2251 __u8 rcv_wscale;
2256 /* Set this up on the first call only */ 2252 /* Set this up on the first call only */
2257 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2253 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2258 /* tcp_full_space because it is guaranteed to be the first packet */ 2254 /* tcp_full_space because it is guaranteed to be the first packet */
2259 tcp_select_initial_window(tcp_full_space(sk), 2255 tcp_select_initial_window(tcp_full_space(sk),
2260 dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 2256 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2261 &req->rcv_wnd, 2257 &req->rcv_wnd,
2262 &req->window_clamp, 2258 &req->window_clamp,
2263 ireq->wscale_ok, 2259 ireq->wscale_ok,
@@ -2267,8 +2263,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2267 2263
2268 memset(&opts, 0, sizeof(opts)); 2264 memset(&opts, 0, sizeof(opts));
2269 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2265 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2270 tcp_header_size = tcp_synack_options(sk, req, 2266 tcp_header_size = tcp_synack_options(sk, req, mss,
2271 dst_metric(dst, RTAX_ADVMSS),
2272 skb, &opts, &md5) + 2267 skb, &opts, &md5) +
2273 sizeof(struct tcphdr); 2268 sizeof(struct tcphdr);
2274 2269
@@ -2342,6 +2337,9 @@ static void tcp_connect_init(struct sock *sk)
2342 if (!tp->window_clamp) 2337 if (!tp->window_clamp)
2343 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 2338 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2344 tp->advmss = dst_metric(dst, RTAX_ADVMSS); 2339 tp->advmss = dst_metric(dst, RTAX_ADVMSS);
2340 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2341 tp->advmss = tp->rx_opt.user_mss;
2342
2345 tcp_initialize_rcv_mss(sk); 2343 tcp_initialize_rcv_mss(sk);
2346 2344
2347 tcp_select_initial_window(tcp_full_space(sk), 2345 tcp_select_initial_window(tcp_full_space(sk),
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0e844c2736a7..3df2c442d90b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -943,39 +943,39 @@ static int ip6_dst_lookup_tail(struct sock *sk,
943 } 943 }
944 944
945#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 945#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
946 /* 946 /*
947 * Here if the dst entry we've looked up 947 * Here if the dst entry we've looked up
948 * has a neighbour entry that is in the INCOMPLETE 948 * has a neighbour entry that is in the INCOMPLETE
949 * state and the src address from the flow is 949 * state and the src address from the flow is
950 * marked as OPTIMISTIC, we release the found 950 * marked as OPTIMISTIC, we release the found
951 * dst entry and replace it instead with the 951 * dst entry and replace it instead with the
952 * dst entry of the nexthop router 952 * dst entry of the nexthop router
953 */ 953 */
954 if (!((*dst)->neighbour->nud_state & NUD_VALID)) { 954 if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) {
955 struct inet6_ifaddr *ifp; 955 struct inet6_ifaddr *ifp;
956 struct flowi fl_gw; 956 struct flowi fl_gw;
957 int redirect; 957 int redirect;
958 958
959 ifp = ipv6_get_ifaddr(net, &fl->fl6_src, 959 ifp = ipv6_get_ifaddr(net, &fl->fl6_src,
960 (*dst)->dev, 1); 960 (*dst)->dev, 1);
961 961
962 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); 962 redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
963 if (ifp) 963 if (ifp)
964 in6_ifa_put(ifp); 964 in6_ifa_put(ifp);
965 965
966 if (redirect) { 966 if (redirect) {
967 /* 967 /*
968 * We need to get the dst entry for the 968 * We need to get the dst entry for the
969 * default router instead 969 * default router instead
970 */ 970 */
971 dst_release(*dst); 971 dst_release(*dst);
972 memcpy(&fl_gw, fl, sizeof(struct flowi)); 972 memcpy(&fl_gw, fl, sizeof(struct flowi));
973 memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr)); 973 memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
974 *dst = ip6_route_output(net, sk, &fl_gw); 974 *dst = ip6_route_output(net, sk, &fl_gw);
975 if ((err = (*dst)->error)) 975 if ((err = (*dst)->error))
976 goto out_err_release; 976 goto out_err_release;
977 }
978 } 977 }
978 }
979#endif 979#endif
980 980
981 return 0; 981 return 0;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 17c7b098cdb0..64ce3d33d9c6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1050,10 +1050,10 @@ ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1050 } 1050 }
1051 1051
1052 switch (skb->protocol) { 1052 switch (skb->protocol) {
1053 case __constant_htons(ETH_P_IP): 1053 case htons(ETH_P_IP):
1054 ret = ip4ip6_tnl_xmit(skb, dev); 1054 ret = ip4ip6_tnl_xmit(skb, dev);
1055 break; 1055 break;
1056 case __constant_htons(ETH_P_IPV6): 1056 case htons(ETH_P_IPV6):
1057 ret = ip6ip6_tnl_xmit(skb, dev); 1057 ret = ip6ip6_tnl_xmit(skb, dev);
1058 break; 1058 break;
1059 default: 1059 default:
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 01d47674f7e5..e53e493606c5 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -377,14 +377,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
377 skb_checksum_complete(skb)) { 377 skb_checksum_complete(skb)) {
378 atomic_inc(&sk->sk_drops); 378 atomic_inc(&sk->sk_drops);
379 kfree_skb(skb); 379 kfree_skb(skb);
380 return 0; 380 return NET_RX_DROP;
381 } 381 }
382 382
383 /* Charge it to the socket. */ 383 /* Charge it to the socket. */
384 if (sock_queue_rcv_skb(sk,skb)<0) { 384 if (sock_queue_rcv_skb(sk,skb)<0) {
385 atomic_inc(&sk->sk_drops); 385 atomic_inc(&sk->sk_drops);
386 kfree_skb(skb); 386 kfree_skb(skb);
387 return 0; 387 return NET_RX_DROP;
388 } 388 }
389 389
390 return 0; 390 return 0;
@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
429 if (skb_checksum_complete(skb)) { 429 if (skb_checksum_complete(skb)) {
430 atomic_inc(&sk->sk_drops); 430 atomic_inc(&sk->sk_drops);
431 kfree_skb(skb); 431 kfree_skb(skb);
432 return 0; 432 return NET_RX_DROP;
433 } 433 }
434 } 434 }
435 435
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9af6115f0f50..776871ee2288 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1003,6 +1003,25 @@ int icmp6_dst_gc(void)
1003 return more; 1003 return more;
1004} 1004}
1005 1005
1006static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1007 void *arg)
1008{
1009 struct dst_entry *dst, **pprev;
1010
1011 spin_lock_bh(&icmp6_dst_lock);
1012 pprev = &icmp6_dst_gc_list;
1013 while ((dst = *pprev) != NULL) {
1014 struct rt6_info *rt = (struct rt6_info *) dst;
1015 if (func(rt, arg)) {
1016 *pprev = dst->next;
1017 dst_free(dst);
1018 } else {
1019 pprev = &dst->next;
1020 }
1021 }
1022 spin_unlock_bh(&icmp6_dst_lock);
1023}
1024
1006static int ip6_dst_gc(struct dst_ops *ops) 1025static int ip6_dst_gc(struct dst_ops *ops)
1007{ 1026{
1008 unsigned long now = jiffies; 1027 unsigned long now = jiffies;
@@ -1930,6 +1949,7 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
1930 }; 1949 };
1931 1950
1932 fib6_clean_all(net, fib6_ifdown, 0, &adn); 1951 fib6_clean_all(net, fib6_ifdown, 0, &adn);
1952 icmp6_clean_all(fib6_ifdown, &adn);
1933} 1953}
1934 1954
1935struct rt6_mtu_change_arg 1955struct rt6_mtu_change_arg
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5b90b369ccb2..e85f377a8f82 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1286,7 +1286,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1286 struct request_sock *req, 1286 struct request_sock *req,
1287 struct dst_entry *dst) 1287 struct dst_entry *dst)
1288{ 1288{
1289 struct inet6_request_sock *treq = inet6_rsk(req); 1289 struct inet6_request_sock *treq;
1290 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 1290 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1291 struct tcp6_sock *newtcp6sk; 1291 struct tcp6_sock *newtcp6sk;
1292 struct inet_sock *newinet; 1292 struct inet_sock *newinet;
@@ -1350,6 +1350,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1350 return newsk; 1350 return newsk;
1351 } 1351 }
1352 1352
1353 treq = inet6_rsk(req);
1353 opt = np->opt; 1354 opt = np->opt;
1354 1355
1355 if (sk_acceptq_is_full(sk)) 1356 if (sk_acceptq_is_full(sk))
@@ -2148,6 +2149,7 @@ static int tcpv6_net_init(struct net *net)
2148static void tcpv6_net_exit(struct net *net) 2149static void tcpv6_net_exit(struct net *net)
2149{ 2150{
2150 inet_ctl_sock_destroy(net->ipv6.tcp_sk); 2151 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2152 inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
2151} 2153}
2152 2154
2153static struct pernet_operations tcpv6_net_ops = { 2155static struct pernet_operations tcpv6_net_ops = {
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 80d693392b0f..8427518e4f20 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -179,19 +179,6 @@ config MAC80211_VERBOSE_MPL_DEBUG
179 179
180 Do not select this option. 180 Do not select this option.
181 181
182config MAC80211_LOWTX_FRAME_DUMP
183 bool "Debug frame dumping"
184 depends on MAC80211_DEBUG_MENU
185 ---help---
186 Selecting this option will cause the stack to
187 print a message for each frame that is handed
188 to the lowlevel driver for transmission. This
189 message includes all MAC addresses and the
190 frame control field.
191
192 If unsure, say N and insert the debugging code
193 you require into the driver you are debugging.
194
195config MAC80211_DEBUG_COUNTERS 182config MAC80211_DEBUG_COUNTERS
196 bool "Extra statistics for TX/RX debugging" 183 bool "Extra statistics for TX/RX debugging"
197 depends on MAC80211_DEBUG_MENU 184 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index a169b0201d61..2dc8f2bff27b 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -7,6 +7,8 @@ mac80211-y := \
7 sta_info.o \ 7 sta_info.o \
8 wep.o \ 8 wep.o \
9 wpa.o \ 9 wpa.o \
10 scan.o \
11 ht.o \
10 mlme.o \ 12 mlme.o \
11 iface.o \ 13 iface.o \
12 rate.o \ 14 rate.o \
@@ -15,6 +17,7 @@ mac80211-y := \
15 aes_ccm.o \ 17 aes_ccm.o \
16 cfg.o \ 18 cfg.o \
17 rx.o \ 19 rx.o \
20 spectmgmt.o \
18 tx.o \ 21 tx.o \
19 key.o \ 22 key.o \
20 util.o \ 23 util.o \
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 297c257864c7..e2574885db4a 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -17,26 +17,26 @@
17#include "rate.h" 17#include "rate.h"
18#include "mesh.h" 18#include "mesh.h"
19 19
20static enum ieee80211_if_types 20struct ieee80211_hw *wiphy_to_hw(struct wiphy *wiphy)
21nl80211_type_to_mac80211_type(enum nl80211_iftype type) 21{
22 struct ieee80211_local *local = wiphy_priv(wiphy);
23 return &local->hw;
24}
25EXPORT_SYMBOL(wiphy_to_hw);
26
27static bool nl80211_type_check(enum nl80211_iftype type)
22{ 28{
23 switch (type) { 29 switch (type) {
24 case NL80211_IFTYPE_UNSPECIFIED:
25 return IEEE80211_IF_TYPE_STA;
26 case NL80211_IFTYPE_ADHOC: 30 case NL80211_IFTYPE_ADHOC:
27 return IEEE80211_IF_TYPE_IBSS;
28 case NL80211_IFTYPE_STATION: 31 case NL80211_IFTYPE_STATION:
29 return IEEE80211_IF_TYPE_STA;
30 case NL80211_IFTYPE_MONITOR: 32 case NL80211_IFTYPE_MONITOR:
31 return IEEE80211_IF_TYPE_MNTR;
32#ifdef CONFIG_MAC80211_MESH 33#ifdef CONFIG_MAC80211_MESH
33 case NL80211_IFTYPE_MESH_POINT: 34 case NL80211_IFTYPE_MESH_POINT:
34 return IEEE80211_IF_TYPE_MESH_POINT;
35#endif 35#endif
36 case NL80211_IFTYPE_WDS: 36 case NL80211_IFTYPE_WDS:
37 return IEEE80211_IF_TYPE_WDS; 37 return true;
38 default: 38 default:
39 return IEEE80211_IF_TYPE_INVALID; 39 return false;
40 } 40 }
41} 41}
42 42
@@ -45,17 +45,15 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
45 struct vif_params *params) 45 struct vif_params *params)
46{ 46{
47 struct ieee80211_local *local = wiphy_priv(wiphy); 47 struct ieee80211_local *local = wiphy_priv(wiphy);
48 enum ieee80211_if_types itype;
49 struct net_device *dev; 48 struct net_device *dev;
50 struct ieee80211_sub_if_data *sdata; 49 struct ieee80211_sub_if_data *sdata;
51 int err; 50 int err;
52 51
53 itype = nl80211_type_to_mac80211_type(type); 52 if (!nl80211_type_check(type))
54 if (itype == IEEE80211_IF_TYPE_INVALID)
55 return -EINVAL; 53 return -EINVAL;
56 54
57 err = ieee80211_if_add(local, name, &dev, itype, params); 55 err = ieee80211_if_add(local, name, &dev, type, params);
58 if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags) 56 if (err || type != NL80211_IFTYPE_MONITOR || !flags)
59 return err; 57 return err;
60 58
61 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 59 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -66,13 +64,16 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
66static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) 64static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex)
67{ 65{
68 struct net_device *dev; 66 struct net_device *dev;
67 struct ieee80211_sub_if_data *sdata;
69 68
70 /* we're under RTNL */ 69 /* we're under RTNL */
71 dev = __dev_get_by_index(&init_net, ifindex); 70 dev = __dev_get_by_index(&init_net, ifindex);
72 if (!dev) 71 if (!dev)
73 return -ENODEV; 72 return -ENODEV;
74 73
75 ieee80211_if_remove(dev); 74 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
75
76 ieee80211_if_remove(sdata);
76 77
77 return 0; 78 return 0;
78} 79}
@@ -83,7 +84,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
83{ 84{
84 struct ieee80211_local *local = wiphy_priv(wiphy); 85 struct ieee80211_local *local = wiphy_priv(wiphy);
85 struct net_device *dev; 86 struct net_device *dev;
86 enum ieee80211_if_types itype;
87 struct ieee80211_sub_if_data *sdata; 87 struct ieee80211_sub_if_data *sdata;
88 int ret; 88 int ret;
89 89
@@ -92,8 +92,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
92 if (!dev) 92 if (!dev)
93 return -ENODEV; 93 return -ENODEV;
94 94
95 itype = nl80211_type_to_mac80211_type(type); 95 if (!nl80211_type_check(type))
96 if (itype == IEEE80211_IF_TYPE_INVALID)
97 return -EINVAL; 96 return -EINVAL;
98 97
99 if (dev == local->mdev) 98 if (dev == local->mdev)
@@ -101,16 +100,16 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
101 100
102 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 101 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
103 102
104 ret = ieee80211_if_change_type(sdata, itype); 103 ret = ieee80211_if_change_type(sdata, type);
105 if (ret) 104 if (ret)
106 return ret; 105 return ret;
107 106
108 if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len) 107 if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len)
109 ieee80211_if_sta_set_mesh_id(&sdata->u.sta, 108 ieee80211_sdata_set_mesh_id(sdata,
110 params->mesh_id_len, 109 params->mesh_id_len,
111 params->mesh_id); 110 params->mesh_id);
112 111
113 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || !flags) 112 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
114 return 0; 113 return 0;
115 114
116 sdata->u.mntr_flags = *flags; 115 sdata->u.mntr_flags = *flags;
@@ -365,7 +364,7 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
365 sta = sta_info_get_by_idx(local, idx, dev); 364 sta = sta_info_get_by_idx(local, idx, dev);
366 if (sta) { 365 if (sta) {
367 ret = 0; 366 ret = 0;
368 memcpy(mac, sta->addr, ETH_ALEN); 367 memcpy(mac, sta->sta.addr, ETH_ALEN);
369 sta_set_sinfo(sta, sinfo); 368 sta_set_sinfo(sta, sinfo);
370 } 369 }
371 370
@@ -506,7 +505,7 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
506 505
507 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 506 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
508 507
509 if (sdata->vif.type != IEEE80211_IF_TYPE_AP) 508 if (sdata->vif.type != NL80211_IFTYPE_AP)
510 return -EINVAL; 509 return -EINVAL;
511 510
512 old = sdata->u.ap.beacon; 511 old = sdata->u.ap.beacon;
@@ -529,7 +528,7 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
529 528
530 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 529 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
531 530
532 if (sdata->vif.type != IEEE80211_IF_TYPE_AP) 531 if (sdata->vif.type != NL80211_IFTYPE_AP)
533 return -EINVAL; 532 return -EINVAL;
534 533
535 old = sdata->u.ap.beacon; 534 old = sdata->u.ap.beacon;
@@ -551,7 +550,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
551 550
552 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 551 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
553 552
554 if (sdata->vif.type != IEEE80211_IF_TYPE_AP) 553 if (sdata->vif.type != NL80211_IFTYPE_AP)
555 return -EINVAL; 554 return -EINVAL;
556 555
557 old = sdata->u.ap.beacon; 556 old = sdata->u.ap.beacon;
@@ -594,7 +593,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
594 * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ 593 * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
595 594
596 memset(msg->da, 0xff, ETH_ALEN); 595 memset(msg->da, 0xff, ETH_ALEN);
597 memcpy(msg->sa, sta->addr, ETH_ALEN); 596 memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
598 msg->len = htons(6); 597 msg->len = htons(6);
599 msg->dsap = 0; 598 msg->dsap = 0;
600 msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */ 599 msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */
@@ -649,9 +648,9 @@ static void sta_apply_parameters(struct ieee80211_local *local,
649 */ 648 */
650 649
651 if (params->aid) { 650 if (params->aid) {
652 sta->aid = params->aid; 651 sta->sta.aid = params->aid;
653 if (sta->aid > IEEE80211_MAX_AID) 652 if (sta->sta.aid > IEEE80211_MAX_AID)
654 sta->aid = 0; /* XXX: should this be an error? */ 653 sta->sta.aid = 0; /* XXX: should this be an error? */
655 } 654 }
656 655
657 if (params->listen_interval >= 0) 656 if (params->listen_interval >= 0)
@@ -668,7 +667,12 @@ static void sta_apply_parameters(struct ieee80211_local *local,
668 rates |= BIT(j); 667 rates |= BIT(j);
669 } 668 }
670 } 669 }
671 sta->supp_rates[local->oper_channel->band] = rates; 670 sta->sta.supp_rates[local->oper_channel->band] = rates;
671 }
672
673 if (params->ht_capa) {
674 ieee80211_ht_cap_ie_to_ht_info(params->ht_capa,
675 &sta->sta.ht_info);
672 } 676 }
673 677
674 if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) { 678 if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) {
@@ -701,8 +705,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
701 if (params->vlan) { 705 if (params->vlan) {
702 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 706 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
703 707
704 if (sdata->vif.type != IEEE80211_IF_TYPE_VLAN && 708 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
705 sdata->vif.type != IEEE80211_IF_TYPE_AP) 709 sdata->vif.type != NL80211_IFTYPE_AP)
706 return -EINVAL; 710 return -EINVAL;
707 } else 711 } else
708 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 712 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -732,8 +736,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
732 return err; 736 return err;
733 } 737 }
734 738
735 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN || 739 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
736 sdata->vif.type == IEEE80211_IF_TYPE_AP) 740 sdata->vif.type == NL80211_IFTYPE_AP)
737 ieee80211_send_layer2_update(sta); 741 ieee80211_send_layer2_update(sta);
738 742
739 rcu_read_unlock(); 743 rcu_read_unlock();
@@ -797,8 +801,8 @@ static int ieee80211_change_station(struct wiphy *wiphy,
797 if (params->vlan && params->vlan != sta->sdata->dev) { 801 if (params->vlan && params->vlan != sta->sdata->dev) {
798 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 802 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
799 803
800 if (vlansdata->vif.type != IEEE80211_IF_TYPE_VLAN && 804 if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
801 vlansdata->vif.type != IEEE80211_IF_TYPE_AP) { 805 vlansdata->vif.type != NL80211_IFTYPE_AP) {
802 rcu_read_unlock(); 806 rcu_read_unlock();
803 return -EINVAL; 807 return -EINVAL;
804 } 808 }
@@ -832,7 +836,7 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
832 836
833 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 837 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
834 838
835 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) 839 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
836 return -ENOTSUPP; 840 return -ENOTSUPP;
837 841
838 rcu_read_lock(); 842 rcu_read_lock();
@@ -842,13 +846,13 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
842 return -ENOENT; 846 return -ENOENT;
843 } 847 }
844 848
845 err = mesh_path_add(dst, dev); 849 err = mesh_path_add(dst, sdata);
846 if (err) { 850 if (err) {
847 rcu_read_unlock(); 851 rcu_read_unlock();
848 return err; 852 return err;
849 } 853 }
850 854
851 mpath = mesh_path_lookup(dst, dev); 855 mpath = mesh_path_lookup(dst, sdata);
852 if (!mpath) { 856 if (!mpath) {
853 rcu_read_unlock(); 857 rcu_read_unlock();
854 return -ENXIO; 858 return -ENXIO;
@@ -862,10 +866,12 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
862static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, 866static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
863 u8 *dst) 867 u8 *dst)
864{ 868{
869 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
870
865 if (dst) 871 if (dst)
866 return mesh_path_del(dst, dev); 872 return mesh_path_del(dst, sdata);
867 873
868 mesh_path_flush(dev); 874 mesh_path_flush(sdata);
869 return 0; 875 return 0;
870} 876}
871 877
@@ -886,7 +892,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
886 892
887 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 893 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
888 894
889 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) 895 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
890 return -ENOTSUPP; 896 return -ENOTSUPP;
891 897
892 rcu_read_lock(); 898 rcu_read_lock();
@@ -897,7 +903,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
897 return -ENOENT; 903 return -ENOENT;
898 } 904 }
899 905
900 mpath = mesh_path_lookup(dst, dev); 906 mpath = mesh_path_lookup(dst, sdata);
901 if (!mpath) { 907 if (!mpath) {
902 rcu_read_unlock(); 908 rcu_read_unlock();
903 return -ENOENT; 909 return -ENOENT;
@@ -913,7 +919,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
913 struct mpath_info *pinfo) 919 struct mpath_info *pinfo)
914{ 920{
915 if (mpath->next_hop) 921 if (mpath->next_hop)
916 memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN); 922 memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
917 else 923 else
918 memset(next_hop, 0, ETH_ALEN); 924 memset(next_hop, 0, ETH_ALEN);
919 925
@@ -961,11 +967,11 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
961 967
962 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 968 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
963 969
964 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) 970 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
965 return -ENOTSUPP; 971 return -ENOTSUPP;
966 972
967 rcu_read_lock(); 973 rcu_read_lock();
968 mpath = mesh_path_lookup(dst, dev); 974 mpath = mesh_path_lookup(dst, sdata);
969 if (!mpath) { 975 if (!mpath) {
970 rcu_read_unlock(); 976 rcu_read_unlock();
971 return -ENOENT; 977 return -ENOENT;
@@ -989,11 +995,11 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
989 995
990 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 996 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
991 997
992 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) 998 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
993 return -ENOTSUPP; 999 return -ENOTSUPP;
994 1000
995 rcu_read_lock(); 1001 rcu_read_lock();
996 mpath = mesh_path_lookup_by_idx(idx, dev); 1002 mpath = mesh_path_lookup_by_idx(idx, sdata);
997 if (!mpath) { 1003 if (!mpath) {
998 rcu_read_unlock(); 1004 rcu_read_unlock();
999 return -ENOENT; 1005 return -ENOENT;
@@ -1005,6 +1011,42 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
1005} 1011}
1006#endif 1012#endif
1007 1013
1014static int ieee80211_change_bss(struct wiphy *wiphy,
1015 struct net_device *dev,
1016 struct bss_parameters *params)
1017{
1018 struct ieee80211_local *local = wiphy_priv(wiphy);
1019 struct ieee80211_sub_if_data *sdata;
1020 u32 changed = 0;
1021
1022 if (dev == local->mdev)
1023 return -EOPNOTSUPP;
1024
1025 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1026
1027 if (sdata->vif.type != NL80211_IFTYPE_AP)
1028 return -EINVAL;
1029
1030 if (params->use_cts_prot >= 0) {
1031 sdata->bss_conf.use_cts_prot = params->use_cts_prot;
1032 changed |= BSS_CHANGED_ERP_CTS_PROT;
1033 }
1034 if (params->use_short_preamble >= 0) {
1035 sdata->bss_conf.use_short_preamble =
1036 params->use_short_preamble;
1037 changed |= BSS_CHANGED_ERP_PREAMBLE;
1038 }
1039 if (params->use_short_slot_time >= 0) {
1040 sdata->bss_conf.use_short_slot =
1041 params->use_short_slot_time;
1042 changed |= BSS_CHANGED_ERP_SLOT;
1043 }
1044
1045 ieee80211_bss_info_change_notify(sdata, changed);
1046
1047 return 0;
1048}
1049
1008struct cfg80211_ops mac80211_config_ops = { 1050struct cfg80211_ops mac80211_config_ops = {
1009 .add_virtual_intf = ieee80211_add_iface, 1051 .add_virtual_intf = ieee80211_add_iface,
1010 .del_virtual_intf = ieee80211_del_iface, 1052 .del_virtual_intf = ieee80211_del_iface,
@@ -1028,4 +1070,5 @@ struct cfg80211_ops mac80211_config_ops = {
1028 .get_mpath = ieee80211_get_mpath, 1070 .get_mpath = ieee80211_get_mpath,
1029 .dump_mpath = ieee80211_dump_mpath, 1071 .dump_mpath = ieee80211_dump_mpath,
1030#endif 1072#endif
1073 .change_bss = ieee80211_change_bss,
1031}; 1074};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index ee509f1109e2..24ce54463310 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -51,8 +51,6 @@ DEBUGFS_READONLY_FILE(antenna_sel_tx, 20, "%d",
51 local->hw.conf.antenna_sel_tx); 51 local->hw.conf.antenna_sel_tx);
52DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d", 52DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d",
53 local->hw.conf.antenna_sel_rx); 53 local->hw.conf.antenna_sel_rx);
54DEBUGFS_READONLY_FILE(bridge_packets, 20, "%d",
55 local->bridge_packets);
56DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", 54DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
57 local->rts_threshold); 55 local->rts_threshold);
58DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", 56DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
@@ -206,7 +204,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
206 DEBUGFS_ADD(frequency); 204 DEBUGFS_ADD(frequency);
207 DEBUGFS_ADD(antenna_sel_tx); 205 DEBUGFS_ADD(antenna_sel_tx);
208 DEBUGFS_ADD(antenna_sel_rx); 206 DEBUGFS_ADD(antenna_sel_rx);
209 DEBUGFS_ADD(bridge_packets);
210 DEBUGFS_ADD(rts_threshold); 207 DEBUGFS_ADD(rts_threshold);
211 DEBUGFS_ADD(fragmentation_threshold); 208 DEBUGFS_ADD(fragmentation_threshold);
212 DEBUGFS_ADD(short_retry_limit); 209 DEBUGFS_ADD(short_retry_limit);
@@ -263,7 +260,6 @@ void debugfs_hw_del(struct ieee80211_local *local)
263 DEBUGFS_DEL(frequency); 260 DEBUGFS_DEL(frequency);
264 DEBUGFS_DEL(antenna_sel_tx); 261 DEBUGFS_DEL(antenna_sel_tx);
265 DEBUGFS_DEL(antenna_sel_rx); 262 DEBUGFS_DEL(antenna_sel_rx);
266 DEBUGFS_DEL(bridge_packets);
267 DEBUGFS_DEL(rts_threshold); 263 DEBUGFS_DEL(rts_threshold);
268 DEBUGFS_DEL(fragmentation_threshold); 264 DEBUGFS_DEL(fragmentation_threshold);
269 DEBUGFS_DEL(short_retry_limit); 265 DEBUGFS_DEL(short_retry_limit);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 7439b63df5d0..a3294d109322 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -206,7 +206,8 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
206 rcu_read_lock(); 206 rcu_read_lock();
207 sta = rcu_dereference(key->sta); 207 sta = rcu_dereference(key->sta);
208 if (sta) 208 if (sta)
209 sprintf(buf, "../../stations/%s", print_mac(mac, sta->addr)); 209 sprintf(buf, "../../stations/%s",
210 print_mac(mac, sta->sta.addr));
210 rcu_read_unlock(); 211 rcu_read_unlock();
211 212
212 /* using sta as a boolean is fine outside RCU lock */ 213 /* using sta as a boolean is fine outside RCU lock */
@@ -265,7 +266,7 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
265 key = sdata->default_key; 266 key = sdata->default_key;
266 if (key) { 267 if (key) {
267 sprintf(buf, "../keys/%d", key->debugfs.cnt); 268 sprintf(buf, "../keys/%d", key->debugfs.cnt);
268 sdata->debugfs.default_key = 269 sdata->common_debugfs.default_key =
269 debugfs_create_symlink("default_key", 270 debugfs_create_symlink("default_key",
270 sdata->debugfsdir, buf); 271 sdata->debugfsdir, buf);
271 } else 272 } else
@@ -277,8 +278,8 @@ void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata)
277 if (!sdata) 278 if (!sdata)
278 return; 279 return;
279 280
280 debugfs_remove(sdata->debugfs.default_key); 281 debugfs_remove(sdata->common_debugfs.default_key);
281 sdata->debugfs.default_key = NULL; 282 sdata->common_debugfs.default_key = NULL;
282} 283}
283 284
284void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key, 285void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 8165df578c92..2a4515623776 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -173,7 +173,6 @@ IEEE80211_IF_FILE(assoc_tries, u.sta.assoc_tries, DEC);
173IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX); 173IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX);
174IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC); 174IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC);
175IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC); 175IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC);
176IEEE80211_IF_FILE(num_beacons_sta, u.sta.num_beacons, DEC);
177 176
178static ssize_t ieee80211_if_fmt_flags( 177static ssize_t ieee80211_if_fmt_flags(
179 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) 178 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -192,7 +191,6 @@ __IEEE80211_IF_FILE(flags);
192/* AP attributes */ 191/* AP attributes */
193IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); 192IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
194IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); 193IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
195IEEE80211_IF_FILE(num_beacons, u.ap.num_beacons, DEC);
196 194
197static ssize_t ieee80211_if_fmt_num_buffered_multicast( 195static ssize_t ieee80211_if_fmt_num_buffered_multicast(
198 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) 196 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -207,37 +205,37 @@ IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
207 205
208#ifdef CONFIG_MAC80211_MESH 206#ifdef CONFIG_MAC80211_MESH
209/* Mesh stats attributes */ 207/* Mesh stats attributes */
210IEEE80211_IF_FILE(fwded_frames, u.sta.mshstats.fwded_frames, DEC); 208IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
211IEEE80211_IF_FILE(dropped_frames_ttl, u.sta.mshstats.dropped_frames_ttl, DEC); 209IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
212IEEE80211_IF_FILE(dropped_frames_no_route, 210IEEE80211_IF_FILE(dropped_frames_no_route,
213 u.sta.mshstats.dropped_frames_no_route, DEC); 211 u.mesh.mshstats.dropped_frames_no_route, DEC);
214IEEE80211_IF_FILE(estab_plinks, u.sta.mshstats.estab_plinks, ATOMIC); 212IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
215 213
216/* Mesh parameters */ 214/* Mesh parameters */
217IEEE80211_IF_WFILE(dot11MeshMaxRetries, 215IEEE80211_IF_WFILE(dot11MeshMaxRetries,
218 u.sta.mshcfg.dot11MeshMaxRetries, DEC, u8); 216 u.mesh.mshcfg.dot11MeshMaxRetries, DEC, u8);
219IEEE80211_IF_WFILE(dot11MeshRetryTimeout, 217IEEE80211_IF_WFILE(dot11MeshRetryTimeout,
220 u.sta.mshcfg.dot11MeshRetryTimeout, DEC, u16); 218 u.mesh.mshcfg.dot11MeshRetryTimeout, DEC, u16);
221IEEE80211_IF_WFILE(dot11MeshConfirmTimeout, 219IEEE80211_IF_WFILE(dot11MeshConfirmTimeout,
222 u.sta.mshcfg.dot11MeshConfirmTimeout, DEC, u16); 220 u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC, u16);
223IEEE80211_IF_WFILE(dot11MeshHoldingTimeout, 221IEEE80211_IF_WFILE(dot11MeshHoldingTimeout,
224 u.sta.mshcfg.dot11MeshHoldingTimeout, DEC, u16); 222 u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC, u16);
225IEEE80211_IF_WFILE(dot11MeshTTL, u.sta.mshcfg.dot11MeshTTL, DEC, u8); 223IEEE80211_IF_WFILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC, u8);
226IEEE80211_IF_WFILE(auto_open_plinks, u.sta.mshcfg.auto_open_plinks, DEC, u8); 224IEEE80211_IF_WFILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC, u8);
227IEEE80211_IF_WFILE(dot11MeshMaxPeerLinks, 225IEEE80211_IF_WFILE(dot11MeshMaxPeerLinks,
228 u.sta.mshcfg.dot11MeshMaxPeerLinks, DEC, u16); 226 u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC, u16);
229IEEE80211_IF_WFILE(dot11MeshHWMPactivePathTimeout, 227IEEE80211_IF_WFILE(dot11MeshHWMPactivePathTimeout,
230 u.sta.mshcfg.dot11MeshHWMPactivePathTimeout, DEC, u32); 228 u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC, u32);
231IEEE80211_IF_WFILE(dot11MeshHWMPpreqMinInterval, 229IEEE80211_IF_WFILE(dot11MeshHWMPpreqMinInterval,
232 u.sta.mshcfg.dot11MeshHWMPpreqMinInterval, DEC, u16); 230 u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC, u16);
233IEEE80211_IF_WFILE(dot11MeshHWMPnetDiameterTraversalTime, 231IEEE80211_IF_WFILE(dot11MeshHWMPnetDiameterTraversalTime,
234 u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC, u16); 232 u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC, u16);
235IEEE80211_IF_WFILE(dot11MeshHWMPmaxPREQretries, 233IEEE80211_IF_WFILE(dot11MeshHWMPmaxPREQretries,
236 u.sta.mshcfg.dot11MeshHWMPmaxPREQretries, DEC, u8); 234 u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC, u8);
237IEEE80211_IF_WFILE(path_refresh_time, 235IEEE80211_IF_WFILE(path_refresh_time,
238 u.sta.mshcfg.path_refresh_time, DEC, u32); 236 u.mesh.mshcfg.path_refresh_time, DEC, u32);
239IEEE80211_IF_WFILE(min_discovery_timeout, 237IEEE80211_IF_WFILE(min_discovery_timeout,
240 u.sta.mshcfg.min_discovery_timeout, DEC, u16); 238 u.mesh.mshcfg.min_discovery_timeout, DEC, u16);
241#endif 239#endif
242 240
243 241
@@ -265,7 +263,6 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
265 DEBUGFS_ADD(auth_alg, sta); 263 DEBUGFS_ADD(auth_alg, sta);
266 DEBUGFS_ADD(auth_transaction, sta); 264 DEBUGFS_ADD(auth_transaction, sta);
267 DEBUGFS_ADD(flags, sta); 265 DEBUGFS_ADD(flags, sta);
268 DEBUGFS_ADD(num_beacons_sta, sta);
269} 266}
270 267
271static void add_ap_files(struct ieee80211_sub_if_data *sdata) 268static void add_ap_files(struct ieee80211_sub_if_data *sdata)
@@ -276,7 +273,6 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
276 273
277 DEBUGFS_ADD(num_sta_ps, ap); 274 DEBUGFS_ADD(num_sta_ps, ap);
278 DEBUGFS_ADD(dtim_count, ap); 275 DEBUGFS_ADD(dtim_count, ap);
279 DEBUGFS_ADD(num_beacons, ap);
280 DEBUGFS_ADD(num_buffered_multicast, ap); 276 DEBUGFS_ADD(num_buffered_multicast, ap);
281} 277}
282 278
@@ -345,26 +341,26 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
345 return; 341 return;
346 342
347 switch (sdata->vif.type) { 343 switch (sdata->vif.type) {
348 case IEEE80211_IF_TYPE_MESH_POINT: 344 case NL80211_IFTYPE_MESH_POINT:
349#ifdef CONFIG_MAC80211_MESH 345#ifdef CONFIG_MAC80211_MESH
350 add_mesh_stats(sdata); 346 add_mesh_stats(sdata);
351 add_mesh_config(sdata); 347 add_mesh_config(sdata);
352#endif 348#endif
353 /* fall through */ 349 break;
354 case IEEE80211_IF_TYPE_STA: 350 case NL80211_IFTYPE_STATION:
355 case IEEE80211_IF_TYPE_IBSS: 351 case NL80211_IFTYPE_ADHOC:
356 add_sta_files(sdata); 352 add_sta_files(sdata);
357 break; 353 break;
358 case IEEE80211_IF_TYPE_AP: 354 case NL80211_IFTYPE_AP:
359 add_ap_files(sdata); 355 add_ap_files(sdata);
360 break; 356 break;
361 case IEEE80211_IF_TYPE_WDS: 357 case NL80211_IFTYPE_WDS:
362 add_wds_files(sdata); 358 add_wds_files(sdata);
363 break; 359 break;
364 case IEEE80211_IF_TYPE_MNTR: 360 case NL80211_IFTYPE_MONITOR:
365 add_monitor_files(sdata); 361 add_monitor_files(sdata);
366 break; 362 break;
367 case IEEE80211_IF_TYPE_VLAN: 363 case NL80211_IFTYPE_AP_VLAN:
368 add_vlan_files(sdata); 364 add_vlan_files(sdata);
369 break; 365 break;
370 default: 366 default:
@@ -398,7 +394,6 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata)
398 DEBUGFS_DEL(auth_alg, sta); 394 DEBUGFS_DEL(auth_alg, sta);
399 DEBUGFS_DEL(auth_transaction, sta); 395 DEBUGFS_DEL(auth_transaction, sta);
400 DEBUGFS_DEL(flags, sta); 396 DEBUGFS_DEL(flags, sta);
401 DEBUGFS_DEL(num_beacons_sta, sta);
402} 397}
403 398
404static void del_ap_files(struct ieee80211_sub_if_data *sdata) 399static void del_ap_files(struct ieee80211_sub_if_data *sdata)
@@ -409,7 +404,6 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata)
409 404
410 DEBUGFS_DEL(num_sta_ps, ap); 405 DEBUGFS_DEL(num_sta_ps, ap);
411 DEBUGFS_DEL(dtim_count, ap); 406 DEBUGFS_DEL(dtim_count, ap);
412 DEBUGFS_DEL(num_beacons, ap);
413 DEBUGFS_DEL(num_buffered_multicast, ap); 407 DEBUGFS_DEL(num_buffered_multicast, ap);
414} 408}
415 409
@@ -482,26 +476,26 @@ static void del_files(struct ieee80211_sub_if_data *sdata)
482 return; 476 return;
483 477
484 switch (sdata->vif.type) { 478 switch (sdata->vif.type) {
485 case IEEE80211_IF_TYPE_MESH_POINT: 479 case NL80211_IFTYPE_MESH_POINT:
486#ifdef CONFIG_MAC80211_MESH 480#ifdef CONFIG_MAC80211_MESH
487 del_mesh_stats(sdata); 481 del_mesh_stats(sdata);
488 del_mesh_config(sdata); 482 del_mesh_config(sdata);
489#endif 483#endif
490 /* fall through */ 484 break;
491 case IEEE80211_IF_TYPE_STA: 485 case NL80211_IFTYPE_STATION:
492 case IEEE80211_IF_TYPE_IBSS: 486 case NL80211_IFTYPE_ADHOC:
493 del_sta_files(sdata); 487 del_sta_files(sdata);
494 break; 488 break;
495 case IEEE80211_IF_TYPE_AP: 489 case NL80211_IFTYPE_AP:
496 del_ap_files(sdata); 490 del_ap_files(sdata);
497 break; 491 break;
498 case IEEE80211_IF_TYPE_WDS: 492 case NL80211_IFTYPE_WDS:
499 del_wds_files(sdata); 493 del_wds_files(sdata);
500 break; 494 break;
501 case IEEE80211_IF_TYPE_MNTR: 495 case NL80211_IFTYPE_MONITOR:
502 del_monitor_files(sdata); 496 del_monitor_files(sdata);
503 break; 497 break;
504 case IEEE80211_IF_TYPE_VLAN: 498 case NL80211_IFTYPE_AP_VLAN:
505 del_vlan_files(sdata); 499 del_vlan_files(sdata);
506 break; 500 break;
507 default: 501 default:
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 79a062782d52..81f350eaf8a3 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -50,7 +50,7 @@ static const struct file_operations sta_ ##name## _ops = { \
50 STA_READ_##format(name, field) \ 50 STA_READ_##format(name, field) \
51 STA_OPS(name) 51 STA_OPS(name)
52 52
53STA_FILE(aid, aid, D); 53STA_FILE(aid, sta.aid, D);
54STA_FILE(dev, sdata->dev->name, S); 54STA_FILE(dev, sdata->dev->name, S);
55STA_FILE(rx_packets, rx_packets, LU); 55STA_FILE(rx_packets, rx_packets, LU);
56STA_FILE(tx_packets, tx_packets, LU); 56STA_FILE(tx_packets, tx_packets, LU);
@@ -176,7 +176,7 @@ static ssize_t sta_agg_status_write(struct file *file,
176 struct net_device *dev = sta->sdata->dev; 176 struct net_device *dev = sta->sdata->dev;
177 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 177 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
178 struct ieee80211_hw *hw = &local->hw; 178 struct ieee80211_hw *hw = &local->hw;
179 u8 *da = sta->addr; 179 u8 *da = sta->sta.addr;
180 static int tid_static_tx[16] = {0, 0, 0, 0, 0, 0, 0, 0, 180 static int tid_static_tx[16] = {0, 0, 0, 0, 0, 0, 0, 0,
181 0, 0, 0, 0, 0, 0, 0, 0}; 181 0, 0, 0, 0, 0, 0, 0, 0};
182 static int tid_static_rx[16] = {1, 1, 1, 1, 1, 1, 1, 1, 182 static int tid_static_rx[16] = {1, 1, 1, 1, 1, 1, 1, 1,
@@ -201,7 +201,7 @@ static ssize_t sta_agg_status_write(struct file *file,
201 tid_num = tid_num - 100; 201 tid_num = tid_num - 100;
202 if (tid_static_rx[tid_num] == 1) { 202 if (tid_static_rx[tid_num] == 1) {
203 strcpy(state, "off "); 203 strcpy(state, "off ");
204 ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0, 204 ieee80211_sta_stop_rx_ba_session(sta->sdata, da, tid_num, 0,
205 WLAN_REASON_QSTA_REQUIRE_SETUP); 205 WLAN_REASON_QSTA_REQUIRE_SETUP);
206 sta->ampdu_mlme.tid_state_rx[tid_num] |= 206 sta->ampdu_mlme.tid_state_rx[tid_num] |=
207 HT_AGG_STATE_DEBUGFS_CTL; 207 HT_AGG_STATE_DEBUGFS_CTL;
@@ -253,7 +253,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
253 if (!stations_dir) 253 if (!stations_dir)
254 return; 254 return;
255 255
256 mac = print_mac(mbuf, sta->addr); 256 mac = print_mac(mbuf, sta->sta.addr);
257 257
258 sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); 258 sta->debugfs.dir = debugfs_create_dir(mac, stations_dir);
259 if (!sta->debugfs.dir) 259 if (!sta->debugfs.dir)
diff --git a/net/mac80211/event.c b/net/mac80211/event.c
index 2280f40b4560..8de60de70bc9 100644
--- a/net/mac80211/event.c
+++ b/net/mac80211/event.c
@@ -8,7 +8,6 @@
8 * mac80211 - events 8 * mac80211 - events
9 */ 9 */
10 10
11#include <linux/netdevice.h>
12#include <net/iw_handler.h> 11#include <net/iw_handler.h>
13#include "ieee80211_i.h" 12#include "ieee80211_i.h"
14 13
@@ -17,7 +16,7 @@
17 * (in the variable hdr) must be long enough to extract the TKIP 16 * (in the variable hdr) must be long enough to extract the TKIP
18 * fields like TSC 17 * fields like TSC
19 */ 18 */
20void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, 19void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
21 struct ieee80211_hdr *hdr) 20 struct ieee80211_hdr *hdr)
22{ 21{
23 union iwreq_data wrqu; 22 union iwreq_data wrqu;
@@ -32,7 +31,7 @@ void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx,
32 print_mac(mac, hdr->addr2)); 31 print_mac(mac, hdr->addr2));
33 memset(&wrqu, 0, sizeof(wrqu)); 32 memset(&wrqu, 0, sizeof(wrqu));
34 wrqu.data.length = strlen(buf); 33 wrqu.data.length = strlen(buf);
35 wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); 34 wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf);
36 kfree(buf); 35 kfree(buf);
37 } 36 }
38 37
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
new file mode 100644
index 000000000000..dc7d9a3d70d5
--- /dev/null
+++ b/net/mac80211/ht.c
@@ -0,0 +1,992 @@
1/*
2 * HT handling
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2002-2005, Instant802 Networks, Inc.
6 * Copyright 2005-2006, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2008, Intel Corporation
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/ieee80211.h>
17#include <net/wireless.h>
18#include <net/mac80211.h>
19#include "ieee80211_i.h"
20#include "sta_info.h"
21#include "wme.h"
22
23int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
24 struct ieee80211_ht_info *ht_info)
25{
26
27 if (ht_info == NULL)
28 return -EINVAL;
29
30 memset(ht_info, 0, sizeof(*ht_info));
31
32 if (ht_cap_ie) {
33 u8 ampdu_info = ht_cap_ie->ampdu_params_info;
34
35 ht_info->ht_supported = 1;
36 ht_info->cap = le16_to_cpu(ht_cap_ie->cap_info);
37 ht_info->ampdu_factor =
38 ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR;
39 ht_info->ampdu_density =
40 (ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2;
41 memcpy(ht_info->supp_mcs_set, ht_cap_ie->supp_mcs_set, 16);
42 } else
43 ht_info->ht_supported = 0;
44
45 return 0;
46}
47
48int ieee80211_ht_addt_info_ie_to_ht_bss_info(
49 struct ieee80211_ht_addt_info *ht_add_info_ie,
50 struct ieee80211_ht_bss_info *bss_info)
51{
52 if (bss_info == NULL)
53 return -EINVAL;
54
55 memset(bss_info, 0, sizeof(*bss_info));
56
57 if (ht_add_info_ie) {
58 u16 op_mode;
59 op_mode = le16_to_cpu(ht_add_info_ie->operation_mode);
60
61 bss_info->primary_channel = ht_add_info_ie->control_chan;
62 bss_info->bss_cap = ht_add_info_ie->ht_param;
63 bss_info->bss_op_mode = (u8)(op_mode & 0xff);
64 }
65
66 return 0;
67}
68
69static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
70 const u8 *da, u16 tid,
71 u8 dialog_token, u16 start_seq_num,
72 u16 agg_size, u16 timeout)
73{
74 struct ieee80211_local *local = sdata->local;
75 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
76 struct sk_buff *skb;
77 struct ieee80211_mgmt *mgmt;
78 u16 capab;
79
80 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
81
82 if (!skb) {
83 printk(KERN_ERR "%s: failed to allocate buffer "
84 "for addba request frame\n", sdata->dev->name);
85 return;
86 }
87 skb_reserve(skb, local->hw.extra_tx_headroom);
88 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
89 memset(mgmt, 0, 24);
90 memcpy(mgmt->da, da, ETH_ALEN);
91 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
92 if (sdata->vif.type == NL80211_IFTYPE_AP)
93 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
94 else
95 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
96
97 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
98 IEEE80211_STYPE_ACTION);
99
100 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
101
102 mgmt->u.action.category = WLAN_CATEGORY_BACK;
103 mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
104
105 mgmt->u.action.u.addba_req.dialog_token = dialog_token;
106 capab = (u16)(1 << 1); /* bit 1 aggregation policy */
107 capab |= (u16)(tid << 2); /* bit 5:2 TID number */
108 capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */
109
110 mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
111
112 mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
113 mgmt->u.action.u.addba_req.start_seq_num =
114 cpu_to_le16(start_seq_num << 4);
115
116 ieee80211_tx_skb(sdata, skb, 0);
117}
118
119static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
120 u8 dialog_token, u16 status, u16 policy,
121 u16 buf_size, u16 timeout)
122{
123 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
124 struct ieee80211_local *local = sdata->local;
125 struct sk_buff *skb;
126 struct ieee80211_mgmt *mgmt;
127 u16 capab;
128
129 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
130
131 if (!skb) {
132 printk(KERN_DEBUG "%s: failed to allocate buffer "
133 "for addba resp frame\n", sdata->dev->name);
134 return;
135 }
136
137 skb_reserve(skb, local->hw.extra_tx_headroom);
138 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
139 memset(mgmt, 0, 24);
140 memcpy(mgmt->da, da, ETH_ALEN);
141 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
142 if (sdata->vif.type == NL80211_IFTYPE_AP)
143 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
144 else
145 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
146 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
147 IEEE80211_STYPE_ACTION);
148
149 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
150 mgmt->u.action.category = WLAN_CATEGORY_BACK;
151 mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
152 mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
153
154 capab = (u16)(policy << 1); /* bit 1 aggregation policy */
155 capab |= (u16)(tid << 2); /* bit 5:2 TID number */
156 capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */
157
158 mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
159 mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
160 mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
161
162 ieee80211_tx_skb(sdata, skb, 0);
163}
164
165static void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
166 const u8 *da, u16 tid,
167 u16 initiator, u16 reason_code)
168{
169 struct ieee80211_local *local = sdata->local;
170 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
171 struct sk_buff *skb;
172 struct ieee80211_mgmt *mgmt;
173 u16 params;
174
175 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
176
177 if (!skb) {
178 printk(KERN_ERR "%s: failed to allocate buffer "
179 "for delba frame\n", sdata->dev->name);
180 return;
181 }
182
183 skb_reserve(skb, local->hw.extra_tx_headroom);
184 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
185 memset(mgmt, 0, 24);
186 memcpy(mgmt->da, da, ETH_ALEN);
187 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
188 if (sdata->vif.type == NL80211_IFTYPE_AP)
189 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
190 else
191 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
192 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
193 IEEE80211_STYPE_ACTION);
194
195 skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba));
196
197 mgmt->u.action.category = WLAN_CATEGORY_BACK;
198 mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
199 params = (u16)(initiator << 11); /* bit 11 initiator */
200 params |= (u16)(tid << 12); /* bit 15:12 TID number */
201
202 mgmt->u.action.u.delba.params = cpu_to_le16(params);
203 mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
204
205 ieee80211_tx_skb(sdata, skb, 0);
206}
207
208void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
209{
210 struct ieee80211_local *local = sdata->local;
211 struct sk_buff *skb;
212 struct ieee80211_bar *bar;
213 u16 bar_control = 0;
214
215 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
216 if (!skb) {
217 printk(KERN_ERR "%s: failed to allocate buffer for "
218 "bar frame\n", sdata->dev->name);
219 return;
220 }
221 skb_reserve(skb, local->hw.extra_tx_headroom);
222 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
223 memset(bar, 0, sizeof(*bar));
224 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
225 IEEE80211_STYPE_BACK_REQ);
226 memcpy(bar->ra, ra, ETH_ALEN);
227 memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN);
228 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
229 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
230 bar_control |= (u16)(tid << 12);
231 bar->control = cpu_to_le16(bar_control);
232 bar->start_seq_num = cpu_to_le16(ssn);
233
234 ieee80211_tx_skb(sdata, skb, 0);
235}
236
237void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
238 u16 initiator, u16 reason)
239{
240 struct ieee80211_local *local = sdata->local;
241 struct ieee80211_hw *hw = &local->hw;
242 struct sta_info *sta;
243 int ret, i;
244 DECLARE_MAC_BUF(mac);
245
246 rcu_read_lock();
247
248 sta = sta_info_get(local, ra);
249 if (!sta) {
250 rcu_read_unlock();
251 return;
252 }
253
254 /* check if TID is in operational state */
255 spin_lock_bh(&sta->lock);
256 if (sta->ampdu_mlme.tid_state_rx[tid]
257 != HT_AGG_STATE_OPERATIONAL) {
258 spin_unlock_bh(&sta->lock);
259 rcu_read_unlock();
260 return;
261 }
262 sta->ampdu_mlme.tid_state_rx[tid] =
263 HT_AGG_STATE_REQ_STOP_BA_MSK |
264 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
265 spin_unlock_bh(&sta->lock);
266
267 /* stop HW Rx aggregation. ampdu_action existence
268 * already verified in session init so we add the BUG_ON */
269 BUG_ON(!local->ops->ampdu_action);
270
271#ifdef CONFIG_MAC80211_HT_DEBUG
272 printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n",
273 print_mac(mac, ra), tid);
274#endif /* CONFIG_MAC80211_HT_DEBUG */
275
276 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
277 &sta->sta, tid, NULL);
278 if (ret)
279 printk(KERN_DEBUG "HW problem - can not stop rx "
280 "aggregation for tid %d\n", tid);
281
282 /* shutdown timer has not expired */
283 if (initiator != WLAN_BACK_TIMER)
284 del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
285
286 /* check if this is a self generated aggregation halt */
287 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
288 ieee80211_send_delba(sdata, ra, tid, 0, reason);
289
290 /* free the reordering buffer */
291 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
292 if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
293 /* release the reordered frames */
294 dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
295 sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
296 sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
297 }
298 }
299 /* free resources */
300 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
301 kfree(sta->ampdu_mlme.tid_rx[tid]);
302 sta->ampdu_mlme.tid_rx[tid] = NULL;
303 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
304
305 rcu_read_unlock();
306}
307
308
309/*
310 * After sending add Block Ack request we activated a timer until
311 * add Block Ack response will arrive from the recipient.
312 * If this timer expires sta_addba_resp_timer_expired will be executed.
313 */
314static void sta_addba_resp_timer_expired(unsigned long data)
315{
316 /* not an elegant detour, but there is no choice as the timer passes
317 * only one argument, and both sta_info and TID are needed, so init
318 * flow in sta_info_create gives the TID as data, while the timer_to_id
319 * array gives the sta through container_of */
320 u16 tid = *(u8 *)data;
321 struct sta_info *temp_sta = container_of((void *)data,
322 struct sta_info, timer_to_tid[tid]);
323
324 struct ieee80211_local *local = temp_sta->local;
325 struct ieee80211_hw *hw = &local->hw;
326 struct sta_info *sta;
327 u8 *state;
328
329 rcu_read_lock();
330
331 sta = sta_info_get(local, temp_sta->sta.addr);
332 if (!sta) {
333 rcu_read_unlock();
334 return;
335 }
336
337 state = &sta->ampdu_mlme.tid_state_tx[tid];
338 /* check if the TID waits for addBA response */
339 spin_lock_bh(&sta->lock);
340 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
341 spin_unlock_bh(&sta->lock);
342 *state = HT_AGG_STATE_IDLE;
343#ifdef CONFIG_MAC80211_HT_DEBUG
344 printk(KERN_DEBUG "timer expired on tid %d but we are not "
345 "expecting addBA response there", tid);
346#endif
347 goto timer_expired_exit;
348 }
349
350#ifdef CONFIG_MAC80211_HT_DEBUG
351 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
352#endif
353
354 /* go through the state check in stop_BA_session */
355 *state = HT_AGG_STATE_OPERATIONAL;
356 spin_unlock_bh(&sta->lock);
357 ieee80211_stop_tx_ba_session(hw, temp_sta->sta.addr, tid,
358 WLAN_BACK_INITIATOR);
359
360timer_expired_exit:
361 rcu_read_unlock();
362}
363
364void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr)
365{
366 struct ieee80211_local *local = sdata->local;
367 int i;
368
369 for (i = 0; i < STA_TID_NUM; i++) {
370 ieee80211_stop_tx_ba_session(&local->hw, addr, i,
371 WLAN_BACK_INITIATOR);
372 ieee80211_sta_stop_rx_ba_session(sdata, addr, i,
373 WLAN_BACK_RECIPIENT,
374 WLAN_REASON_QSTA_LEAVE_QBSS);
375 }
376}
377
378int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
379{
380 struct ieee80211_local *local = hw_to_local(hw);
381 struct sta_info *sta;
382 struct ieee80211_sub_if_data *sdata;
383 u16 start_seq_num;
384 u8 *state;
385 int ret;
386 DECLARE_MAC_BUF(mac);
387
388 if (tid >= STA_TID_NUM)
389 return -EINVAL;
390
391#ifdef CONFIG_MAC80211_HT_DEBUG
392 printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
393 print_mac(mac, ra), tid);
394#endif /* CONFIG_MAC80211_HT_DEBUG */
395
396 rcu_read_lock();
397
398 sta = sta_info_get(local, ra);
399 if (!sta) {
400#ifdef CONFIG_MAC80211_HT_DEBUG
401 printk(KERN_DEBUG "Could not find the station\n");
402#endif
403 ret = -ENOENT;
404 goto exit;
405 }
406
407 spin_lock_bh(&sta->lock);
408
409 /* we have tried too many times, receiver does not want A-MPDU */
410 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
411 ret = -EBUSY;
412 goto err_unlock_sta;
413 }
414
415 state = &sta->ampdu_mlme.tid_state_tx[tid];
416 /* check if the TID is not in aggregation flow already */
417 if (*state != HT_AGG_STATE_IDLE) {
418#ifdef CONFIG_MAC80211_HT_DEBUG
419 printk(KERN_DEBUG "BA request denied - session is not "
420 "idle on tid %u\n", tid);
421#endif /* CONFIG_MAC80211_HT_DEBUG */
422 ret = -EAGAIN;
423 goto err_unlock_sta;
424 }
425
426 /* prepare A-MPDU MLME for Tx aggregation */
427 sta->ampdu_mlme.tid_tx[tid] =
428 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
429 if (!sta->ampdu_mlme.tid_tx[tid]) {
430#ifdef CONFIG_MAC80211_HT_DEBUG
431 if (net_ratelimit())
432 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
433 tid);
434#endif
435 ret = -ENOMEM;
436 goto err_unlock_sta;
437 }
438 /* Tx timer */
439 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
440 sta_addba_resp_timer_expired;
441 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
442 (unsigned long)&sta->timer_to_tid[tid];
443 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
444
445 /* create a new queue for this aggregation */
446 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
447
448 /* case no queue is available to aggregation
449 * don't switch to aggregation */
450 if (ret) {
451#ifdef CONFIG_MAC80211_HT_DEBUG
452 printk(KERN_DEBUG "BA request denied - queue unavailable for"
453 " tid %d\n", tid);
454#endif /* CONFIG_MAC80211_HT_DEBUG */
455 goto err_unlock_queue;
456 }
457 sdata = sta->sdata;
458
459 /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
460 * call back right away, it must see that the flow has begun */
461 *state |= HT_ADDBA_REQUESTED_MSK;
462
463 /* This is slightly racy because the queue isn't stopped */
464 start_seq_num = sta->tid_seq[tid];
465
466 if (local->ops->ampdu_action)
467 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
468 &sta->sta, tid, &start_seq_num);
469
470 if (ret) {
471 /* No need to requeue the packets in the agg queue, since we
472 * held the tx lock: no packet could be enqueued to the newly
473 * allocated queue */
474 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
475#ifdef CONFIG_MAC80211_HT_DEBUG
476 printk(KERN_DEBUG "BA request denied - HW unavailable for"
477 " tid %d\n", tid);
478#endif /* CONFIG_MAC80211_HT_DEBUG */
479 *state = HT_AGG_STATE_IDLE;
480 goto err_unlock_queue;
481 }
482
483 /* Will put all the packets in the new SW queue */
484 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
485 spin_unlock_bh(&sta->lock);
486
487 /* send an addBA request */
488 sta->ampdu_mlme.dialog_token_allocator++;
489 sta->ampdu_mlme.tid_tx[tid]->dialog_token =
490 sta->ampdu_mlme.dialog_token_allocator;
491 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
492
493
494 ieee80211_send_addba_request(sta->sdata, ra, tid,
495 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
496 sta->ampdu_mlme.tid_tx[tid]->ssn,
497 0x40, 5000);
498 /* activate the timer for the recipient's addBA response */
499 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
500 jiffies + ADDBA_RESP_INTERVAL;
501 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
502#ifdef CONFIG_MAC80211_HT_DEBUG
503 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
504#endif
505 goto exit;
506
507err_unlock_queue:
508 kfree(sta->ampdu_mlme.tid_tx[tid]);
509 sta->ampdu_mlme.tid_tx[tid] = NULL;
510 ret = -EBUSY;
511err_unlock_sta:
512 spin_unlock_bh(&sta->lock);
513exit:
514 rcu_read_unlock();
515 return ret;
516}
517EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
518
519int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
520 u8 *ra, u16 tid,
521 enum ieee80211_back_parties initiator)
522{
523 struct ieee80211_local *local = hw_to_local(hw);
524 struct sta_info *sta;
525 u8 *state;
526 int ret = 0;
527 DECLARE_MAC_BUF(mac);
528
529 if (tid >= STA_TID_NUM)
530 return -EINVAL;
531
532 rcu_read_lock();
533 sta = sta_info_get(local, ra);
534 if (!sta) {
535 rcu_read_unlock();
536 return -ENOENT;
537 }
538
539 /* check if the TID is in aggregation */
540 state = &sta->ampdu_mlme.tid_state_tx[tid];
541 spin_lock_bh(&sta->lock);
542
543 if (*state != HT_AGG_STATE_OPERATIONAL) {
544 ret = -ENOENT;
545 goto stop_BA_exit;
546 }
547
548#ifdef CONFIG_MAC80211_HT_DEBUG
549 printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
550 print_mac(mac, ra), tid);
551#endif /* CONFIG_MAC80211_HT_DEBUG */
552
553 ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
554
555 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
556 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
557
558 if (local->ops->ampdu_action)
559 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
560 &sta->sta, tid, NULL);
561
562 /* case HW denied going back to legacy */
563 if (ret) {
564 WARN_ON(ret != -EBUSY);
565 *state = HT_AGG_STATE_OPERATIONAL;
566 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
567 goto stop_BA_exit;
568 }
569
570stop_BA_exit:
571 spin_unlock_bh(&sta->lock);
572 rcu_read_unlock();
573 return ret;
574}
575EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
576
577void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
578{
579 struct ieee80211_local *local = hw_to_local(hw);
580 struct sta_info *sta;
581 u8 *state;
582 DECLARE_MAC_BUF(mac);
583
584 if (tid >= STA_TID_NUM) {
585#ifdef CONFIG_MAC80211_HT_DEBUG
586 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
587 tid, STA_TID_NUM);
588#endif
589 return;
590 }
591
592 rcu_read_lock();
593 sta = sta_info_get(local, ra);
594 if (!sta) {
595 rcu_read_unlock();
596#ifdef CONFIG_MAC80211_HT_DEBUG
597 printk(KERN_DEBUG "Could not find station: %s\n",
598 print_mac(mac, ra));
599#endif
600 return;
601 }
602
603 state = &sta->ampdu_mlme.tid_state_tx[tid];
604 spin_lock_bh(&sta->lock);
605
606 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
607#ifdef CONFIG_MAC80211_HT_DEBUG
608 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
609 *state);
610#endif
611 spin_unlock_bh(&sta->lock);
612 rcu_read_unlock();
613 return;
614 }
615
616 WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
617
618 *state |= HT_ADDBA_DRV_READY_MSK;
619
620 if (*state == HT_AGG_STATE_OPERATIONAL) {
621#ifdef CONFIG_MAC80211_HT_DEBUG
622 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
623#endif
624 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
625 }
626 spin_unlock_bh(&sta->lock);
627 rcu_read_unlock();
628}
629EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
630
/*
 * Driver callback completing the teardown of a Tx BA (block-ack) session.
 *
 * @hw:  hardware this session belongs to
 * @ra:  receiver address (peer MAC) of the session
 * @tid: traffic identifier of the session (must be < STA_TID_NUM)
 *
 * Sends a DELBA if we were the initiator, dismantles the aggregation
 * queue (requeueing its pending frames), and resets the per-TID state
 * machine back to idle.
 */
void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int agg_queue;
	DECLARE_MAC_BUF(mac);

	/* reject out-of-range TIDs before touching any state */
	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
				tid, STA_TID_NUM);
#endif
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
			print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	/* RCU protects the station entry for the whole teardown */
	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %s\n",
				print_mac(mac, ra));
#endif
		rcu_read_unlock();
		return;
	}
	state = &sta->ampdu_mlme.tid_state_tx[tid];

	/* NOTE: no need to use sta->lock in this state check, as
	 * ieee80211_stop_tx_ba_session will let only one stop call to
	 * pass through per sta/tid
	 */
	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
#endif
		rcu_read_unlock();
		return;
	}

	/* only the side that started the session sends the DELBA frame */
	if (*state & HT_AGG_STATE_INITIATOR_MSK)
		ieee80211_send_delba(sta->sdata, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	agg_queue = sta->tid_to_tx_q[tid];

	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);

	/* We just requeued the all the frames that were in the
	 * removed queue, and since we might miss a softirq we do
	 * netif_schedule_queue.  ieee80211_wake_queue is not used
	 * here as this queue is not necessarily stopped
	 */
	netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue));
	/* reset the TID back to idle under the station lock */
	spin_lock_bh(&sta->lock);
	*state = HT_AGG_STATE_IDLE;
	sta->ampdu_mlme.addba_req_num[tid] = 0;
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&sta->lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
700
701void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
702 const u8 *ra, u16 tid)
703{
704 struct ieee80211_local *local = hw_to_local(hw);
705 struct ieee80211_ra_tid *ra_tid;
706 struct sk_buff *skb = dev_alloc_skb(0);
707
708 if (unlikely(!skb)) {
709#ifdef CONFIG_MAC80211_HT_DEBUG
710 if (net_ratelimit())
711 printk(KERN_WARNING "%s: Not enough memory, "
712 "dropping start BA session", skb->dev->name);
713#endif
714 return;
715 }
716 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
717 memcpy(&ra_tid->ra, ra, ETH_ALEN);
718 ra_tid->tid = tid;
719
720 skb->pkt_type = IEEE80211_ADDBA_MSG;
721 skb_queue_tail(&local->skb_queue, skb);
722 tasklet_schedule(&local->tasklet);
723}
724EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
725
726void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
727 const u8 *ra, u16 tid)
728{
729 struct ieee80211_local *local = hw_to_local(hw);
730 struct ieee80211_ra_tid *ra_tid;
731 struct sk_buff *skb = dev_alloc_skb(0);
732
733 if (unlikely(!skb)) {
734#ifdef CONFIG_MAC80211_HT_DEBUG
735 if (net_ratelimit())
736 printk(KERN_WARNING "%s: Not enough memory, "
737 "dropping stop BA session", skb->dev->name);
738#endif
739 return;
740 }
741 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
742 memcpy(&ra_tid->ra, ra, ETH_ALEN);
743 ra_tid->tid = tid;
744
745 skb->pkt_type = IEEE80211_DELBA_MSG;
746 skb_queue_tail(&local->skb_queue, skb);
747 tasklet_schedule(&local->tasklet);
748}
749EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
750
/*
 * After accepting the AddBA Request we activated a timer,
 * resetting it after each frame that arrives from the originator.
 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
 */
static void sta_rx_agg_session_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and various sta_info are needed here, so init
	 * flow in sta_info_create gives the TID as data, while the timer_to_id
	 * array gives the sta through container_of */
	u8 *ptid = (u8 *)data;
	/* timer_to_tid[tid] holds the value tid, so subtracting *ptid steps
	 * back to element 0 of the array inside the owning sta_info */
	u8 *timer_to_id = ptid - *ptid;
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					 timer_to_tid[0]);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
#endif
	/* tear down the Rx side of the BA session with a timeout reason */
	ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
					 (u16)*ptid, WLAN_BACK_TIMER,
					 WLAN_REASON_QSTA_TIMEOUT);
}
774
775void ieee80211_process_addba_request(struct ieee80211_local *local,
776 struct sta_info *sta,
777 struct ieee80211_mgmt *mgmt,
778 size_t len)
779{
780 struct ieee80211_hw *hw = &local->hw;
781 struct ieee80211_conf *conf = &hw->conf;
782 struct tid_ampdu_rx *tid_agg_rx;
783 u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
784 u8 dialog_token;
785 int ret = -EOPNOTSUPP;
786 DECLARE_MAC_BUF(mac);
787
788 /* extract session parameters from addba request frame */
789 dialog_token = mgmt->u.action.u.addba_req.dialog_token;
790 timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
791 start_seq_num =
792 le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
793
794 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
795 ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
796 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
797 buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
798
799 status = WLAN_STATUS_REQUEST_DECLINED;
800
801 /* sanity check for incoming parameters:
802 * check if configuration can support the BA policy
803 * and if buffer size does not exceeds max value */
804 if (((ba_policy != 1)
805 && (!(conf->ht_conf.cap & IEEE80211_HT_CAP_DELAY_BA)))
806 || (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
807 status = WLAN_STATUS_INVALID_QOS_PARAM;
808#ifdef CONFIG_MAC80211_HT_DEBUG
809 if (net_ratelimit())
810 printk(KERN_DEBUG "AddBA Req with bad params from "
811 "%s on tid %u. policy %d, buffer size %d\n",
812 print_mac(mac, mgmt->sa), tid, ba_policy,
813 buf_size);
814#endif /* CONFIG_MAC80211_HT_DEBUG */
815 goto end_no_lock;
816 }
817 /* determine default buffer size */
818 if (buf_size == 0) {
819 struct ieee80211_supported_band *sband;
820
821 sband = local->hw.wiphy->bands[conf->channel->band];
822 buf_size = IEEE80211_MIN_AMPDU_BUF;
823 buf_size = buf_size << sband->ht_info.ampdu_factor;
824 }
825
826
827 /* examine state machine */
828 spin_lock_bh(&sta->lock);
829
830 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
831#ifdef CONFIG_MAC80211_HT_DEBUG
832 if (net_ratelimit())
833 printk(KERN_DEBUG "unexpected AddBA Req from "
834 "%s on tid %u\n",
835 print_mac(mac, mgmt->sa), tid);
836#endif /* CONFIG_MAC80211_HT_DEBUG */
837 goto end;
838 }
839
840 /* prepare A-MPDU MLME for Rx aggregation */
841 sta->ampdu_mlme.tid_rx[tid] =
842 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
843 if (!sta->ampdu_mlme.tid_rx[tid]) {
844#ifdef CONFIG_MAC80211_HT_DEBUG
845 if (net_ratelimit())
846 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
847 tid);
848#endif
849 goto end;
850 }
851 /* rx timer */
852 sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
853 sta_rx_agg_session_timer_expired;
854 sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
855 (unsigned long)&sta->timer_to_tid[tid];
856 init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
857
858 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
859
860 /* prepare reordering buffer */
861 tid_agg_rx->reorder_buf =
862 kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
863 if (!tid_agg_rx->reorder_buf) {
864#ifdef CONFIG_MAC80211_HT_DEBUG
865 if (net_ratelimit())
866 printk(KERN_ERR "can not allocate reordering buffer "
867 "to tid %d\n", tid);
868#endif
869 kfree(sta->ampdu_mlme.tid_rx[tid]);
870 goto end;
871 }
872 memset(tid_agg_rx->reorder_buf, 0,
873 buf_size * sizeof(struct sk_buff *));
874
875 if (local->ops->ampdu_action)
876 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
877 &sta->sta, tid, &start_seq_num);
878#ifdef CONFIG_MAC80211_HT_DEBUG
879 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
880#endif /* CONFIG_MAC80211_HT_DEBUG */
881
882 if (ret) {
883 kfree(tid_agg_rx->reorder_buf);
884 kfree(tid_agg_rx);
885 sta->ampdu_mlme.tid_rx[tid] = NULL;
886 goto end;
887 }
888
889 /* change state and send addba resp */
890 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
891 tid_agg_rx->dialog_token = dialog_token;
892 tid_agg_rx->ssn = start_seq_num;
893 tid_agg_rx->head_seq_num = start_seq_num;
894 tid_agg_rx->buf_size = buf_size;
895 tid_agg_rx->timeout = timeout;
896 tid_agg_rx->stored_mpdu_num = 0;
897 status = WLAN_STATUS_SUCCESS;
898end:
899 spin_unlock_bh(&sta->lock);
900
901end_no_lock:
902 ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
903 dialog_token, status, 1, buf_size, timeout);
904}
905
/*
 * Handle an incoming ADDBA Response action frame for a Tx BA session
 * we previously requested.
 *
 * @local: mac80211 private data
 * @sta:   station the response was received from
 * @mgmt:  the received management frame
 * @len:   total frame length
 *
 * On success marks the session as accepted (and wakes the aggregation
 * queue once both sides are ready); on refusal tears the session down.
 */
void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct ieee80211_hw *hw = &local->hw;
	u16 capab;
	u16 tid;
	u8 *state;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;

	state = &sta->ampdu_mlme.tid_state_tx[tid];

	spin_lock_bh(&sta->lock);

	/* ignore responses for sessions we never asked for */
	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
		spin_unlock_bh(&sta->lock);
		return;
	}

	/* the dialog token must match the one we sent in the request */
	if (mgmt->u.action.u.addba_resp.dialog_token !=
		sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
		spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		return;
	}

	/* a valid response arrived; stop the retransmit timer */
	del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS) {
		*state |= HT_ADDBA_RECEIVED_MSK;
		sta->ampdu_mlme.addba_req_num[tid] = 0;

		/* wake the queue only once both peer and driver are ready */
		if (*state == HT_AGG_STATE_OPERATIONAL)
			ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);

		spin_unlock_bh(&sta->lock);
	} else {
		sta->ampdu_mlme.addba_req_num[tid]++;
		/* this will allow the state check in stop_BA_session */
		*state = HT_AGG_STATE_OPERATIONAL;
		spin_unlock_bh(&sta->lock);
		/* must be called without sta->lock held */
		ieee80211_stop_tx_ba_session(hw, sta->sta.addr, tid,
					     WLAN_BACK_INITIATOR);
	}
}
959
960void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
961 struct sta_info *sta,
962 struct ieee80211_mgmt *mgmt, size_t len)
963{
964 struct ieee80211_local *local = sdata->local;
965 u16 tid, params;
966 u16 initiator;
967 DECLARE_MAC_BUF(mac);
968
969 params = le16_to_cpu(mgmt->u.action.u.delba.params);
970 tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
971 initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
972
973#ifdef CONFIG_MAC80211_HT_DEBUG
974 if (net_ratelimit())
975 printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n",
976 print_mac(mac, mgmt->sa),
977 initiator ? "initiator" : "recipient", tid,
978 mgmt->u.action.u.delba.reason_code);
979#endif /* CONFIG_MAC80211_HT_DEBUG */
980
981 if (initiator == WLAN_BACK_INITIATOR)
982 ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid,
983 WLAN_BACK_INITIATOR, 0);
984 else { /* WLAN_BACK_RECIPIENT */
985 spin_lock_bh(&sta->lock);
986 sta->ampdu_mlme.tid_state_tx[tid] =
987 HT_AGG_STATE_OPERATIONAL;
988 spin_unlock_bh(&sta->lock);
989 ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid,
990 WLAN_BACK_RECIPIENT);
991 }
992}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 586a9b49b0fc..3912fba6d3d0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -29,17 +29,6 @@
29#include "key.h" 29#include "key.h"
30#include "sta_info.h" 30#include "sta_info.h"
31 31
32/* ieee80211.o internal definitions, etc. These are not included into
33 * low-level drivers. */
34
35#ifndef ETH_P_PAE
36#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
37#endif /* ETH_P_PAE */
38
39#define WLAN_FC_DATA_PRESENT(fc) (((fc) & 0x4c) == 0x08)
40
41#define IEEE80211_FC(type, subtype) cpu_to_le16(type | subtype)
42
43struct ieee80211_local; 32struct ieee80211_local;
44 33
45/* Maximum number of broadcast/multicast frames to buffer when some of the 34/* Maximum number of broadcast/multicast frames to buffer when some of the
@@ -61,6 +50,12 @@ struct ieee80211_local;
61 * increased memory use (about 2 kB of RAM per entry). */ 50 * increased memory use (about 2 kB of RAM per entry). */
62#define IEEE80211_FRAGMENT_MAX 4 51#define IEEE80211_FRAGMENT_MAX 4
63 52
53/*
54 * Time after which we ignore scan results and no longer report/use
55 * them in any way.
56 */
57#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
58
64struct ieee80211_fragment_entry { 59struct ieee80211_fragment_entry {
65 unsigned long first_frag_time; 60 unsigned long first_frag_time;
66 unsigned int seq; 61 unsigned int seq;
@@ -73,9 +68,9 @@ struct ieee80211_fragment_entry {
73}; 68};
74 69
75 70
76struct ieee80211_sta_bss { 71struct ieee80211_bss {
77 struct list_head list; 72 struct list_head list;
78 struct ieee80211_sta_bss *hnext; 73 struct ieee80211_bss *hnext;
79 size_t ssid_len; 74 size_t ssid_len;
80 75
81 atomic_t users; 76 atomic_t users;
@@ -87,16 +82,11 @@ struct ieee80211_sta_bss {
87 enum ieee80211_band band; 82 enum ieee80211_band band;
88 int freq; 83 int freq;
89 int signal, noise, qual; 84 int signal, noise, qual;
90 u8 *wpa_ie; 85 u8 *ies; /* all information elements from the last Beacon or Probe
91 size_t wpa_ie_len; 86 * Response frames; note Beacon frame is not allowed to
92 u8 *rsn_ie; 87 * override values from Probe Response */
93 size_t rsn_ie_len; 88 size_t ies_len;
94 u8 *wmm_ie; 89 bool wmm_used;
95 size_t wmm_ie_len;
96 u8 *ht_ie;
97 size_t ht_ie_len;
98 u8 *ht_add_ie;
99 size_t ht_add_ie_len;
100#ifdef CONFIG_MAC80211_MESH 90#ifdef CONFIG_MAC80211_MESH
101 u8 *mesh_id; 91 u8 *mesh_id;
102 size_t mesh_id_len; 92 size_t mesh_id_len;
@@ -108,7 +98,7 @@ struct ieee80211_sta_bss {
108 u64 timestamp; 98 u64 timestamp;
109 int beacon_int; 99 int beacon_int;
110 100
111 bool probe_resp; 101 unsigned long last_probe_resp;
112 unsigned long last_update; 102 unsigned long last_update;
113 103
114 /* during assocation, we save an ERP value from a probe response so 104 /* during assocation, we save an ERP value from a probe response so
@@ -119,7 +109,7 @@ struct ieee80211_sta_bss {
119 u8 erp_value; 109 u8 erp_value;
120}; 110};
121 111
122static inline u8 *bss_mesh_cfg(struct ieee80211_sta_bss *bss) 112static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss)
123{ 113{
124#ifdef CONFIG_MAC80211_MESH 114#ifdef CONFIG_MAC80211_MESH
125 return bss->mesh_cfg; 115 return bss->mesh_cfg;
@@ -127,7 +117,7 @@ static inline u8 *bss_mesh_cfg(struct ieee80211_sta_bss *bss)
127 return NULL; 117 return NULL;
128} 118}
129 119
130static inline u8 *bss_mesh_id(struct ieee80211_sta_bss *bss) 120static inline u8 *bss_mesh_id(struct ieee80211_bss *bss)
131{ 121{
132#ifdef CONFIG_MAC80211_MESH 122#ifdef CONFIG_MAC80211_MESH
133 return bss->mesh_id; 123 return bss->mesh_id;
@@ -135,7 +125,7 @@ static inline u8 *bss_mesh_id(struct ieee80211_sta_bss *bss)
135 return NULL; 125 return NULL;
136} 126}
137 127
138static inline u8 bss_mesh_id_len(struct ieee80211_sta_bss *bss) 128static inline u8 bss_mesh_id_len(struct ieee80211_bss *bss)
139{ 129{
140#ifdef CONFIG_MAC80211_MESH 130#ifdef CONFIG_MAC80211_MESH
141 return bss->mesh_id_len; 131 return bss->mesh_id_len;
@@ -174,7 +164,7 @@ struct ieee80211_tx_data {
174 struct sk_buff **extra_frag; 164 struct sk_buff **extra_frag;
175 int num_extra_frag; 165 int num_extra_frag;
176 166
177 u16 fc, ethertype; 167 u16 ethertype;
178 unsigned int flags; 168 unsigned int flags;
179}; 169};
180 170
@@ -202,7 +192,7 @@ struct ieee80211_rx_data {
202 struct ieee80211_rx_status *status; 192 struct ieee80211_rx_status *status;
203 struct ieee80211_rate *rate; 193 struct ieee80211_rate *rate;
204 194
205 u16 fc, ethertype; 195 u16 ethertype;
206 unsigned int flags; 196 unsigned int flags;
207 int sent_ps_buffered; 197 int sent_ps_buffered;
208 int queue; 198 int queue;
@@ -239,7 +229,6 @@ struct ieee80211_if_ap {
239 struct sk_buff_head ps_bc_buf; 229 struct sk_buff_head ps_bc_buf;
240 atomic_t num_sta_ps; /* number of stations in PS mode */ 230 atomic_t num_sta_ps; /* number of stations in PS mode */
241 int dtim_count; 231 int dtim_count;
242 int num_beacons; /* number of TXed beacon frames for this BSS */
243}; 232};
244 233
245struct ieee80211_if_wds { 234struct ieee80211_if_wds {
@@ -300,48 +289,37 @@ struct mesh_config {
300#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) 289#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11)
301#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) 290#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
302#define IEEE80211_STA_PRIVACY_INVOKED BIT(13) 291#define IEEE80211_STA_PRIVACY_INVOKED BIT(13)
292/* flags for MLME request */
293#define IEEE80211_STA_REQ_SCAN 0
294#define IEEE80211_STA_REQ_DIRECT_PROBE 1
295#define IEEE80211_STA_REQ_AUTH 2
296#define IEEE80211_STA_REQ_RUN 3
297
298/* STA/IBSS MLME states */
299enum ieee80211_sta_mlme_state {
300 IEEE80211_STA_MLME_DISABLED,
301 IEEE80211_STA_MLME_DIRECT_PROBE,
302 IEEE80211_STA_MLME_AUTHENTICATE,
303 IEEE80211_STA_MLME_ASSOCIATE,
304 IEEE80211_STA_MLME_ASSOCIATED,
305 IEEE80211_STA_MLME_IBSS_SEARCH,
306 IEEE80211_STA_MLME_IBSS_JOINED,
307};
308
309/* bitfield of allowed auth algs */
310#define IEEE80211_AUTH_ALG_OPEN BIT(0)
311#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
312#define IEEE80211_AUTH_ALG_LEAP BIT(2)
313
303struct ieee80211_if_sta { 314struct ieee80211_if_sta {
304 struct timer_list timer; 315 struct timer_list timer;
305 struct work_struct work; 316 struct work_struct work;
306 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; 317 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
307 u8 ssid[IEEE80211_MAX_SSID_LEN]; 318 u8 ssid[IEEE80211_MAX_SSID_LEN];
308 enum { 319 enum ieee80211_sta_mlme_state state;
309 IEEE80211_DISABLED, IEEE80211_AUTHENTICATE,
310 IEEE80211_ASSOCIATE, IEEE80211_ASSOCIATED,
311 IEEE80211_IBSS_SEARCH, IEEE80211_IBSS_JOINED,
312 IEEE80211_MESH_UP
313 } state;
314 size_t ssid_len; 320 size_t ssid_len;
315 u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; 321 u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
316 size_t scan_ssid_len; 322 size_t scan_ssid_len;
317#ifdef CONFIG_MAC80211_MESH
318 struct timer_list mesh_path_timer;
319 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
320 size_t mesh_id_len;
321 /* Active Path Selection Protocol Identifier */
322 u8 mesh_pp_id[4];
323 /* Active Path Selection Metric Identifier */
324 u8 mesh_pm_id[4];
325 /* Congestion Control Mode Identifier */
326 u8 mesh_cc_id[4];
327 /* Local mesh Destination Sequence Number */
328 u32 dsn;
329 /* Last used PREQ ID */
330 u32 preq_id;
331 atomic_t mpaths;
332 /* Timestamp of last DSN update */
333 unsigned long last_dsn_update;
334 /* Timestamp of last DSN sent */
335 unsigned long last_preq;
336 struct mesh_rmc *rmc;
337 spinlock_t mesh_preq_queue_lock;
338 struct mesh_preq_queue preq_queue;
339 int preq_queue_len;
340 struct mesh_stats mshstats;
341 struct mesh_config mshcfg;
342 u32 mesh_seqnum;
343 bool accepting_plinks;
344#endif
345 u16 aid; 323 u16 aid;
346 u16 ap_capab, capab; 324 u16 ap_capab, capab;
347 u8 *extra_ie; /* to be added to the end of AssocReq */ 325 u8 *extra_ie; /* to be added to the end of AssocReq */
@@ -353,20 +331,17 @@ struct ieee80211_if_sta {
353 331
354 struct sk_buff_head skb_queue; 332 struct sk_buff_head skb_queue;
355 333
356 int auth_tries, assoc_tries; 334 int assoc_scan_tries; /* number of scans done pre-association */
335 int direct_probe_tries; /* retries for direct probes */
336 int auth_tries; /* retries for auth req */
337 int assoc_tries; /* retries for assoc req */
357 338
358 unsigned long request; 339 unsigned long request;
359 340
360 unsigned long last_probe; 341 unsigned long last_probe;
361 342
362 unsigned int flags; 343 unsigned int flags;
363#define IEEE80211_STA_REQ_SCAN 0
364#define IEEE80211_STA_REQ_AUTH 1
365#define IEEE80211_STA_REQ_RUN 2
366 344
367#define IEEE80211_AUTH_ALG_OPEN BIT(0)
368#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
369#define IEEE80211_AUTH_ALG_LEAP BIT(2)
370 unsigned int auth_algs; /* bitfield of allowed auth algs */ 345 unsigned int auth_algs; /* bitfield of allowed auth algs */
371 int auth_alg; /* currently used IEEE 802.11 authentication algorithm */ 346 int auth_alg; /* currently used IEEE 802.11 authentication algorithm */
372 int auth_transaction; 347 int auth_transaction;
@@ -376,31 +351,70 @@ struct ieee80211_if_sta {
376 u32 supp_rates_bits[IEEE80211_NUM_BANDS]; 351 u32 supp_rates_bits[IEEE80211_NUM_BANDS];
377 352
378 int wmm_last_param_set; 353 int wmm_last_param_set;
379 int num_beacons; /* number of TXed beacon frames by this STA */
380}; 354};
381 355
382static inline void ieee80211_if_sta_set_mesh_id(struct ieee80211_if_sta *ifsta, 356struct ieee80211_if_mesh {
383 u8 mesh_id_len, u8 *mesh_id) 357 struct work_struct work;
384{ 358 struct timer_list housekeeping_timer;
385#ifdef CONFIG_MAC80211_MESH 359 struct timer_list mesh_path_timer;
386 ifsta->mesh_id_len = mesh_id_len; 360 struct sk_buff_head skb_queue;
387 memcpy(ifsta->mesh_id, mesh_id, mesh_id_len); 361
388#endif 362 bool housekeeping;
389} 363
364 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
365 size_t mesh_id_len;
366 /* Active Path Selection Protocol Identifier */
367 u8 mesh_pp_id[4];
368 /* Active Path Selection Metric Identifier */
369 u8 mesh_pm_id[4];
370 /* Congestion Control Mode Identifier */
371 u8 mesh_cc_id[4];
372 /* Local mesh Destination Sequence Number */
373 u32 dsn;
374 /* Last used PREQ ID */
375 u32 preq_id;
376 atomic_t mpaths;
377 /* Timestamp of last DSN update */
378 unsigned long last_dsn_update;
379 /* Timestamp of last DSN sent */
380 unsigned long last_preq;
381 struct mesh_rmc *rmc;
382 spinlock_t mesh_preq_queue_lock;
383 struct mesh_preq_queue preq_queue;
384 int preq_queue_len;
385 struct mesh_stats mshstats;
386 struct mesh_config mshcfg;
387 u32 mesh_seqnum;
388 bool accepting_plinks;
389};
390 390
391#ifdef CONFIG_MAC80211_MESH 391#ifdef CONFIG_MAC80211_MESH
392#define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \ 392#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \
393 do { (sta)->mshstats.name++; } while (0) 393 do { (msh)->mshstats.name++; } while (0)
394#else 394#else
395#define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \ 395#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \
396 do { } while (0) 396 do { } while (0)
397#endif 397#endif
398 398
399/* flags used in struct ieee80211_sub_if_data.flags */ 399/**
400#define IEEE80211_SDATA_ALLMULTI BIT(0) 400 * enum ieee80211_sub_if_data_flags - virtual interface flags
401#define IEEE80211_SDATA_PROMISC BIT(1) 401 *
402#define IEEE80211_SDATA_USERSPACE_MLME BIT(2) 402 * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
403#define IEEE80211_SDATA_OPERATING_GMODE BIT(3) 403 * @IEEE80211_SDATA_PROMISC: interface is promisc
404 * @IEEE80211_SDATA_USERSPACE_MLME: userspace MLME is active
405 * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
406 * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between
407 * associated stations and deliver multicast frames both
408 * back to wireless media and to the local net stack.
409 */
410enum ieee80211_sub_if_data_flags {
411 IEEE80211_SDATA_ALLMULTI = BIT(0),
412 IEEE80211_SDATA_PROMISC = BIT(1),
413 IEEE80211_SDATA_USERSPACE_MLME = BIT(2),
414 IEEE80211_SDATA_OPERATING_GMODE = BIT(3),
415 IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(4),
416};
417
404struct ieee80211_sub_if_data { 418struct ieee80211_sub_if_data {
405 struct list_head list; 419 struct list_head list;
406 420
@@ -416,11 +430,6 @@ struct ieee80211_sub_if_data {
416 430
417 int drop_unencrypted; 431 int drop_unencrypted;
418 432
419 /*
420 * basic rates of this AP or the AP we're associated to
421 */
422 u64 basic_rates;
423
424 /* Fragment table for host-based reassembly */ 433 /* Fragment table for host-based reassembly */
425 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; 434 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
426 unsigned int fragment_next; 435 unsigned int fragment_next;
@@ -447,6 +456,9 @@ struct ieee80211_sub_if_data {
447 struct ieee80211_if_wds wds; 456 struct ieee80211_if_wds wds;
448 struct ieee80211_if_vlan vlan; 457 struct ieee80211_if_vlan vlan;
449 struct ieee80211_if_sta sta; 458 struct ieee80211_if_sta sta;
459#ifdef CONFIG_MAC80211_MESH
460 struct ieee80211_if_mesh mesh;
461#endif
450 u32 mntr_flags; 462 u32 mntr_flags;
451 } u; 463 } u;
452 464
@@ -469,7 +481,6 @@ struct ieee80211_sub_if_data {
469 struct dentry *auth_alg; 481 struct dentry *auth_alg;
470 struct dentry *auth_transaction; 482 struct dentry *auth_transaction;
471 struct dentry *flags; 483 struct dentry *flags;
472 struct dentry *num_beacons_sta;
473 struct dentry *force_unicast_rateidx; 484 struct dentry *force_unicast_rateidx;
474 struct dentry *max_ratectrl_rateidx; 485 struct dentry *max_ratectrl_rateidx;
475 } sta; 486 } sta;
@@ -477,7 +488,6 @@ struct ieee80211_sub_if_data {
477 struct dentry *drop_unencrypted; 488 struct dentry *drop_unencrypted;
478 struct dentry *num_sta_ps; 489 struct dentry *num_sta_ps;
479 struct dentry *dtim_count; 490 struct dentry *dtim_count;
480 struct dentry *num_beacons;
481 struct dentry *force_unicast_rateidx; 491 struct dentry *force_unicast_rateidx;
482 struct dentry *max_ratectrl_rateidx; 492 struct dentry *max_ratectrl_rateidx;
483 struct dentry *num_buffered_multicast; 493 struct dentry *num_buffered_multicast;
@@ -496,8 +506,10 @@ struct ieee80211_sub_if_data {
496 struct { 506 struct {
497 struct dentry *mode; 507 struct dentry *mode;
498 } monitor; 508 } monitor;
499 struct dentry *default_key;
500 } debugfs; 509 } debugfs;
510 struct {
511 struct dentry *default_key;
512 } common_debugfs;
501 513
502#ifdef CONFIG_MAC80211_MESH 514#ifdef CONFIG_MAC80211_MESH
503 struct dentry *mesh_stats_dir; 515 struct dentry *mesh_stats_dir;
@@ -538,6 +550,19 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
538 return container_of(p, struct ieee80211_sub_if_data, vif); 550 return container_of(p, struct ieee80211_sub_if_data, vif);
539} 551}
540 552
553static inline void
554ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata,
555 u8 mesh_id_len, u8 *mesh_id)
556{
557#ifdef CONFIG_MAC80211_MESH
558 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
559 ifmsh->mesh_id_len = mesh_id_len;
560 memcpy(ifmsh->mesh_id, mesh_id, mesh_id_len);
561#else
562 WARN_ON(1);
563#endif
564}
565
541enum { 566enum {
542 IEEE80211_RX_MSG = 1, 567 IEEE80211_RX_MSG = 1,
543 IEEE80211_TX_STATUS_MSG = 2, 568 IEEE80211_TX_STATUS_MSG = 2,
@@ -611,10 +636,6 @@ struct ieee80211_local {
611 struct crypto_blkcipher *wep_rx_tfm; 636 struct crypto_blkcipher *wep_rx_tfm;
612 u32 wep_iv; 637 u32 wep_iv;
613 638
614 int bridge_packets; /* bridge packets between associated stations and
615 * deliver multicast frames both back to wireless
616 * media and to the local net stack */
617
618 struct list_head interfaces; 639 struct list_head interfaces;
619 640
620 /* 641 /*
@@ -624,21 +645,21 @@ struct ieee80211_local {
624 spinlock_t key_lock; 645 spinlock_t key_lock;
625 646
626 647
627 bool sta_sw_scanning; 648 /* Scanning and BSS list */
628 bool sta_hw_scanning; 649 bool sw_scanning, hw_scanning;
629 int scan_channel_idx; 650 int scan_channel_idx;
630 enum ieee80211_band scan_band; 651 enum ieee80211_band scan_band;
631 652
632 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; 653 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state;
633 unsigned long last_scan_completed; 654 unsigned long last_scan_completed;
634 struct delayed_work scan_work; 655 struct delayed_work scan_work;
635 struct net_device *scan_dev; 656 struct ieee80211_sub_if_data *scan_sdata;
636 struct ieee80211_channel *oper_channel, *scan_channel; 657 struct ieee80211_channel *oper_channel, *scan_channel;
637 u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; 658 u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
638 size_t scan_ssid_len; 659 size_t scan_ssid_len;
639 struct list_head sta_bss_list; 660 struct list_head bss_list;
640 struct ieee80211_sta_bss *sta_bss_hash[STA_HASH_SIZE]; 661 struct ieee80211_bss *bss_hash[STA_HASH_SIZE];
641 spinlock_t sta_bss_lock; 662 spinlock_t bss_lock;
642 663
643 /* SNMP counters */ 664 /* SNMP counters */
644 /* dot11CountersTable */ 665 /* dot11CountersTable */
@@ -702,7 +723,6 @@ struct ieee80211_local {
702 struct dentry *frequency; 723 struct dentry *frequency;
703 struct dentry *antenna_sel_tx; 724 struct dentry *antenna_sel_tx;
704 struct dentry *antenna_sel_rx; 725 struct dentry *antenna_sel_rx;
705 struct dentry *bridge_packets;
706 struct dentry *rts_threshold; 726 struct dentry *rts_threshold;
707 struct dentry *fragmentation_threshold; 727 struct dentry *fragmentation_threshold;
708 struct dentry *short_retry_limit; 728 struct dentry *short_retry_limit;
@@ -772,6 +792,9 @@ struct ieee80211_ra_tid {
772 792
773/* Parsed Information Elements */ 793/* Parsed Information Elements */
774struct ieee802_11_elems { 794struct ieee802_11_elems {
795 u8 *ie_start;
796 size_t total_len;
797
775 /* pointers to IEs */ 798 /* pointers to IEs */
776 u8 *ssid; 799 u8 *ssid;
777 u8 *supp_rates; 800 u8 *supp_rates;
@@ -855,86 +878,82 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
855} 878}
856 879
857 880
858/* ieee80211.c */
859int ieee80211_hw_config(struct ieee80211_local *local); 881int ieee80211_hw_config(struct ieee80211_local *local);
860int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed); 882int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed);
861void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); 883void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
862u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht, 884u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
863 struct ieee80211_ht_info *req_ht_cap, 885 struct ieee80211_ht_info *req_ht_cap,
864 struct ieee80211_ht_bss_info *req_bss_cap); 886 struct ieee80211_ht_bss_info *req_bss_cap);
887void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
888 u32 changed);
889void ieee80211_configure_filter(struct ieee80211_local *local);
865 890
866/* ieee80211_ioctl.c */ 891/* wireless extensions */
867extern const struct iw_handler_def ieee80211_iw_handler_def; 892extern const struct iw_handler_def ieee80211_iw_handler_def;
868int ieee80211_set_freq(struct net_device *dev, int freq);
869 893
870/* ieee80211_sta.c */ 894/* STA/IBSS code */
871void ieee80211_sta_timer(unsigned long data); 895void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
872void ieee80211_sta_work(struct work_struct *work); 896void ieee80211_scan_work(struct work_struct *work);
873void ieee80211_sta_scan_work(struct work_struct *work); 897void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
874void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
875 struct ieee80211_rx_status *rx_status); 898 struct ieee80211_rx_status *rx_status);
876int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len); 899int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len);
877int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len); 900int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len);
878int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid); 901int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid);
879int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len); 902void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata,
880void ieee80211_sta_req_auth(struct net_device *dev,
881 struct ieee80211_if_sta *ifsta); 903 struct ieee80211_if_sta *ifsta);
882int ieee80211_sta_scan_results(struct net_device *dev, 904struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
883 struct iw_request_info *info,
884 char *buf, size_t len);
885ieee80211_rx_result ieee80211_sta_rx_scan(
886 struct net_device *dev, struct sk_buff *skb,
887 struct ieee80211_rx_status *rx_status);
888void ieee80211_rx_bss_list_init(struct ieee80211_local *local);
889void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local);
890int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len);
891struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
892 struct sk_buff *skb, u8 *bssid, 905 struct sk_buff *skb, u8 *bssid,
893 u8 *addr, u64 supp_rates); 906 u8 *addr, u64 supp_rates);
894int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason); 907int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason);
895int ieee80211_sta_disassociate(struct net_device *dev, u16 reason); 908int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason);
896void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, 909u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
897 u32 changed);
898u32 ieee80211_reset_erp_info(struct net_device *dev);
899int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
900 struct ieee80211_ht_info *ht_info);
901int ieee80211_ht_addt_info_ie_to_ht_bss_info(
902 struct ieee80211_ht_addt_info *ht_add_info_ie,
903 struct ieee80211_ht_bss_info *bss_info);
904void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
905 u16 tid, u8 dialog_token, u16 start_seq_num,
906 u16 agg_size, u16 timeout);
907void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
908 u16 initiator, u16 reason_code);
909void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn);
910
911void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da,
912 u16 tid, u16 initiator, u16 reason);
913void sta_addba_resp_timer_expired(unsigned long data);
914void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr);
915u64 ieee80211_sta_get_rates(struct ieee80211_local *local, 910u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
916 struct ieee802_11_elems *elems, 911 struct ieee802_11_elems *elems,
917 enum ieee80211_band band); 912 enum ieee80211_band band);
918void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, 913void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
919 int encrypt); 914 u8 *ssid, size_t ssid_len);
920void ieee802_11_parse_elems(u8 *start, size_t len, 915
921 struct ieee802_11_elems *elems); 916/* scan/BSS handling */
922 917int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
923#ifdef CONFIG_MAC80211_MESH 918 u8 *ssid, size_t ssid_len);
924void ieee80211_start_mesh(struct net_device *dev); 919int ieee80211_scan_results(struct ieee80211_local *local,
925#else 920 struct iw_request_info *info,
926static inline void ieee80211_start_mesh(struct net_device *dev) 921 char *buf, size_t len);
927{} 922ieee80211_rx_result
928#endif 923ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
924 struct sk_buff *skb,
925 struct ieee80211_rx_status *rx_status);
926void ieee80211_rx_bss_list_init(struct ieee80211_local *local);
927void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local);
928int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
929 char *ie, size_t len);
930
931void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
932int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
933 u8 *ssid, size_t ssid_len);
934struct ieee80211_bss *
935ieee80211_bss_info_update(struct ieee80211_local *local,
936 struct ieee80211_rx_status *rx_status,
937 struct ieee80211_mgmt *mgmt,
938 size_t len,
939 struct ieee802_11_elems *elems,
940 int freq, bool beacon);
941struct ieee80211_bss *
942ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq,
943 u8 *ssid, u8 ssid_len);
944struct ieee80211_bss *
945ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
946 u8 *ssid, u8 ssid_len);
947void ieee80211_rx_bss_put(struct ieee80211_local *local,
948 struct ieee80211_bss *bss);
929 949
930/* interface handling */ 950/* interface handling */
931void ieee80211_if_setup(struct net_device *dev);
932int ieee80211_if_add(struct ieee80211_local *local, const char *name, 951int ieee80211_if_add(struct ieee80211_local *local, const char *name,
933 struct net_device **new_dev, enum ieee80211_if_types type, 952 struct net_device **new_dev, enum nl80211_iftype type,
934 struct vif_params *params); 953 struct vif_params *params);
935int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, 954int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
936 enum ieee80211_if_types type); 955 enum nl80211_iftype type);
937void ieee80211_if_remove(struct net_device *dev); 956void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
938void ieee80211_remove_interfaces(struct ieee80211_local *local); 957void ieee80211_remove_interfaces(struct ieee80211_local *local);
939 958
940/* tx handling */ 959/* tx handling */
@@ -944,16 +963,52 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev);
944int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); 963int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
945int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); 964int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
946 965
966/* HT */
967int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
968 struct ieee80211_ht_info *ht_info);
969int ieee80211_ht_addt_info_ie_to_ht_bss_info(
970 struct ieee80211_ht_addt_info *ht_add_info_ie,
971 struct ieee80211_ht_bss_info *bss_info);
972void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
973
974void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
975 u16 tid, u16 initiator, u16 reason);
976void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr);
977void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
978 struct sta_info *sta,
979 struct ieee80211_mgmt *mgmt, size_t len);
980void ieee80211_process_addba_resp(struct ieee80211_local *local,
981 struct sta_info *sta,
982 struct ieee80211_mgmt *mgmt,
983 size_t len);
984void ieee80211_process_addba_request(struct ieee80211_local *local,
985 struct sta_info *sta,
986 struct ieee80211_mgmt *mgmt,
987 size_t len);
988
989/* Spectrum management */
990void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
991 struct ieee80211_mgmt *mgmt,
992 size_t len);
993
947/* utility functions/constants */ 994/* utility functions/constants */
948extern void *mac80211_wiphy_privid; /* for wiphy privid */ 995extern void *mac80211_wiphy_privid; /* for wiphy privid */
949extern const unsigned char rfc1042_header[6]; 996extern const unsigned char rfc1042_header[6];
950extern const unsigned char bridge_tunnel_header[6]; 997extern const unsigned char bridge_tunnel_header[6];
951u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 998u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
952 enum ieee80211_if_types type); 999 enum nl80211_iftype type);
953int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 1000int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
954 int rate, int erp, int short_preamble); 1001 int rate, int erp, int short_preamble);
955void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, 1002void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
956 struct ieee80211_hdr *hdr); 1003 struct ieee80211_hdr *hdr);
1004void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
1005void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
1006 int encrypt);
1007void ieee802_11_parse_elems(u8 *start, size_t len,
1008 struct ieee802_11_elems *elems);
1009int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq);
1010u64 ieee80211_mandatory_rates(struct ieee80211_local *local,
1011 enum ieee80211_band band);
957 1012
958#ifdef CONFIG_MAC80211_NOINLINE 1013#ifdef CONFIG_MAC80211_NOINLINE
959#define debug_noinline noinline 1014#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 610ed1d9893a..a72fbebb8ea2 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * Interface handling (except master interface)
3 *
2 * Copyright 2002-2005, Instant802 Networks, Inc. 4 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc. 5 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> 6 * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
@@ -17,7 +19,540 @@
17#include "sta_info.h" 19#include "sta_info.h"
18#include "debugfs_netdev.h" 20#include "debugfs_netdev.h"
19#include "mesh.h" 21#include "mesh.h"
22#include "led.h"
23
24static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
25{
26 int meshhdrlen;
27 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
28
29 meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0;
30
31 /* FIX: what would be proper limits for MTU?
32 * This interface uses 802.3 frames. */
33 if (new_mtu < 256 ||
34 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
35 return -EINVAL;
36 }
37
38#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
39 printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
40#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
41 dev->mtu = new_mtu;
42 return 0;
43}
44
45static inline int identical_mac_addr_allowed(int type1, int type2)
46{
47 return type1 == NL80211_IFTYPE_MONITOR ||
48 type2 == NL80211_IFTYPE_MONITOR ||
49 (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
50 (type1 == NL80211_IFTYPE_WDS &&
51 (type2 == NL80211_IFTYPE_WDS ||
52 type2 == NL80211_IFTYPE_AP)) ||
53 (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) ||
54 (type1 == NL80211_IFTYPE_AP_VLAN &&
55 (type2 == NL80211_IFTYPE_AP ||
56 type2 == NL80211_IFTYPE_AP_VLAN));
57}
58
59static int ieee80211_open(struct net_device *dev)
60{
61 struct ieee80211_sub_if_data *sdata, *nsdata;
62 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
63 struct sta_info *sta;
64 struct ieee80211_if_init_conf conf;
65 u32 changed = 0;
66 int res;
67 bool need_hw_reconfig = 0;
68 u8 null_addr[ETH_ALEN] = {0};
69
70 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
71
72 /* fail early if user set an invalid address */
73 if (compare_ether_addr(dev->dev_addr, null_addr) &&
74 !is_valid_ether_addr(dev->dev_addr))
75 return -EADDRNOTAVAIL;
76
77 /* we hold the RTNL here so can safely walk the list */
78 list_for_each_entry(nsdata, &local->interfaces, list) {
79 struct net_device *ndev = nsdata->dev;
80
81 if (ndev != dev && netif_running(ndev)) {
82 /*
83 * Allow only a single IBSS interface to be up at any
84 * time. This is restricted because beacon distribution
85 * cannot work properly if both are in the same IBSS.
86 *
87 * To remove this restriction we'd have to disallow them
88 * from setting the same SSID on different IBSS interfaces
89 * belonging to the same hardware. Then, however, we're
90 * faced with having to adopt two different TSF timers...
91 */
92 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
93 nsdata->vif.type == NL80211_IFTYPE_ADHOC)
94 return -EBUSY;
95
96 /*
97 * The remaining checks are only performed for interfaces
98 * with the same MAC address.
99 */
100 if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
101 continue;
102
103 /*
104 * check whether it may have the same address
105 */
106 if (!identical_mac_addr_allowed(sdata->vif.type,
107 nsdata->vif.type))
108 return -ENOTUNIQ;
109
110 /*
111 * can only add VLANs to enabled APs
112 */
113 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
114 nsdata->vif.type == NL80211_IFTYPE_AP)
115 sdata->bss = &nsdata->u.ap;
116 }
117 }
118
119 switch (sdata->vif.type) {
120 case NL80211_IFTYPE_WDS:
121 if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
122 return -ENOLINK;
123 break;
124 case NL80211_IFTYPE_AP_VLAN:
125 if (!sdata->bss)
126 return -ENOLINK;
127 list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
128 break;
129 case NL80211_IFTYPE_AP:
130 sdata->bss = &sdata->u.ap;
131 break;
132 case NL80211_IFTYPE_MESH_POINT:
133 if (!ieee80211_vif_is_mesh(&sdata->vif))
134 break;
135 /* mesh ifaces must set allmulti to forward mcast traffic */
136 atomic_inc(&local->iff_allmultis);
137 break;
138 case NL80211_IFTYPE_STATION:
139 case NL80211_IFTYPE_MONITOR:
140 case NL80211_IFTYPE_ADHOC:
141 /* no special treatment */
142 break;
143 case NL80211_IFTYPE_UNSPECIFIED:
144 case __NL80211_IFTYPE_AFTER_LAST:
145 /* cannot happen */
146 WARN_ON(1);
147 break;
148 }
149
150 if (local->open_count == 0) {
151 res = 0;
152 if (local->ops->start)
153 res = local->ops->start(local_to_hw(local));
154 if (res)
155 goto err_del_bss;
156 need_hw_reconfig = 1;
157 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
158 }
159
160 /*
161 * Check all interfaces and copy the hopefully now-present
162 * MAC address to those that have the special null one.
163 */
164 list_for_each_entry(nsdata, &local->interfaces, list) {
165 struct net_device *ndev = nsdata->dev;
166
167 /*
168 * No need to check netif_running since we do not allow
169 * it to start up with this invalid address.
170 */
171 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0)
172 memcpy(ndev->dev_addr,
173 local->hw.wiphy->perm_addr,
174 ETH_ALEN);
175 }
176
177 if (compare_ether_addr(null_addr, local->mdev->dev_addr) == 0)
178 memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr,
179 ETH_ALEN);
180
181 /*
182 * Validate the MAC address for this device.
183 */
184 if (!is_valid_ether_addr(dev->dev_addr)) {
185 if (!local->open_count && local->ops->stop)
186 local->ops->stop(local_to_hw(local));
187 return -EADDRNOTAVAIL;
188 }
189
190 switch (sdata->vif.type) {
191 case NL80211_IFTYPE_AP_VLAN:
192 /* no need to tell driver */
193 break;
194 case NL80211_IFTYPE_MONITOR:
195 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
196 local->cooked_mntrs++;
197 break;
198 }
199
200 /* must be before the call to ieee80211_configure_filter */
201 local->monitors++;
202 if (local->monitors == 1)
203 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
204
205 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
206 local->fif_fcsfail++;
207 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
208 local->fif_plcpfail++;
209 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
210 local->fif_control++;
211 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
212 local->fif_other_bss++;
213
214 netif_addr_lock_bh(local->mdev);
215 ieee80211_configure_filter(local);
216 netif_addr_unlock_bh(local->mdev);
217 break;
218 case NL80211_IFTYPE_STATION:
219 case NL80211_IFTYPE_ADHOC:
220 sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
221 /* fall through */
222 default:
223 conf.vif = &sdata->vif;
224 conf.type = sdata->vif.type;
225 conf.mac_addr = dev->dev_addr;
226 res = local->ops->add_interface(local_to_hw(local), &conf);
227 if (res)
228 goto err_stop;
229
230 if (ieee80211_vif_is_mesh(&sdata->vif))
231 ieee80211_start_mesh(sdata);
232 changed |= ieee80211_reset_erp_info(sdata);
233 ieee80211_bss_info_change_notify(sdata, changed);
234 ieee80211_enable_keys(sdata);
235
236 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
237 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
238 netif_carrier_off(dev);
239 else
240 netif_carrier_on(dev);
241 }
242
243 if (sdata->vif.type == NL80211_IFTYPE_WDS) {
244 /* Create STA entry for the WDS peer */
245 sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
246 GFP_KERNEL);
247 if (!sta) {
248 res = -ENOMEM;
249 goto err_del_interface;
250 }
251
252 /* no locking required since STA is not live yet */
253 sta->flags |= WLAN_STA_AUTHORIZED;
254
255 res = sta_info_insert(sta);
256 if (res) {
257 /* STA has been freed */
258 goto err_del_interface;
259 }
260 }
261
262 if (local->open_count == 0) {
263 res = dev_open(local->mdev);
264 WARN_ON(res);
265 if (res)
266 goto err_del_interface;
267 tasklet_enable(&local->tx_pending_tasklet);
268 tasklet_enable(&local->tasklet);
269 }
270
271 /*
272 * set_multicast_list will be invoked by the networking core
273 * which will check whether any increments here were done in
274 * error and sync them down to the hardware as filter flags.
275 */
276 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
277 atomic_inc(&local->iff_allmultis);
278
279 if (sdata->flags & IEEE80211_SDATA_PROMISC)
280 atomic_inc(&local->iff_promiscs);
281
282 local->open_count++;
283 if (need_hw_reconfig) {
284 ieee80211_hw_config(local);
285 /*
286 * set default queue parameters so drivers don't
287 * need to initialise the hardware if the hardware
288 * doesn't start up with sane defaults
289 */
290 ieee80211_set_wmm_default(sdata);
291 }
292
293 /*
294 * ieee80211_sta_work is disabled while network interface
295 * is down. Therefore, some configuration changes may not
296 * yet be effective. Trigger execution of ieee80211_sta_work
297 * to fix this.
298 */
299 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
300 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
301 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
302 queue_work(local->hw.workqueue, &ifsta->work);
303 }
304
305 netif_tx_start_all_queues(dev);
306
307 return 0;
308 err_del_interface:
309 local->ops->remove_interface(local_to_hw(local), &conf);
310 err_stop:
311 if (!local->open_count && local->ops->stop)
312 local->ops->stop(local_to_hw(local));
313 err_del_bss:
314 sdata->bss = NULL;
315 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
316 list_del(&sdata->u.vlan.list);
317 return res;
318}
319
320static int ieee80211_stop(struct net_device *dev)
321{
322 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
323 struct ieee80211_local *local = sdata->local;
324 struct ieee80211_if_init_conf conf;
325 struct sta_info *sta;
20 326
327 /*
328 * Stop TX on this interface first.
329 */
330 netif_tx_stop_all_queues(dev);
331
332 /*
333 * Now delete all active aggregation sessions.
334 */
335 rcu_read_lock();
336
337 list_for_each_entry_rcu(sta, &local->sta_list, list) {
338 if (sta->sdata == sdata)
339 ieee80211_sta_tear_down_BA_sessions(sdata,
340 sta->sta.addr);
341 }
342
343 rcu_read_unlock();
344
345 /*
346 * Remove all stations associated with this interface.
347 *
348 * This must be done before calling ops->remove_interface()
349 * because otherwise we can later invoke ops->sta_notify()
350 * whenever the STAs are removed, and that invalidates driver
351 * assumptions about always getting a vif pointer that is valid
352 * (because if we remove a STA after ops->remove_interface()
353 * the driver will have removed the vif info already!)
354 *
355 * We could relax this and only unlink the stations from the
356 * hash table and list but keep them on a per-sdata list that
357 * will be inserted back again when the interface is brought
358 * up again, but I don't currently see a use case for that,
359 * except with WDS which gets a STA entry created when it is
360 * brought up.
361 */
362 sta_info_flush(local, sdata);
363
364 /*
365 * Don't count this interface for promisc/allmulti while it
366 * is down. dev_mc_unsync() will invoke set_multicast_list
367 * on the master interface which will sync these down to the
368 * hardware as filter flags.
369 */
370 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
371 atomic_dec(&local->iff_allmultis);
372
373 if (sdata->flags & IEEE80211_SDATA_PROMISC)
374 atomic_dec(&local->iff_promiscs);
375
376 dev_mc_unsync(local->mdev, dev);
377
378 /* APs need special treatment */
379 if (sdata->vif.type == NL80211_IFTYPE_AP) {
380 struct ieee80211_sub_if_data *vlan, *tmp;
381 struct beacon_data *old_beacon = sdata->u.ap.beacon;
382
383 /* remove beacon */
384 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
385 synchronize_rcu();
386 kfree(old_beacon);
387
388 /* down all dependent devices, that is VLANs */
389 list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
390 u.vlan.list)
391 dev_close(vlan->dev);
392 WARN_ON(!list_empty(&sdata->u.ap.vlans));
393 }
394
395 local->open_count--;
396
397 switch (sdata->vif.type) {
398 case NL80211_IFTYPE_AP_VLAN:
399 list_del(&sdata->u.vlan.list);
400 /* no need to tell driver */
401 break;
402 case NL80211_IFTYPE_MONITOR:
403 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
404 local->cooked_mntrs--;
405 break;
406 }
407
408 local->monitors--;
409 if (local->monitors == 0)
410 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
411
412 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
413 local->fif_fcsfail--;
414 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
415 local->fif_plcpfail--;
416 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
417 local->fif_control--;
418 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
419 local->fif_other_bss--;
420
421 netif_addr_lock_bh(local->mdev);
422 ieee80211_configure_filter(local);
423 netif_addr_unlock_bh(local->mdev);
424 break;
425 case NL80211_IFTYPE_STATION:
426 case NL80211_IFTYPE_ADHOC:
427 sdata->u.sta.state = IEEE80211_STA_MLME_DISABLED;
428 memset(sdata->u.sta.bssid, 0, ETH_ALEN);
429 del_timer_sync(&sdata->u.sta.timer);
430 /*
431 * If the timer fired while we waited for it, it will have
432 * requeued the work. Now the work will be running again
433 * but will not rearm the timer again because it checks
434 * whether the interface is running, which, at this point,
435 * it no longer is.
436 */
437 cancel_work_sync(&sdata->u.sta.work);
438 /*
439 * When we get here, the interface is marked down.
440 * Call synchronize_rcu() to wait for the RX path
441 * should it be using the interface and enqueuing
442 * frames at this very time on another CPU.
443 */
444 synchronize_rcu();
445 skb_queue_purge(&sdata->u.sta.skb_queue);
446
447 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
448 kfree(sdata->u.sta.extra_ie);
449 sdata->u.sta.extra_ie = NULL;
450 sdata->u.sta.extra_ie_len = 0;
451 /* fall through */
452 case NL80211_IFTYPE_MESH_POINT:
453 if (ieee80211_vif_is_mesh(&sdata->vif)) {
454 /* allmulti is always set on mesh ifaces */
455 atomic_dec(&local->iff_allmultis);
456 ieee80211_stop_mesh(sdata);
457 }
458 /* fall through */
459 default:
460 if (local->scan_sdata == sdata) {
461 if (!local->ops->hw_scan)
462 cancel_delayed_work_sync(&local->scan_work);
463 /*
464 * The software scan can no longer run now, so we can
465 * clear out the scan_sdata reference. However, the
466 * hardware scan may still be running. The complete
467 * function must be prepared to handle a NULL value.
468 */
469 local->scan_sdata = NULL;
470 /*
471 * The memory barrier guarantees that another CPU
472 * that is hardware-scanning will now see the fact
473 * that this interface is gone.
474 */
475 smp_mb();
476 /*
477 * If software scanning, complete the scan but since
478 * the scan_sdata is NULL already don't send out a
479 * scan event to userspace -- the scan is incomplete.
480 */
481 if (local->sw_scanning)
482 ieee80211_scan_completed(&local->hw);
483 }
484
485 conf.vif = &sdata->vif;
486 conf.type = sdata->vif.type;
487 conf.mac_addr = dev->dev_addr;
488 /* disable all keys for as long as this netdev is down */
489 ieee80211_disable_keys(sdata);
490 local->ops->remove_interface(local_to_hw(local), &conf);
491 }
492
493 sdata->bss = NULL;
494
495 if (local->open_count == 0) {
496 if (netif_running(local->mdev))
497 dev_close(local->mdev);
498
499 if (local->ops->stop)
500 local->ops->stop(local_to_hw(local));
501
502 ieee80211_led_radio(local, 0);
503
504 flush_workqueue(local->hw.workqueue);
505
506 tasklet_disable(&local->tx_pending_tasklet);
507 tasklet_disable(&local->tasklet);
508 }
509
510 return 0;
511}
512
513static void ieee80211_set_multicast_list(struct net_device *dev)
514{
515 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
516 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
517 int allmulti, promisc, sdata_allmulti, sdata_promisc;
518
519 allmulti = !!(dev->flags & IFF_ALLMULTI);
520 promisc = !!(dev->flags & IFF_PROMISC);
521 sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
522 sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
523
524 if (allmulti != sdata_allmulti) {
525 if (dev->flags & IFF_ALLMULTI)
526 atomic_inc(&local->iff_allmultis);
527 else
528 atomic_dec(&local->iff_allmultis);
529 sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
530 }
531
532 if (promisc != sdata_promisc) {
533 if (dev->flags & IFF_PROMISC)
534 atomic_inc(&local->iff_promiscs);
535 else
536 atomic_dec(&local->iff_promiscs);
537 sdata->flags ^= IEEE80211_SDATA_PROMISC;
538 }
539
540 dev_mc_sync(local->mdev, dev);
541}
542
543static void ieee80211_if_setup(struct net_device *dev)
544{
545 ether_setup(dev);
546 dev->hard_start_xmit = ieee80211_subif_start_xmit;
547 dev->wireless_handlers = &ieee80211_iw_handler_def;
548 dev->set_multicast_list = ieee80211_set_multicast_list;
549 dev->change_mtu = ieee80211_change_mtu;
550 dev->open = ieee80211_open;
551 dev->stop = ieee80211_stop;
552 dev->destructor = free_netdev;
553 /* we will validate the address ourselves in ->open */
554 dev->validate_addr = NULL;
555}
21/* 556/*
22 * Called when the netdev is removed or, by the code below, before 557 * Called when the netdev is removed or, by the code below, before
23 * the interface type changes. 558 * the interface type changes.
@@ -31,17 +566,17 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
31 int flushed; 566 int flushed;
32 int i; 567 int i;
33 568
34 ieee80211_debugfs_remove_netdev(sdata);
35
36 /* free extra data */ 569 /* free extra data */
37 ieee80211_free_keys(sdata); 570 ieee80211_free_keys(sdata);
38 571
572 ieee80211_debugfs_remove_netdev(sdata);
573
39 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) 574 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
40 __skb_queue_purge(&sdata->fragments[i].skb_list); 575 __skb_queue_purge(&sdata->fragments[i].skb_list);
41 sdata->fragment_next = 0; 576 sdata->fragment_next = 0;
42 577
43 switch (sdata->vif.type) { 578 switch (sdata->vif.type) {
44 case IEEE80211_IF_TYPE_AP: 579 case NL80211_IFTYPE_AP:
45 beacon = sdata->u.ap.beacon; 580 beacon = sdata->u.ap.beacon;
46 rcu_assign_pointer(sdata->u.ap.beacon, NULL); 581 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
47 synchronize_rcu(); 582 synchronize_rcu();
@@ -53,23 +588,23 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
53 } 588 }
54 589
55 break; 590 break;
56 case IEEE80211_IF_TYPE_MESH_POINT: 591 case NL80211_IFTYPE_MESH_POINT:
57 /* Allow compiler to elide mesh_rmc_free call. */
58 if (ieee80211_vif_is_mesh(&sdata->vif)) 592 if (ieee80211_vif_is_mesh(&sdata->vif))
59 mesh_rmc_free(dev); 593 mesh_rmc_free(sdata);
60 /* fall through */ 594 break;
61 case IEEE80211_IF_TYPE_STA: 595 case NL80211_IFTYPE_STATION:
62 case IEEE80211_IF_TYPE_IBSS: 596 case NL80211_IFTYPE_ADHOC:
63 kfree(sdata->u.sta.extra_ie); 597 kfree(sdata->u.sta.extra_ie);
64 kfree(sdata->u.sta.assocreq_ies); 598 kfree(sdata->u.sta.assocreq_ies);
65 kfree(sdata->u.sta.assocresp_ies); 599 kfree(sdata->u.sta.assocresp_ies);
66 kfree_skb(sdata->u.sta.probe_resp); 600 kfree_skb(sdata->u.sta.probe_resp);
67 break; 601 break;
68 case IEEE80211_IF_TYPE_WDS: 602 case NL80211_IFTYPE_WDS:
69 case IEEE80211_IF_TYPE_VLAN: 603 case NL80211_IFTYPE_AP_VLAN:
70 case IEEE80211_IF_TYPE_MNTR: 604 case NL80211_IFTYPE_MONITOR:
71 break; 605 break;
72 case IEEE80211_IF_TYPE_INVALID: 606 case NL80211_IFTYPE_UNSPECIFIED:
607 case __NL80211_IFTYPE_AFTER_LAST:
73 BUG(); 608 BUG();
74 break; 609 break;
75 } 610 }
@@ -82,55 +617,42 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
82 * Helper function to initialise an interface to a specific type. 617 * Helper function to initialise an interface to a specific type.
83 */ 618 */
84static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, 619static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
85 enum ieee80211_if_types type) 620 enum nl80211_iftype type)
86{ 621{
87 struct ieee80211_if_sta *ifsta;
88
89 /* clear type-dependent union */ 622 /* clear type-dependent union */
90 memset(&sdata->u, 0, sizeof(sdata->u)); 623 memset(&sdata->u, 0, sizeof(sdata->u));
91 624
92 /* and set some type-dependent values */ 625 /* and set some type-dependent values */
93 sdata->vif.type = type; 626 sdata->vif.type = type;
627 sdata->dev->hard_start_xmit = ieee80211_subif_start_xmit;
94 628
95 /* only monitor differs */ 629 /* only monitor differs */
96 sdata->dev->type = ARPHRD_ETHER; 630 sdata->dev->type = ARPHRD_ETHER;
97 631
98 switch (type) { 632 switch (type) {
99 case IEEE80211_IF_TYPE_AP: 633 case NL80211_IFTYPE_AP:
100 skb_queue_head_init(&sdata->u.ap.ps_bc_buf); 634 skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
101 INIT_LIST_HEAD(&sdata->u.ap.vlans); 635 INIT_LIST_HEAD(&sdata->u.ap.vlans);
102 break; 636 break;
103 case IEEE80211_IF_TYPE_MESH_POINT: 637 case NL80211_IFTYPE_STATION:
104 case IEEE80211_IF_TYPE_STA: 638 case NL80211_IFTYPE_ADHOC:
105 case IEEE80211_IF_TYPE_IBSS: 639 ieee80211_sta_setup_sdata(sdata);
106 ifsta = &sdata->u.sta; 640 break;
107 INIT_WORK(&ifsta->work, ieee80211_sta_work); 641 case NL80211_IFTYPE_MESH_POINT:
108 setup_timer(&ifsta->timer, ieee80211_sta_timer,
109 (unsigned long) sdata);
110 skb_queue_head_init(&ifsta->skb_queue);
111
112 ifsta->capab = WLAN_CAPABILITY_ESS;
113 ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN |
114 IEEE80211_AUTH_ALG_SHARED_KEY;
115 ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
116 IEEE80211_STA_AUTO_BSSID_SEL |
117 IEEE80211_STA_AUTO_CHANNEL_SEL;
118 if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
119 ifsta->flags |= IEEE80211_STA_WMM_ENABLED;
120
121 if (ieee80211_vif_is_mesh(&sdata->vif)) 642 if (ieee80211_vif_is_mesh(&sdata->vif))
122 ieee80211_mesh_init_sdata(sdata); 643 ieee80211_mesh_init_sdata(sdata);
123 break; 644 break;
124 case IEEE80211_IF_TYPE_MNTR: 645 case NL80211_IFTYPE_MONITOR:
125 sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; 646 sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP;
126 sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit; 647 sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit;
127 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | 648 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
128 MONITOR_FLAG_OTHER_BSS; 649 MONITOR_FLAG_OTHER_BSS;
129 break; 650 break;
130 case IEEE80211_IF_TYPE_WDS: 651 case NL80211_IFTYPE_WDS:
131 case IEEE80211_IF_TYPE_VLAN: 652 case NL80211_IFTYPE_AP_VLAN:
132 break; 653 break;
133 case IEEE80211_IF_TYPE_INVALID: 654 case NL80211_IFTYPE_UNSPECIFIED:
655 case __NL80211_IFTYPE_AFTER_LAST:
134 BUG(); 656 BUG();
135 break; 657 break;
136 } 658 }
@@ -139,7 +661,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
139} 661}
140 662
141int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, 663int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
142 enum ieee80211_if_types type) 664 enum nl80211_iftype type)
143{ 665{
144 ASSERT_RTNL(); 666 ASSERT_RTNL();
145 667
@@ -160,14 +682,16 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
160 ieee80211_setup_sdata(sdata, type); 682 ieee80211_setup_sdata(sdata, type);
161 683
162 /* reset some values that shouldn't be kept across type changes */ 684 /* reset some values that shouldn't be kept across type changes */
163 sdata->basic_rates = 0; 685 sdata->bss_conf.basic_rates =
686 ieee80211_mandatory_rates(sdata->local,
687 sdata->local->hw.conf.channel->band);
164 sdata->drop_unencrypted = 0; 688 sdata->drop_unencrypted = 0;
165 689
166 return 0; 690 return 0;
167} 691}
168 692
169int ieee80211_if_add(struct ieee80211_local *local, const char *name, 693int ieee80211_if_add(struct ieee80211_local *local, const char *name,
170 struct net_device **new_dev, enum ieee80211_if_types type, 694 struct net_device **new_dev, enum nl80211_iftype type,
171 struct vif_params *params) 695 struct vif_params *params)
172{ 696{
173 struct net_device *ndev; 697 struct net_device *ndev;
@@ -225,9 +749,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
225 749
226 if (ieee80211_vif_is_mesh(&sdata->vif) && 750 if (ieee80211_vif_is_mesh(&sdata->vif) &&
227 params && params->mesh_id_len) 751 params && params->mesh_id_len)
228 ieee80211_if_sta_set_mesh_id(&sdata->u.sta, 752 ieee80211_sdata_set_mesh_id(sdata,
229 params->mesh_id_len, 753 params->mesh_id_len,
230 params->mesh_id); 754 params->mesh_id);
231 755
232 list_add_tail_rcu(&sdata->list, &local->interfaces); 756 list_add_tail_rcu(&sdata->list, &local->interfaces);
233 757
@@ -241,15 +765,13 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
241 return ret; 765 return ret;
242} 766}
243 767
244void ieee80211_if_remove(struct net_device *dev) 768void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
245{ 769{
246 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
247
248 ASSERT_RTNL(); 770 ASSERT_RTNL();
249 771
250 list_del_rcu(&sdata->list); 772 list_del_rcu(&sdata->list);
251 synchronize_rcu(); 773 synchronize_rcu();
252 unregister_netdevice(dev); 774 unregister_netdevice(sdata->dev);
253} 775}
254 776
255/* 777/*
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 6597c779e35a..57afcd38cd9e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -118,12 +118,12 @@ static const u8 *get_mac_for_key(struct ieee80211_key *key)
118 * address to indicate a transmit-only key. 118 * address to indicate a transmit-only key.
119 */ 119 */
120 if (key->conf.alg != ALG_WEP && 120 if (key->conf.alg != ALG_WEP &&
121 (key->sdata->vif.type == IEEE80211_IF_TYPE_AP || 121 (key->sdata->vif.type == NL80211_IFTYPE_AP ||
122 key->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) 122 key->sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
123 addr = zero_addr; 123 addr = zero_addr;
124 124
125 if (key->sta) 125 if (key->sta)
126 addr = key->sta->addr; 126 addr = key->sta->sta.addr;
127 127
128 return addr; 128 return addr;
129} 129}
@@ -331,7 +331,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
331 */ 331 */
332 key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; 332 key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE;
333 } else { 333 } else {
334 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { 334 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
335 struct sta_info *ap; 335 struct sta_info *ap;
336 336
337 /* 337 /*
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index aa5a191598c9..c307dba7ec03 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -45,16 +45,9 @@ struct ieee80211_tx_status_rtap_hdr {
45 u8 data_retries; 45 u8 data_retries;
46} __attribute__ ((packed)); 46} __attribute__ ((packed));
47 47
48/* common interface routines */
49
50static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
51{
52 memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
53 return ETH_ALEN;
54}
55 48
56/* must be called under mdev tx lock */ 49/* must be called under mdev tx lock */
57static void ieee80211_configure_filter(struct ieee80211_local *local) 50void ieee80211_configure_filter(struct ieee80211_local *local)
58{ 51{
59 unsigned int changed_flags; 52 unsigned int changed_flags;
60 unsigned int new_flags = 0; 53 unsigned int new_flags = 0;
@@ -97,6 +90,20 @@ static void ieee80211_configure_filter(struct ieee80211_local *local)
97 90
98/* master interface */ 91/* master interface */
99 92
93static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
94{
95 memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
96 return ETH_ALEN;
97}
98
99static const struct header_ops ieee80211_header_ops = {
100 .create = eth_header,
101 .parse = header_parse_80211,
102 .rebuild = eth_rebuild_header,
103 .cache = eth_header_cache,
104 .cache_update = eth_header_cache_update,
105};
106
100static int ieee80211_master_open(struct net_device *dev) 107static int ieee80211_master_open(struct net_device *dev)
101{ 108{
102 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 109 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
@@ -139,844 +146,6 @@ static void ieee80211_master_set_multicast_list(struct net_device *dev)
139 ieee80211_configure_filter(local); 146 ieee80211_configure_filter(local);
140} 147}
141 148
142/* regular interfaces */
143
144static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
145{
146 int meshhdrlen;
147 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
148
149 meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;
150
151 /* FIX: what would be proper limits for MTU?
152 * This interface uses 802.3 frames. */
153 if (new_mtu < 256 ||
154 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
155 return -EINVAL;
156 }
157
158#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
159 printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
160#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
161 dev->mtu = new_mtu;
162 return 0;
163}
164
165static inline int identical_mac_addr_allowed(int type1, int type2)
166{
167 return (type1 == IEEE80211_IF_TYPE_MNTR ||
168 type2 == IEEE80211_IF_TYPE_MNTR ||
169 (type1 == IEEE80211_IF_TYPE_AP &&
170 type2 == IEEE80211_IF_TYPE_WDS) ||
171 (type1 == IEEE80211_IF_TYPE_WDS &&
172 (type2 == IEEE80211_IF_TYPE_WDS ||
173 type2 == IEEE80211_IF_TYPE_AP)) ||
174 (type1 == IEEE80211_IF_TYPE_AP &&
175 type2 == IEEE80211_IF_TYPE_VLAN) ||
176 (type1 == IEEE80211_IF_TYPE_VLAN &&
177 (type2 == IEEE80211_IF_TYPE_AP ||
178 type2 == IEEE80211_IF_TYPE_VLAN)));
179}
180
181static int ieee80211_open(struct net_device *dev)
182{
183 struct ieee80211_sub_if_data *sdata, *nsdata;
184 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
185 struct sta_info *sta;
186 struct ieee80211_if_init_conf conf;
187 u32 changed = 0;
188 int res;
189 bool need_hw_reconfig = 0;
190
191 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
192
193 /* we hold the RTNL here so can safely walk the list */
194 list_for_each_entry(nsdata, &local->interfaces, list) {
195 struct net_device *ndev = nsdata->dev;
196
197 if (ndev != dev && netif_running(ndev)) {
198 /*
199 * Allow only a single IBSS interface to be up at any
200 * time. This is restricted because beacon distribution
201 * cannot work properly if both are in the same IBSS.
202 *
203 * To remove this restriction we'd have to disallow them
204 * from setting the same SSID on different IBSS interfaces
205 * belonging to the same hardware. Then, however, we're
206 * faced with having to adopt two different TSF timers...
207 */
208 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
209 nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
210 return -EBUSY;
211
212 /*
213 * The remaining checks are only performed for interfaces
214 * with the same MAC address.
215 */
216 if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
217 continue;
218
219 /*
220 * check whether it may have the same address
221 */
222 if (!identical_mac_addr_allowed(sdata->vif.type,
223 nsdata->vif.type))
224 return -ENOTUNIQ;
225
226 /*
227 * can only add VLANs to enabled APs
228 */
229 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
230 nsdata->vif.type == IEEE80211_IF_TYPE_AP)
231 sdata->bss = &nsdata->u.ap;
232 }
233 }
234
235 switch (sdata->vif.type) {
236 case IEEE80211_IF_TYPE_WDS:
237 if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
238 return -ENOLINK;
239 break;
240 case IEEE80211_IF_TYPE_VLAN:
241 if (!sdata->bss)
242 return -ENOLINK;
243 list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
244 break;
245 case IEEE80211_IF_TYPE_AP:
246 sdata->bss = &sdata->u.ap;
247 break;
248 case IEEE80211_IF_TYPE_MESH_POINT:
249 /* mesh ifaces must set allmulti to forward mcast traffic */
250 atomic_inc(&local->iff_allmultis);
251 break;
252 case IEEE80211_IF_TYPE_STA:
253 case IEEE80211_IF_TYPE_MNTR:
254 case IEEE80211_IF_TYPE_IBSS:
255 /* no special treatment */
256 break;
257 case IEEE80211_IF_TYPE_INVALID:
258 /* cannot happen */
259 WARN_ON(1);
260 break;
261 }
262
263 if (local->open_count == 0) {
264 res = 0;
265 if (local->ops->start)
266 res = local->ops->start(local_to_hw(local));
267 if (res)
268 goto err_del_bss;
269 need_hw_reconfig = 1;
270 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
271 }
272
273 switch (sdata->vif.type) {
274 case IEEE80211_IF_TYPE_VLAN:
275 /* no need to tell driver */
276 break;
277 case IEEE80211_IF_TYPE_MNTR:
278 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
279 local->cooked_mntrs++;
280 break;
281 }
282
283 /* must be before the call to ieee80211_configure_filter */
284 local->monitors++;
285 if (local->monitors == 1)
286 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
287
288 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
289 local->fif_fcsfail++;
290 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
291 local->fif_plcpfail++;
292 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
293 local->fif_control++;
294 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
295 local->fif_other_bss++;
296
297 netif_addr_lock_bh(local->mdev);
298 ieee80211_configure_filter(local);
299 netif_addr_unlock_bh(local->mdev);
300 break;
301 case IEEE80211_IF_TYPE_STA:
302 case IEEE80211_IF_TYPE_IBSS:
303 sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
304 /* fall through */
305 default:
306 conf.vif = &sdata->vif;
307 conf.type = sdata->vif.type;
308 conf.mac_addr = dev->dev_addr;
309 res = local->ops->add_interface(local_to_hw(local), &conf);
310 if (res)
311 goto err_stop;
312
313 if (ieee80211_vif_is_mesh(&sdata->vif))
314 ieee80211_start_mesh(sdata->dev);
315 changed |= ieee80211_reset_erp_info(dev);
316 ieee80211_bss_info_change_notify(sdata, changed);
317 ieee80211_enable_keys(sdata);
318
319 if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
320 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
321 netif_carrier_off(dev);
322 else
323 netif_carrier_on(dev);
324 }
325
326 if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
327 /* Create STA entry for the WDS peer */
328 sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
329 GFP_KERNEL);
330 if (!sta) {
331 res = -ENOMEM;
332 goto err_del_interface;
333 }
334
335 /* no locking required since STA is not live yet */
336 sta->flags |= WLAN_STA_AUTHORIZED;
337
338 res = sta_info_insert(sta);
339 if (res) {
340 /* STA has been freed */
341 goto err_del_interface;
342 }
343 }
344
345 if (local->open_count == 0) {
346 res = dev_open(local->mdev);
347 WARN_ON(res);
348 if (res)
349 goto err_del_interface;
350 tasklet_enable(&local->tx_pending_tasklet);
351 tasklet_enable(&local->tasklet);
352 }
353
354 /*
355 * set_multicast_list will be invoked by the networking core
356 * which will check whether any increments here were done in
357 * error and sync them down to the hardware as filter flags.
358 */
359 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
360 atomic_inc(&local->iff_allmultis);
361
362 if (sdata->flags & IEEE80211_SDATA_PROMISC)
363 atomic_inc(&local->iff_promiscs);
364
365 local->open_count++;
366 if (need_hw_reconfig)
367 ieee80211_hw_config(local);
368
369 /*
370 * ieee80211_sta_work is disabled while network interface
371 * is down. Therefore, some configuration changes may not
372 * yet be effective. Trigger execution of ieee80211_sta_work
373 * to fix this.
374 */
375 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
376 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
377 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
378 queue_work(local->hw.workqueue, &ifsta->work);
379 }
380
381 netif_tx_start_all_queues(dev);
382
383 return 0;
384 err_del_interface:
385 local->ops->remove_interface(local_to_hw(local), &conf);
386 err_stop:
387 if (!local->open_count && local->ops->stop)
388 local->ops->stop(local_to_hw(local));
389 err_del_bss:
390 sdata->bss = NULL;
391 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
392 list_del(&sdata->u.vlan.list);
393 return res;
394}
395
396static int ieee80211_stop(struct net_device *dev)
397{
398 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
399 struct ieee80211_local *local = sdata->local;
400 struct ieee80211_if_init_conf conf;
401 struct sta_info *sta;
402
403 /*
404 * Stop TX on this interface first.
405 */
406 netif_tx_stop_all_queues(dev);
407
408 /*
409 * Now delete all active aggregation sessions.
410 */
411 rcu_read_lock();
412
413 list_for_each_entry_rcu(sta, &local->sta_list, list) {
414 if (sta->sdata == sdata)
415 ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
416 }
417
418 rcu_read_unlock();
419
420 /*
421 * Remove all stations associated with this interface.
422 *
423 * This must be done before calling ops->remove_interface()
424 * because otherwise we can later invoke ops->sta_notify()
425 * whenever the STAs are removed, and that invalidates driver
426 * assumptions about always getting a vif pointer that is valid
427 * (because if we remove a STA after ops->remove_interface()
428 * the driver will have removed the vif info already!)
429 *
430 * We could relax this and only unlink the stations from the
431 * hash table and list but keep them on a per-sdata list that
432 * will be inserted back again when the interface is brought
433 * up again, but I don't currently see a use case for that,
434 * except with WDS which gets a STA entry created when it is
435 * brought up.
436 */
437 sta_info_flush(local, sdata);
438
439 /*
440 * Don't count this interface for promisc/allmulti while it
441 * is down. dev_mc_unsync() will invoke set_multicast_list
442 * on the master interface which will sync these down to the
443 * hardware as filter flags.
444 */
445 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
446 atomic_dec(&local->iff_allmultis);
447
448 if (sdata->flags & IEEE80211_SDATA_PROMISC)
449 atomic_dec(&local->iff_promiscs);
450
451 dev_mc_unsync(local->mdev, dev);
452
453 /* APs need special treatment */
454 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
455 struct ieee80211_sub_if_data *vlan, *tmp;
456 struct beacon_data *old_beacon = sdata->u.ap.beacon;
457
458 /* remove beacon */
459 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
460 synchronize_rcu();
461 kfree(old_beacon);
462
463 /* down all dependent devices, that is VLANs */
464 list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
465 u.vlan.list)
466 dev_close(vlan->dev);
467 WARN_ON(!list_empty(&sdata->u.ap.vlans));
468 }
469
470 local->open_count--;
471
472 switch (sdata->vif.type) {
473 case IEEE80211_IF_TYPE_VLAN:
474 list_del(&sdata->u.vlan.list);
475 /* no need to tell driver */
476 break;
477 case IEEE80211_IF_TYPE_MNTR:
478 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
479 local->cooked_mntrs--;
480 break;
481 }
482
483 local->monitors--;
484 if (local->monitors == 0)
485 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
486
487 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
488 local->fif_fcsfail--;
489 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
490 local->fif_plcpfail--;
491 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
492 local->fif_control--;
493 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
494 local->fif_other_bss--;
495
496 netif_addr_lock_bh(local->mdev);
497 ieee80211_configure_filter(local);
498 netif_addr_unlock_bh(local->mdev);
499 break;
500 case IEEE80211_IF_TYPE_MESH_POINT:
501 /* allmulti is always set on mesh ifaces */
502 atomic_dec(&local->iff_allmultis);
503 /* fall through */
504 case IEEE80211_IF_TYPE_STA:
505 case IEEE80211_IF_TYPE_IBSS:
506 sdata->u.sta.state = IEEE80211_DISABLED;
507 memset(sdata->u.sta.bssid, 0, ETH_ALEN);
508 del_timer_sync(&sdata->u.sta.timer);
509 /*
510 * When we get here, the interface is marked down.
511 * Call synchronize_rcu() to wait for the RX path
512 * should it be using the interface and enqueuing
513 * frames at this very time on another CPU.
514 */
515 synchronize_rcu();
516 skb_queue_purge(&sdata->u.sta.skb_queue);
517
518 if (local->scan_dev == sdata->dev) {
519 if (!local->ops->hw_scan) {
520 local->sta_sw_scanning = 0;
521 cancel_delayed_work(&local->scan_work);
522 } else
523 local->sta_hw_scanning = 0;
524 }
525
526 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
527 kfree(sdata->u.sta.extra_ie);
528 sdata->u.sta.extra_ie = NULL;
529 sdata->u.sta.extra_ie_len = 0;
530 /* fall through */
531 default:
532 conf.vif = &sdata->vif;
533 conf.type = sdata->vif.type;
534 conf.mac_addr = dev->dev_addr;
535 /* disable all keys for as long as this netdev is down */
536 ieee80211_disable_keys(sdata);
537 local->ops->remove_interface(local_to_hw(local), &conf);
538 }
539
540 sdata->bss = NULL;
541
542 if (local->open_count == 0) {
543 if (netif_running(local->mdev))
544 dev_close(local->mdev);
545
546 if (local->ops->stop)
547 local->ops->stop(local_to_hw(local));
548
549 ieee80211_led_radio(local, 0);
550
551 flush_workqueue(local->hw.workqueue);
552
553 tasklet_disable(&local->tx_pending_tasklet);
554 tasklet_disable(&local->tasklet);
555 }
556
557 return 0;
558}
559
560int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
561{
562 struct ieee80211_local *local = hw_to_local(hw);
563 struct sta_info *sta;
564 struct ieee80211_sub_if_data *sdata;
565 u16 start_seq_num = 0;
566 u8 *state;
567 int ret;
568 DECLARE_MAC_BUF(mac);
569
570 if (tid >= STA_TID_NUM)
571 return -EINVAL;
572
573#ifdef CONFIG_MAC80211_HT_DEBUG
574 printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
575 print_mac(mac, ra), tid);
576#endif /* CONFIG_MAC80211_HT_DEBUG */
577
578 rcu_read_lock();
579
580 sta = sta_info_get(local, ra);
581 if (!sta) {
582#ifdef CONFIG_MAC80211_HT_DEBUG
583 printk(KERN_DEBUG "Could not find the station\n");
584#endif
585 ret = -ENOENT;
586 goto exit;
587 }
588
589 spin_lock_bh(&sta->lock);
590
591 /* we have tried too many times, receiver does not want A-MPDU */
592 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
593 ret = -EBUSY;
594 goto err_unlock_sta;
595 }
596
597 state = &sta->ampdu_mlme.tid_state_tx[tid];
598 /* check if the TID is not in aggregation flow already */
599 if (*state != HT_AGG_STATE_IDLE) {
600#ifdef CONFIG_MAC80211_HT_DEBUG
601 printk(KERN_DEBUG "BA request denied - session is not "
602 "idle on tid %u\n", tid);
603#endif /* CONFIG_MAC80211_HT_DEBUG */
604 ret = -EAGAIN;
605 goto err_unlock_sta;
606 }
607
608 /* prepare A-MPDU MLME for Tx aggregation */
609 sta->ampdu_mlme.tid_tx[tid] =
610 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
611 if (!sta->ampdu_mlme.tid_tx[tid]) {
612#ifdef CONFIG_MAC80211_HT_DEBUG
613 if (net_ratelimit())
614 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
615 tid);
616#endif
617 ret = -ENOMEM;
618 goto err_unlock_sta;
619 }
620 /* Tx timer */
621 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
622 sta_addba_resp_timer_expired;
623 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
624 (unsigned long)&sta->timer_to_tid[tid];
625 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
626
627 /* create a new queue for this aggregation */
628 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
629
630 /* case no queue is available to aggregation
631 * don't switch to aggregation */
632 if (ret) {
633#ifdef CONFIG_MAC80211_HT_DEBUG
634 printk(KERN_DEBUG "BA request denied - queue unavailable for"
635 " tid %d\n", tid);
636#endif /* CONFIG_MAC80211_HT_DEBUG */
637 goto err_unlock_queue;
638 }
639 sdata = sta->sdata;
640
641 /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
642 * call back right away, it must see that the flow has begun */
643 *state |= HT_ADDBA_REQUESTED_MSK;
644
645 if (local->ops->ampdu_action)
646 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
647 ra, tid, &start_seq_num);
648
649 if (ret) {
650 /* No need to requeue the packets in the agg queue, since we
651 * held the tx lock: no packet could be enqueued to the newly
652 * allocated queue */
653 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
654#ifdef CONFIG_MAC80211_HT_DEBUG
655 printk(KERN_DEBUG "BA request denied - HW unavailable for"
656 " tid %d\n", tid);
657#endif /* CONFIG_MAC80211_HT_DEBUG */
658 *state = HT_AGG_STATE_IDLE;
659 goto err_unlock_queue;
660 }
661
662 /* Will put all the packets in the new SW queue */
663 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
664 spin_unlock_bh(&sta->lock);
665
666 /* send an addBA request */
667 sta->ampdu_mlme.dialog_token_allocator++;
668 sta->ampdu_mlme.tid_tx[tid]->dialog_token =
669 sta->ampdu_mlme.dialog_token_allocator;
670 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
671
672
673 ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
674 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
675 sta->ampdu_mlme.tid_tx[tid]->ssn,
676 0x40, 5000);
677 /* activate the timer for the recipient's addBA response */
678 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
679 jiffies + ADDBA_RESP_INTERVAL;
680 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
681#ifdef CONFIG_MAC80211_HT_DEBUG
682 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
683#endif
684 goto exit;
685
686err_unlock_queue:
687 kfree(sta->ampdu_mlme.tid_tx[tid]);
688 sta->ampdu_mlme.tid_tx[tid] = NULL;
689 ret = -EBUSY;
690err_unlock_sta:
691 spin_unlock_bh(&sta->lock);
692exit:
693 rcu_read_unlock();
694 return ret;
695}
696EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
697
698int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
699 u8 *ra, u16 tid,
700 enum ieee80211_back_parties initiator)
701{
702 struct ieee80211_local *local = hw_to_local(hw);
703 struct sta_info *sta;
704 u8 *state;
705 int ret = 0;
706 DECLARE_MAC_BUF(mac);
707
708 if (tid >= STA_TID_NUM)
709 return -EINVAL;
710
711 rcu_read_lock();
712 sta = sta_info_get(local, ra);
713 if (!sta) {
714 rcu_read_unlock();
715 return -ENOENT;
716 }
717
718 /* check if the TID is in aggregation */
719 state = &sta->ampdu_mlme.tid_state_tx[tid];
720 spin_lock_bh(&sta->lock);
721
722 if (*state != HT_AGG_STATE_OPERATIONAL) {
723 ret = -ENOENT;
724 goto stop_BA_exit;
725 }
726
727#ifdef CONFIG_MAC80211_HT_DEBUG
728 printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
729 print_mac(mac, ra), tid);
730#endif /* CONFIG_MAC80211_HT_DEBUG */
731
732 ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
733
734 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
735 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
736
737 if (local->ops->ampdu_action)
738 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
739 ra, tid, NULL);
740
741 /* case HW denied going back to legacy */
742 if (ret) {
743 WARN_ON(ret != -EBUSY);
744 *state = HT_AGG_STATE_OPERATIONAL;
745 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
746 goto stop_BA_exit;
747 }
748
749stop_BA_exit:
750 spin_unlock_bh(&sta->lock);
751 rcu_read_unlock();
752 return ret;
753}
754EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
755
756void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
757{
758 struct ieee80211_local *local = hw_to_local(hw);
759 struct sta_info *sta;
760 u8 *state;
761 DECLARE_MAC_BUF(mac);
762
763 if (tid >= STA_TID_NUM) {
764#ifdef CONFIG_MAC80211_HT_DEBUG
765 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
766 tid, STA_TID_NUM);
767#endif
768 return;
769 }
770
771 rcu_read_lock();
772 sta = sta_info_get(local, ra);
773 if (!sta) {
774 rcu_read_unlock();
775#ifdef CONFIG_MAC80211_HT_DEBUG
776 printk(KERN_DEBUG "Could not find station: %s\n",
777 print_mac(mac, ra));
778#endif
779 return;
780 }
781
782 state = &sta->ampdu_mlme.tid_state_tx[tid];
783 spin_lock_bh(&sta->lock);
784
785 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
786#ifdef CONFIG_MAC80211_HT_DEBUG
787 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
788 *state);
789#endif
790 spin_unlock_bh(&sta->lock);
791 rcu_read_unlock();
792 return;
793 }
794
795 WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
796
797 *state |= HT_ADDBA_DRV_READY_MSK;
798
799 if (*state == HT_AGG_STATE_OPERATIONAL) {
800#ifdef CONFIG_MAC80211_HT_DEBUG
801 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
802#endif
803 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
804 }
805 spin_unlock_bh(&sta->lock);
806 rcu_read_unlock();
807}
808EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
809
810void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
811{
812 struct ieee80211_local *local = hw_to_local(hw);
813 struct sta_info *sta;
814 u8 *state;
815 int agg_queue;
816 DECLARE_MAC_BUF(mac);
817
818 if (tid >= STA_TID_NUM) {
819#ifdef CONFIG_MAC80211_HT_DEBUG
820 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
821 tid, STA_TID_NUM);
822#endif
823 return;
824 }
825
826#ifdef CONFIG_MAC80211_HT_DEBUG
827 printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
828 print_mac(mac, ra), tid);
829#endif /* CONFIG_MAC80211_HT_DEBUG */
830
831 rcu_read_lock();
832 sta = sta_info_get(local, ra);
833 if (!sta) {
834#ifdef CONFIG_MAC80211_HT_DEBUG
835 printk(KERN_DEBUG "Could not find station: %s\n",
836 print_mac(mac, ra));
837#endif
838 rcu_read_unlock();
839 return;
840 }
841 state = &sta->ampdu_mlme.tid_state_tx[tid];
842
843 /* NOTE: no need to use sta->lock in this state check, as
844 * ieee80211_stop_tx_ba_session will let only one stop call to
845 * pass through per sta/tid
846 */
847 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
848#ifdef CONFIG_MAC80211_HT_DEBUG
849 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
850#endif
851 rcu_read_unlock();
852 return;
853 }
854
855 if (*state & HT_AGG_STATE_INITIATOR_MSK)
856 ieee80211_send_delba(sta->sdata->dev, ra, tid,
857 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
858
859 agg_queue = sta->tid_to_tx_q[tid];
860
861 ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
862
863 /* We just requeued the all the frames that were in the
864 * removed queue, and since we might miss a softirq we do
865 * netif_schedule_queue. ieee80211_wake_queue is not used
866 * here as this queue is not necessarily stopped
867 */
868 netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue));
869 spin_lock_bh(&sta->lock);
870 *state = HT_AGG_STATE_IDLE;
871 sta->ampdu_mlme.addba_req_num[tid] = 0;
872 kfree(sta->ampdu_mlme.tid_tx[tid]);
873 sta->ampdu_mlme.tid_tx[tid] = NULL;
874 spin_unlock_bh(&sta->lock);
875
876 rcu_read_unlock();
877}
878EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
879
880void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
881 const u8 *ra, u16 tid)
882{
883 struct ieee80211_local *local = hw_to_local(hw);
884 struct ieee80211_ra_tid *ra_tid;
885 struct sk_buff *skb = dev_alloc_skb(0);
886
887 if (unlikely(!skb)) {
888#ifdef CONFIG_MAC80211_HT_DEBUG
889 if (net_ratelimit())
890 printk(KERN_WARNING "%s: Not enough memory, "
891 "dropping start BA session", skb->dev->name);
892#endif
893 return;
894 }
895 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
896 memcpy(&ra_tid->ra, ra, ETH_ALEN);
897 ra_tid->tid = tid;
898
899 skb->pkt_type = IEEE80211_ADDBA_MSG;
900 skb_queue_tail(&local->skb_queue, skb);
901 tasklet_schedule(&local->tasklet);
902}
903EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
904
905void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
906 const u8 *ra, u16 tid)
907{
908 struct ieee80211_local *local = hw_to_local(hw);
909 struct ieee80211_ra_tid *ra_tid;
910 struct sk_buff *skb = dev_alloc_skb(0);
911
912 if (unlikely(!skb)) {
913#ifdef CONFIG_MAC80211_HT_DEBUG
914 if (net_ratelimit())
915 printk(KERN_WARNING "%s: Not enough memory, "
916 "dropping stop BA session", skb->dev->name);
917#endif
918 return;
919 }
920 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
921 memcpy(&ra_tid->ra, ra, ETH_ALEN);
922 ra_tid->tid = tid;
923
924 skb->pkt_type = IEEE80211_DELBA_MSG;
925 skb_queue_tail(&local->skb_queue, skb);
926 tasklet_schedule(&local->tasklet);
927}
928EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
929
930static void ieee80211_set_multicast_list(struct net_device *dev)
931{
932 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
933 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
934 int allmulti, promisc, sdata_allmulti, sdata_promisc;
935
936 allmulti = !!(dev->flags & IFF_ALLMULTI);
937 promisc = !!(dev->flags & IFF_PROMISC);
938 sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
939 sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
940
941 if (allmulti != sdata_allmulti) {
942 if (dev->flags & IFF_ALLMULTI)
943 atomic_inc(&local->iff_allmultis);
944 else
945 atomic_dec(&local->iff_allmultis);
946 sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
947 }
948
949 if (promisc != sdata_promisc) {
950 if (dev->flags & IFF_PROMISC)
951 atomic_inc(&local->iff_promiscs);
952 else
953 atomic_dec(&local->iff_promiscs);
954 sdata->flags ^= IEEE80211_SDATA_PROMISC;
955 }
956
957 dev_mc_sync(local->mdev, dev);
958}
959
960static const struct header_ops ieee80211_header_ops = {
961 .create = eth_header,
962 .parse = header_parse_80211,
963 .rebuild = eth_rebuild_header,
964 .cache = eth_header_cache,
965 .cache_update = eth_header_cache_update,
966};
967
968void ieee80211_if_setup(struct net_device *dev)
969{
970 ether_setup(dev);
971 dev->hard_start_xmit = ieee80211_subif_start_xmit;
972 dev->wireless_handlers = &ieee80211_iw_handler_def;
973 dev->set_multicast_list = ieee80211_set_multicast_list;
974 dev->change_mtu = ieee80211_change_mtu;
975 dev->open = ieee80211_open;
976 dev->stop = ieee80211_stop;
977 dev->destructor = free_netdev;
978}
979
980/* everything else */ 149/* everything else */
981 150
982int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed) 151int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
@@ -987,18 +156,21 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
987 if (WARN_ON(!netif_running(sdata->dev))) 156 if (WARN_ON(!netif_running(sdata->dev)))
988 return 0; 157 return 0;
989 158
159 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
160 return -EINVAL;
161
990 if (!local->ops->config_interface) 162 if (!local->ops->config_interface)
991 return 0; 163 return 0;
992 164
993 memset(&conf, 0, sizeof(conf)); 165 memset(&conf, 0, sizeof(conf));
994 conf.changed = changed; 166 conf.changed = changed;
995 167
996 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 168 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
997 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 169 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
998 conf.bssid = sdata->u.sta.bssid; 170 conf.bssid = sdata->u.sta.bssid;
999 conf.ssid = sdata->u.sta.ssid; 171 conf.ssid = sdata->u.sta.ssid;
1000 conf.ssid_len = sdata->u.sta.ssid_len; 172 conf.ssid_len = sdata->u.sta.ssid_len;
1001 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 173 } else if (sdata->vif.type == NL80211_IFTYPE_AP) {
1002 conf.bssid = sdata->dev->dev_addr; 174 conf.bssid = sdata->dev->dev_addr;
1003 conf.ssid = sdata->u.ap.ssid; 175 conf.ssid = sdata->u.ap.ssid;
1004 conf.ssid_len = sdata->u.ap.ssid_len; 176 conf.ssid_len = sdata->u.ap.ssid_len;
@@ -1027,7 +199,7 @@ int ieee80211_hw_config(struct ieee80211_local *local)
1027 struct ieee80211_channel *chan; 199 struct ieee80211_channel *chan;
1028 int ret = 0; 200 int ret = 0;
1029 201
1030 if (local->sta_sw_scanning) 202 if (local->sw_scanning)
1031 chan = local->scan_channel; 203 chan = local->scan_channel;
1032 else 204 else
1033 chan = local->oper_channel; 205 chan = local->oper_channel;
@@ -1099,8 +271,8 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
1099 ht_conf.ht_supported = 1; 271 ht_conf.ht_supported = 1;
1100 272
1101 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; 273 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
1102 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); 274 ht_conf.cap &= ~(IEEE80211_HT_CAP_SM_PS);
1103 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS; 275 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_SM_PS;
1104 ht_bss_conf.primary_channel = req_bss_cap->primary_channel; 276 ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
1105 ht_bss_conf.bss_cap = req_bss_cap->bss_cap; 277 ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
1106 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; 278 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
@@ -1152,6 +324,9 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
1152{ 324{
1153 struct ieee80211_local *local = sdata->local; 325 struct ieee80211_local *local = sdata->local;
1154 326
327 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
328 return;
329
1155 if (!changed) 330 if (!changed)
1156 return; 331 return;
1157 332
@@ -1162,10 +337,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
1162 changed); 337 changed);
1163} 338}
1164 339
1165u32 ieee80211_reset_erp_info(struct net_device *dev) 340u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
1166{ 341{
1167 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1168
1169 sdata->bss_conf.use_cts_prot = 0; 342 sdata->bss_conf.use_cts_prot = 0;
1170 sdata->bss_conf.use_short_preamble = 0; 343 sdata->bss_conf.use_short_preamble = 0;
1171 return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE; 344 return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE;
@@ -1244,9 +417,10 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1244 struct ieee80211_key *key, 417 struct ieee80211_key *key,
1245 struct sk_buff *skb) 418 struct sk_buff *skb)
1246{ 419{
1247 int hdrlen, iv_len, mic_len; 420 unsigned int hdrlen, iv_len, mic_len;
421 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1248 422
1249 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 423 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1250 424
1251 if (!key) 425 if (!key)
1252 goto no_key; 426 goto no_key;
@@ -1268,24 +442,20 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1268 goto no_key; 442 goto no_key;
1269 } 443 }
1270 444
1271 if (skb->len >= mic_len && 445 if (skb->len >= hdrlen + mic_len &&
1272 !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 446 !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
1273 skb_trim(skb, skb->len - mic_len); 447 skb_trim(skb, skb->len - mic_len);
1274 if (skb->len >= iv_len && skb->len > hdrlen) { 448 if (skb->len >= hdrlen + iv_len) {
1275 memmove(skb->data + iv_len, skb->data, hdrlen); 449 memmove(skb->data + iv_len, skb->data, hdrlen);
1276 skb_pull(skb, iv_len); 450 hdr = (struct ieee80211_hdr *)skb_pull(skb, iv_len);
1277 } 451 }
1278 452
1279no_key: 453no_key:
1280 { 454 if (ieee80211_is_data_qos(hdr->frame_control)) {
1281 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 455 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1282 u16 fc = le16_to_cpu(hdr->frame_control); 456 memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data,
1283 if ((fc & 0x8C) == 0x88) /* QoS Control Field */ { 457 hdrlen - IEEE80211_QOS_CTL_LEN);
1284 fc &= ~IEEE80211_STYPE_QOS_DATA; 458 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
1285 hdr->frame_control = cpu_to_le16(fc);
1286 memmove(skb->data + 2, skb->data, hdrlen - 2);
1287 skb_pull(skb, 2);
1288 }
1289 } 459 }
1290} 460}
1291 461
@@ -1376,47 +546,47 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1376 546
1377 rcu_read_lock(); 547 rcu_read_lock();
1378 548
1379 if (info->status.excessive_retries) { 549 sta = sta_info_get(local, hdr->addr1);
1380 sta = sta_info_get(local, hdr->addr1); 550
1381 if (sta) { 551 if (sta) {
1382 if (test_sta_flags(sta, WLAN_STA_PS)) { 552 if (info->status.excessive_retries &&
1383 /* 553 test_sta_flags(sta, WLAN_STA_PS)) {
1384 * The STA is in power save mode, so assume 554 /*
1385 * that this TX packet failed because of that. 555 * The STA is in power save mode, so assume
1386 */ 556 * that this TX packet failed because of that.
1387 ieee80211_handle_filtered_frame(local, sta, skb); 557 */
1388 rcu_read_unlock(); 558 ieee80211_handle_filtered_frame(local, sta, skb);
1389 return; 559 rcu_read_unlock();
1390 } 560 return;
1391 } 561 }
1392 }
1393 562
1394 fc = hdr->frame_control; 563 fc = hdr->frame_control;
564
565 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
566 (ieee80211_is_data_qos(fc))) {
567 u16 tid, ssn;
568 u8 *qc;
1395 569
1396 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
1397 (ieee80211_is_data_qos(fc))) {
1398 u16 tid, ssn;
1399 u8 *qc;
1400 sta = sta_info_get(local, hdr->addr1);
1401 if (sta) {
1402 qc = ieee80211_get_qos_ctl(hdr); 570 qc = ieee80211_get_qos_ctl(hdr);
1403 tid = qc[0] & 0xf; 571 tid = qc[0] & 0xf;
1404 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) 572 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
1405 & IEEE80211_SCTL_SEQ); 573 & IEEE80211_SCTL_SEQ);
1406 ieee80211_send_bar(sta->sdata->dev, hdr->addr1, 574 ieee80211_send_bar(sta->sdata, hdr->addr1,
1407 tid, ssn); 575 tid, ssn);
1408 } 576 }
1409 }
1410 577
1411 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { 578 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1412 sta = sta_info_get(local, hdr->addr1);
1413 if (sta) {
1414 ieee80211_handle_filtered_frame(local, sta, skb); 579 ieee80211_handle_filtered_frame(local, sta, skb);
1415 rcu_read_unlock(); 580 rcu_read_unlock();
1416 return; 581 return;
582 } else {
583 if (info->status.excessive_retries)
584 sta->tx_retry_failed++;
585 sta->tx_retry_count += info->status.retry_count;
1417 } 586 }
1418 } else 587
1419 rate_control_tx_status(local->mdev, skb); 588 rate_control_tx_status(local->mdev, skb);
589 }
1420 590
1421 rcu_read_unlock(); 591 rcu_read_unlock();
1422 592
@@ -1504,7 +674,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1504 674
1505 rcu_read_lock(); 675 rcu_read_lock();
1506 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 676 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1507 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) { 677 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
1508 if (!netif_running(sdata->dev)) 678 if (!netif_running(sdata->dev))
1509 continue; 679 continue;
1510 680
@@ -1580,8 +750,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1580 750
1581 local->hw.queues = 1; /* default */ 751 local->hw.queues = 1; /* default */
1582 752
1583 local->bridge_packets = 1;
1584
1585 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 753 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
1586 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD; 754 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
1587 local->short_retry_limit = 7; 755 local->short_retry_limit = 7;
@@ -1592,7 +760,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1592 760
1593 spin_lock_init(&local->key_lock); 761 spin_lock_init(&local->key_lock);
1594 762
1595 INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work); 763 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
1596 764
1597 sta_info_init(local); 765 sta_info_init(local);
1598 766
@@ -1639,6 +807,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1639 } 807 }
1640 } 808 }
1641 809
810 /* if low-level driver supports AP, we also support VLAN */
811 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP))
812 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
813
814 /* mac80211 always supports monitor */
815 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
816
1642 result = wiphy_register(local->hw.wiphy); 817 result = wiphy_register(local->hw.wiphy);
1643 if (result < 0) 818 if (result < 0)
1644 return result; 819 return result;
@@ -1745,7 +920,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1745 920
1746 /* add one default STA interface */ 921 /* add one default STA interface */
1747 result = ieee80211_if_add(local, "wlan%d", NULL, 922 result = ieee80211_if_add(local, "wlan%d", NULL,
1748 IEEE80211_IF_TYPE_STA, NULL); 923 NL80211_IFTYPE_STATION, NULL);
1749 if (result) 924 if (result)
1750 printk(KERN_WARNING "%s: Failed to add default virtual iface\n", 925 printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
1751 wiphy_name(local->hw.wiphy)); 926 wiphy_name(local->hw.wiphy));
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 35f2f95f2fa7..30cf891fd3a8 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -12,6 +12,9 @@
12#include "ieee80211_i.h" 12#include "ieee80211_i.h"
13#include "mesh.h" 13#include "mesh.h"
14 14
15#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
16#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
17
15#define PP_OFFSET 1 /* Path Selection Protocol */ 18#define PP_OFFSET 1 /* Path Selection Protocol */
16#define PM_OFFSET 5 /* Path Selection Metric */ 19#define PM_OFFSET 5 /* Path Selection Metric */
17#define CC_OFFSET 9 /* Congestion Control Mode */ 20#define CC_OFFSET 9 /* Congestion Control Mode */
@@ -35,19 +38,28 @@ void ieee80211s_stop(void)
35 kmem_cache_destroy(rm_cache); 38 kmem_cache_destroy(rm_cache);
36} 39}
37 40
41static void ieee80211_mesh_housekeeping_timer(unsigned long data)
42{
43 struct ieee80211_sub_if_data *sdata = (void *) data;
44 struct ieee80211_local *local = sdata->local;
45 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
46
47 ifmsh->housekeeping = true;
48 queue_work(local->hw.workqueue, &ifmsh->work);
49}
50
38/** 51/**
39 * mesh_matches_local - check if the config of a mesh point matches ours 52 * mesh_matches_local - check if the config of a mesh point matches ours
40 * 53 *
41 * @ie: information elements of a management frame from the mesh peer 54 * @ie: information elements of a management frame from the mesh peer
42 * @dev: local mesh interface 55 * @sdata: local mesh subif
43 * 56 *
44 * This function checks if the mesh configuration of a mesh point matches the 57 * This function checks if the mesh configuration of a mesh point matches the
45 * local mesh configuration, i.e. if both nodes belong to the same mesh network. 58 * local mesh configuration, i.e. if both nodes belong to the same mesh network.
46 */ 59 */
47bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev) 60bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
48{ 61{
49 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 62 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
50 struct ieee80211_if_sta *sta = &sdata->u.sta;
51 63
52 /* 64 /*
53 * As support for each feature is added, check for matching 65 * As support for each feature is added, check for matching
@@ -59,11 +71,11 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev)
59 * - MDA enabled 71 * - MDA enabled
60 * - Power management control on fc 72 * - Power management control on fc
61 */ 73 */
62 if (sta->mesh_id_len == ie->mesh_id_len && 74 if (ifmsh->mesh_id_len == ie->mesh_id_len &&
63 memcmp(sta->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && 75 memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
64 memcmp(sta->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 && 76 memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 &&
65 memcmp(sta->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 && 77 memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 &&
66 memcmp(sta->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0) 78 memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0)
67 return true; 79 return true;
68 80
69 return false; 81 return false;
@@ -73,10 +85,8 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev)
73 * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links 85 * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links
74 * 86 *
75 * @ie: information elements of a management frame from the mesh peer 87 * @ie: information elements of a management frame from the mesh peer
76 * @dev: local mesh interface
77 */ 88 */
78bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, 89bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
79 struct net_device *dev)
80{ 90{
81 return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0; 91 return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0;
82} 92}
@@ -98,11 +108,11 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
98 */ 108 */
99 free_plinks = mesh_plink_availables(sdata); 109 free_plinks = mesh_plink_availables(sdata);
100 110
101 if (free_plinks != sdata->u.sta.accepting_plinks) 111 if (free_plinks != sdata->u.mesh.accepting_plinks)
102 ieee80211_sta_timer((unsigned long) sdata); 112 ieee80211_mesh_housekeeping_timer((unsigned long) sdata);
103} 113}
104 114
105void mesh_ids_set_default(struct ieee80211_if_sta *sta) 115void mesh_ids_set_default(struct ieee80211_if_mesh *sta)
106{ 116{
107 u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff}; 117 u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff};
108 118
@@ -111,28 +121,26 @@ void mesh_ids_set_default(struct ieee80211_if_sta *sta)
111 memcpy(sta->mesh_cc_id, def_id, 4); 121 memcpy(sta->mesh_cc_id, def_id, 4);
112} 122}
113 123
114int mesh_rmc_init(struct net_device *dev) 124int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
115{ 125{
116 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
117 int i; 126 int i;
118 127
119 sdata->u.sta.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); 128 sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL);
120 if (!sdata->u.sta.rmc) 129 if (!sdata->u.mesh.rmc)
121 return -ENOMEM; 130 return -ENOMEM;
122 sdata->u.sta.rmc->idx_mask = RMC_BUCKETS - 1; 131 sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
123 for (i = 0; i < RMC_BUCKETS; i++) 132 for (i = 0; i < RMC_BUCKETS; i++)
124 INIT_LIST_HEAD(&sdata->u.sta.rmc->bucket[i].list); 133 INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list);
125 return 0; 134 return 0;
126} 135}
127 136
128void mesh_rmc_free(struct net_device *dev) 137void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
129{ 138{
130 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 139 struct mesh_rmc *rmc = sdata->u.mesh.rmc;
131 struct mesh_rmc *rmc = sdata->u.sta.rmc;
132 struct rmc_entry *p, *n; 140 struct rmc_entry *p, *n;
133 int i; 141 int i;
134 142
135 if (!sdata->u.sta.rmc) 143 if (!sdata->u.mesh.rmc)
136 return; 144 return;
137 145
138 for (i = 0; i < RMC_BUCKETS; i++) 146 for (i = 0; i < RMC_BUCKETS; i++)
@@ -142,7 +150,7 @@ void mesh_rmc_free(struct net_device *dev)
142 } 150 }
143 151
144 kfree(rmc); 152 kfree(rmc);
145 sdata->u.sta.rmc = NULL; 153 sdata->u.mesh.rmc = NULL;
146} 154}
147 155
148/** 156/**
@@ -158,10 +166,9 @@ void mesh_rmc_free(struct net_device *dev)
158 * it. 166 * it.
159 */ 167 */
160int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, 168int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
161 struct net_device *dev) 169 struct ieee80211_sub_if_data *sdata)
162{ 170{
163 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 171 struct mesh_rmc *rmc = sdata->u.mesh.rmc;
164 struct mesh_rmc *rmc = sdata->u.sta.rmc;
165 u32 seqnum = 0; 172 u32 seqnum = 0;
166 int entries = 0; 173 int entries = 0;
167 u8 idx; 174 u8 idx;
@@ -194,10 +201,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
194 return 0; 201 return 0;
195} 202}
196 203
197void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev) 204void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
198{ 205{
199 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 206 struct ieee80211_local *local = sdata->local;
200 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
201 struct ieee80211_supported_band *sband; 207 struct ieee80211_supported_band *sband;
202 u8 *pos; 208 u8 *pos;
203 int len, i, rate; 209 int len, i, rate;
@@ -224,11 +230,11 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev)
224 } 230 }
225 } 231 }
226 232
227 pos = skb_put(skb, 2 + sdata->u.sta.mesh_id_len); 233 pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
228 *pos++ = WLAN_EID_MESH_ID; 234 *pos++ = WLAN_EID_MESH_ID;
229 *pos++ = sdata->u.sta.mesh_id_len; 235 *pos++ = sdata->u.mesh.mesh_id_len;
230 if (sdata->u.sta.mesh_id_len) 236 if (sdata->u.mesh.mesh_id_len)
231 memcpy(pos, sdata->u.sta.mesh_id, sdata->u.sta.mesh_id_len); 237 memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
232 238
233 pos = skb_put(skb, 21); 239 pos = skb_put(skb, 21);
234 *pos++ = WLAN_EID_MESH_CONFIG; 240 *pos++ = WLAN_EID_MESH_CONFIG;
@@ -237,15 +243,15 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev)
237 *pos++ = 1; 243 *pos++ = 1;
238 244
239 /* Active path selection protocol ID */ 245 /* Active path selection protocol ID */
240 memcpy(pos, sdata->u.sta.mesh_pp_id, 4); 246 memcpy(pos, sdata->u.mesh.mesh_pp_id, 4);
241 pos += 4; 247 pos += 4;
242 248
243 /* Active path selection metric ID */ 249 /* Active path selection metric ID */
244 memcpy(pos, sdata->u.sta.mesh_pm_id, 4); 250 memcpy(pos, sdata->u.mesh.mesh_pm_id, 4);
245 pos += 4; 251 pos += 4;
246 252
247 /* Congestion control mode identifier */ 253 /* Congestion control mode identifier */
248 memcpy(pos, sdata->u.sta.mesh_cc_id, 4); 254 memcpy(pos, sdata->u.mesh.mesh_cc_id, 4);
249 pos += 4; 255 pos += 4;
250 256
251 /* Channel precedence: 257 /* Channel precedence:
@@ -255,17 +261,17 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev)
255 pos += 4; 261 pos += 4;
256 262
257 /* Mesh capability */ 263 /* Mesh capability */
258 sdata->u.sta.accepting_plinks = mesh_plink_availables(sdata); 264 sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
259 *pos++ = sdata->u.sta.accepting_plinks ? ACCEPT_PLINKS : 0x00; 265 *pos++ = sdata->u.mesh.accepting_plinks ? ACCEPT_PLINKS : 0x00;
260 *pos++ = 0x00; 266 *pos++ = 0x00;
261 267
262 return; 268 return;
263} 269}
264 270
265u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl) 271u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
266{ 272{
267 /* Use last four bytes of hw addr and interface index as hash index */ 273 /* Use last four bytes of hw addr and interface index as hash index */
268 return jhash_2words(*(u32 *)(addr+2), dev->ifindex, tbl->hash_rnd) 274 return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
269 & tbl->hash_mask; 275 & tbl->hash_mask;
270} 276}
271 277
@@ -344,10 +350,10 @@ static void ieee80211_mesh_path_timer(unsigned long data)
344{ 350{
345 struct ieee80211_sub_if_data *sdata = 351 struct ieee80211_sub_if_data *sdata =
346 (struct ieee80211_sub_if_data *) data; 352 (struct ieee80211_sub_if_data *) data;
347 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 353 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
348 struct ieee80211_local *local = wdev_priv(&sdata->wdev); 354 struct ieee80211_local *local = wdev_priv(&sdata->wdev);
349 355
350 queue_work(local->hw.workqueue, &ifsta->work); 356 queue_work(local->hw.workqueue, &ifmsh->work);
351} 357}
352 358
353struct mesh_table *mesh_table_grow(struct mesh_table *tbl) 359struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
@@ -399,50 +405,264 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
399 struct ieee80211_sub_if_data *sdata) 405 struct ieee80211_sub_if_data *sdata)
400{ 406{
401 meshhdr->flags = 0; 407 meshhdr->flags = 0;
402 meshhdr->ttl = sdata->u.sta.mshcfg.dot11MeshTTL; 408 meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
403 put_unaligned(cpu_to_le32(sdata->u.sta.mesh_seqnum), &meshhdr->seqnum); 409 put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
404 sdata->u.sta.mesh_seqnum++; 410 sdata->u.mesh.mesh_seqnum++;
405 411
406 return 6; 412 return 6;
407} 413}
408 414
415static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
416 struct ieee80211_if_mesh *ifmsh)
417{
418 bool free_plinks;
419
420#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
421 printk(KERN_DEBUG "%s: running mesh housekeeping\n",
422 sdata->dev->name);
423#endif
424
425 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
426 mesh_path_expire(sdata);
427
428 free_plinks = mesh_plink_availables(sdata);
429 if (free_plinks != sdata->u.mesh.accepting_plinks)
430 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
431
432 ifmsh->housekeeping = false;
433 mod_timer(&ifmsh->housekeeping_timer,
434 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
435}
436
437
438void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
439{
440 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
441 struct ieee80211_local *local = sdata->local;
442
443 ifmsh->housekeeping = true;
444 queue_work(local->hw.workqueue, &ifmsh->work);
445 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
446}
447
448void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
449{
450 del_timer_sync(&sdata->u.mesh.housekeeping_timer);
451 /*
452 * If the timer fired while we waited for it, it will have
453 * requeued the work. Now the work will be running again
454 * but will not rearm the timer again because it checks
455 * whether the interface is running, which, at this point,
456 * it no longer is.
457 */
458 cancel_work_sync(&sdata->u.mesh.work);
459
460 /*
461 * When we get here, the interface is marked down.
462 * Call synchronize_rcu() to wait for the RX path
463 * should it be using the interface and enqueuing
464 * frames at this very time on another CPU.
465 */
466 synchronize_rcu();
467 skb_queue_purge(&sdata->u.mesh.skb_queue);
468}
469
470static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
471 u16 stype,
472 struct ieee80211_mgmt *mgmt,
473 size_t len,
474 struct ieee80211_rx_status *rx_status)
475{
476 struct ieee80211_local *local= sdata->local;
477 struct ieee802_11_elems elems;
478 struct ieee80211_channel *channel;
479 u64 supp_rates = 0;
480 size_t baselen;
481 int freq;
482 enum ieee80211_band band = rx_status->band;
483
484 /* ignore ProbeResp to foreign address */
485 if (stype == IEEE80211_STYPE_PROBE_RESP &&
486 compare_ether_addr(mgmt->da, sdata->dev->dev_addr))
487 return;
488
489 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
490 if (baselen > len)
491 return;
492
493 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
494 &elems);
495
496 if (elems.ds_params && elems.ds_params_len == 1)
497 freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
498 else
499 freq = rx_status->freq;
500
501 channel = ieee80211_get_channel(local->hw.wiphy, freq);
502
503 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
504 return;
505
506 if (elems.mesh_id && elems.mesh_config &&
507 mesh_matches_local(&elems, sdata)) {
508 supp_rates = ieee80211_sta_get_rates(local, &elems, band);
509
510 mesh_neighbour_update(mgmt->sa, supp_rates, sdata,
511 mesh_peer_accepts_plinks(&elems));
512 }
513}
514
515static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
516 struct ieee80211_mgmt *mgmt,
517 size_t len,
518 struct ieee80211_rx_status *rx_status)
519{
520 switch (mgmt->u.action.category) {
521 case PLINK_CATEGORY:
522 mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
523 break;
524 case MESH_PATH_SEL_CATEGORY:
525 mesh_rx_path_sel_frame(sdata, mgmt, len);
526 break;
527 }
528}
529
530static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
531 struct sk_buff *skb)
532{
533 struct ieee80211_rx_status *rx_status;
534 struct ieee80211_if_mesh *ifmsh;
535 struct ieee80211_mgmt *mgmt;
536 u16 stype;
537
538 ifmsh = &sdata->u.mesh;
539
540 rx_status = (struct ieee80211_rx_status *) skb->cb;
541 mgmt = (struct ieee80211_mgmt *) skb->data;
542 stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
543
544 switch (stype) {
545 case IEEE80211_STYPE_PROBE_RESP:
546 case IEEE80211_STYPE_BEACON:
547 ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len,
548 rx_status);
549 break;
550 case IEEE80211_STYPE_ACTION:
551 ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
552 break;
553 }
554
555 kfree_skb(skb);
556}
557
558static void ieee80211_mesh_work(struct work_struct *work)
559{
560 struct ieee80211_sub_if_data *sdata =
561 container_of(work, struct ieee80211_sub_if_data, u.mesh.work);
562 struct ieee80211_local *local = sdata->local;
563 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
564 struct sk_buff *skb;
565
566 if (!netif_running(sdata->dev))
567 return;
568
569 if (local->sw_scanning || local->hw_scanning)
570 return;
571
572 while ((skb = skb_dequeue(&ifmsh->skb_queue)))
573 ieee80211_mesh_rx_queued_mgmt(sdata, skb);
574
575 if (ifmsh->preq_queue_len &&
576 time_after(jiffies,
577 ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
578 mesh_path_start_discovery(sdata);
579
580 if (ifmsh->housekeeping)
581 ieee80211_mesh_housekeeping(sdata, ifmsh);
582}
583
584void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
585{
586 struct ieee80211_sub_if_data *sdata;
587
588 rcu_read_lock();
589 list_for_each_entry_rcu(sdata, &local->interfaces, list)
590 if (ieee80211_vif_is_mesh(&sdata->vif))
591 queue_work(local->hw.workqueue, &sdata->u.mesh.work);
592 rcu_read_unlock();
593}
594
409void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) 595void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
410{ 596{
411 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 597 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
412 598
413 ifsta->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; 599 INIT_WORK(&ifmsh->work, ieee80211_mesh_work);
414 ifsta->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; 600 setup_timer(&ifmsh->housekeeping_timer,
415 ifsta->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T; 601 ieee80211_mesh_housekeeping_timer,
416 ifsta->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR; 602 (unsigned long) sdata);
417 ifsta->mshcfg.dot11MeshTTL = MESH_TTL; 603 skb_queue_head_init(&sdata->u.mesh.skb_queue);
418 ifsta->mshcfg.auto_open_plinks = true; 604
419 ifsta->mshcfg.dot11MeshMaxPeerLinks = 605 ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
606 ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
607 ifmsh->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T;
608 ifmsh->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR;
609 ifmsh->mshcfg.dot11MeshTTL = MESH_TTL;
610 ifmsh->mshcfg.auto_open_plinks = true;
611 ifmsh->mshcfg.dot11MeshMaxPeerLinks =
420 MESH_MAX_ESTAB_PLINKS; 612 MESH_MAX_ESTAB_PLINKS;
421 ifsta->mshcfg.dot11MeshHWMPactivePathTimeout = 613 ifmsh->mshcfg.dot11MeshHWMPactivePathTimeout =
422 MESH_PATH_TIMEOUT; 614 MESH_PATH_TIMEOUT;
423 ifsta->mshcfg.dot11MeshHWMPpreqMinInterval = 615 ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval =
424 MESH_PREQ_MIN_INT; 616 MESH_PREQ_MIN_INT;
425 ifsta->mshcfg.dot11MeshHWMPnetDiameterTraversalTime = 617 ifmsh->mshcfg.dot11MeshHWMPnetDiameterTraversalTime =
426 MESH_DIAM_TRAVERSAL_TIME; 618 MESH_DIAM_TRAVERSAL_TIME;
427 ifsta->mshcfg.dot11MeshHWMPmaxPREQretries = 619 ifmsh->mshcfg.dot11MeshHWMPmaxPREQretries =
428 MESH_MAX_PREQ_RETRIES; 620 MESH_MAX_PREQ_RETRIES;
429 ifsta->mshcfg.path_refresh_time = 621 ifmsh->mshcfg.path_refresh_time =
430 MESH_PATH_REFRESH_TIME; 622 MESH_PATH_REFRESH_TIME;
431 ifsta->mshcfg.min_discovery_timeout = 623 ifmsh->mshcfg.min_discovery_timeout =
432 MESH_MIN_DISCOVERY_TIMEOUT; 624 MESH_MIN_DISCOVERY_TIMEOUT;
433 ifsta->accepting_plinks = true; 625 ifmsh->accepting_plinks = true;
434 ifsta->preq_id = 0; 626 ifmsh->preq_id = 0;
435 ifsta->dsn = 0; 627 ifmsh->dsn = 0;
436 atomic_set(&ifsta->mpaths, 0); 628 atomic_set(&ifmsh->mpaths, 0);
437 mesh_rmc_init(sdata->dev); 629 mesh_rmc_init(sdata);
438 ifsta->last_preq = jiffies; 630 ifmsh->last_preq = jiffies;
439 /* Allocate all mesh structures when creating the first mesh interface. */ 631 /* Allocate all mesh structures when creating the first mesh interface. */
440 if (!mesh_allocated) 632 if (!mesh_allocated)
441 ieee80211s_init(); 633 ieee80211s_init();
442 mesh_ids_set_default(ifsta); 634 mesh_ids_set_default(ifmsh);
443 setup_timer(&ifsta->mesh_path_timer, 635 setup_timer(&ifmsh->mesh_path_timer,
444 ieee80211_mesh_path_timer, 636 ieee80211_mesh_path_timer,
445 (unsigned long) sdata); 637 (unsigned long) sdata);
446 INIT_LIST_HEAD(&ifsta->preq_queue.list); 638 INIT_LIST_HEAD(&ifmsh->preq_queue.list);
447 spin_lock_init(&ifsta->mesh_preq_queue_lock); 639 spin_lock_init(&ifmsh->mesh_preq_queue_lock);
640}
641
642ieee80211_rx_result
643ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
644 struct ieee80211_rx_status *rx_status)
645{
646 struct ieee80211_local *local = sdata->local;
647 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
648 struct ieee80211_mgmt *mgmt;
649 u16 fc;
650
651 if (skb->len < 24)
652 return RX_DROP_MONITOR;
653
654 mgmt = (struct ieee80211_mgmt *) skb->data;
655 fc = le16_to_cpu(mgmt->frame_control);
656
657 switch (fc & IEEE80211_FCTL_STYPE) {
658 case IEEE80211_STYPE_PROBE_RESP:
659 case IEEE80211_STYPE_BEACON:
660 case IEEE80211_STYPE_ACTION:
661 memcpy(skb->cb, rx_status, sizeof(*rx_status));
662 skb_queue_tail(&ifmsh->skb_queue, skb);
663 queue_work(local->hw.workqueue, &ifmsh->work);
664 return RX_QUEUED;
665 }
666
667 return RX_CONTINUE;
448} 668}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 7495fbb0d211..8ee414a0447c 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -47,7 +47,7 @@ enum mesh_path_flags {
47 * struct mesh_path - mac80211 mesh path structure 47 * struct mesh_path - mac80211 mesh path structure
48 * 48 *
49 * @dst: mesh path destination mac address 49 * @dst: mesh path destination mac address
50 * @dev: mesh path device 50 * @sdata: mesh subif
51 * @next_hop: mesh neighbor to which frames for this destination will be 51 * @next_hop: mesh neighbor to which frames for this destination will be
52 * forwarded 52 * forwarded
53 * @timer: mesh path discovery timer 53 * @timer: mesh path discovery timer
@@ -64,14 +64,14 @@ enum mesh_path_flags {
64 * @state_lock: mesh pat state lock 64 * @state_lock: mesh pat state lock
65 * 65 *
66 * 66 *
67 * The combination of dst and dev is unique in the mesh path table. Since the 67 * The combination of dst and sdata is unique in the mesh path table. Since the
68 * next_hop STA is only protected by RCU as well, deleting the STA must also 68 * next_hop STA is only protected by RCU as well, deleting the STA must also
69 * remove/substitute the mesh_path structure and wait until that is no longer 69 * remove/substitute the mesh_path structure and wait until that is no longer
70 * reachable before destroying the STA completely. 70 * reachable before destroying the STA completely.
71 */ 71 */
72struct mesh_path { 72struct mesh_path {
73 u8 dst[ETH_ALEN]; 73 u8 dst[ETH_ALEN];
74 struct net_device *dev; 74 struct ieee80211_sub_if_data *sdata;
75 struct sta_info *next_hop; 75 struct sta_info *next_hop;
76 struct timer_list timer; 76 struct timer_list timer;
77 struct sk_buff_head frame_queue; 77 struct sk_buff_head frame_queue;
@@ -203,67 +203,79 @@ int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
203int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 203int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
204 struct ieee80211_sub_if_data *sdata); 204 struct ieee80211_sub_if_data *sdata);
205int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, 205int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
206 struct net_device *dev); 206 struct ieee80211_sub_if_data *sdata);
207bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev); 207bool mesh_matches_local(struct ieee802_11_elems *ie,
208void mesh_ids_set_default(struct ieee80211_if_sta *sta); 208 struct ieee80211_sub_if_data *sdata);
209void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev); 209void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
210void mesh_rmc_free(struct net_device *dev); 210void mesh_mgmt_ies_add(struct sk_buff *skb,
211int mesh_rmc_init(struct net_device *dev); 211 struct ieee80211_sub_if_data *sdata);
212void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
213int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
212void ieee80211s_init(void); 214void ieee80211s_init(void);
213void ieee80211s_stop(void); 215void ieee80211s_stop(void);
214void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); 216void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
217ieee80211_rx_result
218ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
219 struct ieee80211_rx_status *rx_status);
220void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
221void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
215 222
216/* Mesh paths */ 223/* Mesh paths */
217int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev); 224int mesh_nexthop_lookup(struct sk_buff *skb,
218void mesh_path_start_discovery(struct net_device *dev); 225 struct ieee80211_sub_if_data *sdata);
219struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev); 226void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
220struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev); 227struct mesh_path *mesh_path_lookup(u8 *dst,
228 struct ieee80211_sub_if_data *sdata);
229struct mesh_path *mesh_path_lookup_by_idx(int idx,
230 struct ieee80211_sub_if_data *sdata);
221void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); 231void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
222void mesh_path_expire(struct net_device *dev); 232void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
223void mesh_path_flush(struct net_device *dev); 233void mesh_path_flush(struct ieee80211_sub_if_data *sdata);
224void mesh_rx_path_sel_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, 234void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
225 size_t len); 235 struct ieee80211_mgmt *mgmt, size_t len);
226int mesh_path_add(u8 *dst, struct net_device *dev); 236int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
227/* Mesh plinks */ 237/* Mesh plinks */
228void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, 238void mesh_neighbour_update(u8 *hw_addr, u64 rates,
229 bool add); 239 struct ieee80211_sub_if_data *sdata, bool add);
230bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, 240bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
231 struct net_device *dev);
232void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 241void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
233void mesh_plink_broken(struct sta_info *sta); 242void mesh_plink_broken(struct sta_info *sta);
234void mesh_plink_deactivate(struct sta_info *sta); 243void mesh_plink_deactivate(struct sta_info *sta);
235int mesh_plink_open(struct sta_info *sta); 244int mesh_plink_open(struct sta_info *sta);
236int mesh_plink_close(struct sta_info *sta); 245int mesh_plink_close(struct sta_info *sta);
237void mesh_plink_block(struct sta_info *sta); 246void mesh_plink_block(struct sta_info *sta);
238void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, 247void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
239 size_t len, struct ieee80211_rx_status *rx_status); 248 struct ieee80211_mgmt *mgmt, size_t len,
249 struct ieee80211_rx_status *rx_status);
240 250
241/* Private interfaces */ 251/* Private interfaces */
242/* Mesh tables */ 252/* Mesh tables */
243struct mesh_table *mesh_table_alloc(int size_order); 253struct mesh_table *mesh_table_alloc(int size_order);
244void mesh_table_free(struct mesh_table *tbl, bool free_leafs); 254void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
245struct mesh_table *mesh_table_grow(struct mesh_table *tbl); 255struct mesh_table *mesh_table_grow(struct mesh_table *tbl);
246u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl); 256u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
257 struct mesh_table *tbl);
247/* Mesh paths */ 258/* Mesh paths */
248int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra, 259int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra,
249 struct net_device *dev); 260 struct ieee80211_sub_if_data *sdata);
250void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); 261void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
251void mesh_path_flush_pending(struct mesh_path *mpath); 262void mesh_path_flush_pending(struct mesh_path *mpath);
252void mesh_path_tx_pending(struct mesh_path *mpath); 263void mesh_path_tx_pending(struct mesh_path *mpath);
253int mesh_pathtbl_init(void); 264int mesh_pathtbl_init(void);
254void mesh_pathtbl_unregister(void); 265void mesh_pathtbl_unregister(void);
255int mesh_path_del(u8 *addr, struct net_device *dev); 266int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
256void mesh_path_timer(unsigned long data); 267void mesh_path_timer(unsigned long data);
257void mesh_path_flush_by_nexthop(struct sta_info *sta); 268void mesh_path_flush_by_nexthop(struct sta_info *sta);
258void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev); 269void mesh_path_discard_frame(struct sk_buff *skb,
270 struct ieee80211_sub_if_data *sdata);
259 271
260#ifdef CONFIG_MAC80211_MESH 272#ifdef CONFIG_MAC80211_MESH
261extern int mesh_allocated; 273extern int mesh_allocated;
262 274
263static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) 275static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata)
264{ 276{
265 return sdata->u.sta.mshcfg.dot11MeshMaxPeerLinks - 277 return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks -
266 atomic_read(&sdata->u.sta.mshstats.estab_plinks); 278 atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
267} 279}
268 280
269static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) 281static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata)
@@ -281,8 +293,12 @@ static inline void mesh_path_activate(struct mesh_path *mpath)
281 for (i = 0; i <= x->hash_mask; i++) \ 293 for (i = 0; i <= x->hash_mask; i++) \
282 hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list) 294 hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
283 295
296void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
297
284#else 298#else
285#define mesh_allocated 0 299#define mesh_allocated 0
300static inline void
301ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
286#endif 302#endif
287 303
288#endif /* IEEE80211S_H */ 304#endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 08aca446ca01..501c7831adb4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -64,14 +64,14 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
64#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0) 64#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0)
65 65
66#define net_traversal_jiffies(s) \ 66#define net_traversal_jiffies(s) \
67 msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) 67 msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
68#define default_lifetime(s) \ 68#define default_lifetime(s) \
69 MSEC_TO_TU(s->u.sta.mshcfg.dot11MeshHWMPactivePathTimeout) 69 MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
70#define min_preq_int_jiff(s) \ 70#define min_preq_int_jiff(s) \
71 (msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPpreqMinInterval)) 71 (msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
72#define max_preq_retries(s) (s->u.sta.mshcfg.dot11MeshHWMPmaxPREQretries) 72#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
73#define disc_timeout_jiff(s) \ 73#define disc_timeout_jiff(s) \
74 msecs_to_jiffies(sdata->u.sta.mshcfg.min_discovery_timeout) 74 msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
75 75
76enum mpath_frame_type { 76enum mpath_frame_type {
77 MPATH_PREQ = 0, 77 MPATH_PREQ = 0,
@@ -82,9 +82,9 @@ enum mpath_frame_type {
82static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, 82static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
83 u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst, 83 u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
84 __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime, 84 __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
85 __le32 metric, __le32 preq_id, struct net_device *dev) 85 __le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata)
86{ 86{
87 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 87 struct ieee80211_local *local = sdata->local;
88 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 88 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
89 struct ieee80211_mgmt *mgmt; 89 struct ieee80211_mgmt *mgmt;
90 u8 *pos; 90 u8 *pos;
@@ -99,11 +99,11 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
99 mgmt = (struct ieee80211_mgmt *) 99 mgmt = (struct ieee80211_mgmt *)
100 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); 100 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
101 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); 101 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
102 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 102 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
103 IEEE80211_STYPE_ACTION); 103 IEEE80211_STYPE_ACTION);
104 104
105 memcpy(mgmt->da, da, ETH_ALEN); 105 memcpy(mgmt->da, da, ETH_ALEN);
106 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 106 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
107 /* BSSID is left zeroed, wildcard value */ 107 /* BSSID is left zeroed, wildcard value */
108 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 108 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
109 mgmt->u.action.u.mesh_action.action_code = action; 109 mgmt->u.action.u.mesh_action.action_code = action;
@@ -149,7 +149,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
149 pos += ETH_ALEN; 149 pos += ETH_ALEN;
150 memcpy(pos, &dst_dsn, 4); 150 memcpy(pos, &dst_dsn, 4);
151 151
152 ieee80211_sta_tx(dev, skb, 0); 152 ieee80211_tx_skb(sdata, skb, 0);
153 return 0; 153 return 0;
154} 154}
155 155
@@ -161,9 +161,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
161 * @ra: node this frame is addressed to 161 * @ra: node this frame is addressed to
162 */ 162 */
163int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, 163int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
164 struct net_device *dev) 164 struct ieee80211_sub_if_data *sdata)
165{ 165{
166 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 166 struct ieee80211_local *local = sdata->local;
167 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 167 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
168 struct ieee80211_mgmt *mgmt; 168 struct ieee80211_mgmt *mgmt;
169 u8 *pos; 169 u8 *pos;
@@ -178,11 +178,11 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
178 mgmt = (struct ieee80211_mgmt *) 178 mgmt = (struct ieee80211_mgmt *)
179 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); 179 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
180 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); 180 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
181 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 181 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
182 IEEE80211_STYPE_ACTION); 182 IEEE80211_STYPE_ACTION);
183 183
184 memcpy(mgmt->da, ra, ETH_ALEN); 184 memcpy(mgmt->da, ra, ETH_ALEN);
185 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 185 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
186 /* BSSID is left zeroed, wildcard value */ 186 /* BSSID is left zeroed, wildcard value */
187 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 187 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
188 mgmt->u.action.u.mesh_action.action_code = MPATH_PERR; 188 mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
@@ -198,7 +198,7 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
198 pos += ETH_ALEN; 198 pos += ETH_ALEN;
199 memcpy(pos, &dst_dsn, 4); 199 memcpy(pos, &dst_dsn, 4);
200 200
201 ieee80211_sta_tx(dev, skb, 0); 201 ieee80211_tx_skb(sdata, skb, 0);
202 return 0; 202 return 0;
203} 203}
204 204
@@ -223,7 +223,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
223 /* bitrate is in units of 100 Kbps, while we need rate in units of 223 /* bitrate is in units of 100 Kbps, while we need rate in units of
224 * 1Mbps. This will be corrected on tx_time computation. 224 * 1Mbps. This will be corrected on tx_time computation.
225 */ 225 */
226 rate = sband->bitrates[sta->txrate_idx].bitrate; 226 rate = sband->bitrates[sta->last_txrate_idx].bitrate;
227 tx_time = (device_constant + 10 * test_frame_len / rate); 227 tx_time = (device_constant + 10 * test_frame_len / rate);
228 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); 228 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
229 result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ; 229 result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
@@ -233,7 +233,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
233/** 233/**
234 * hwmp_route_info_get - Update routing info to originator and transmitter 234 * hwmp_route_info_get - Update routing info to originator and transmitter
235 * 235 *
236 * @dev: local mesh interface 236 * @sdata: local mesh subif
237 * @mgmt: mesh management frame 237 * @mgmt: mesh management frame
238 * @hwmp_ie: hwmp information element (PREP or PREQ) 238 * @hwmp_ie: hwmp information element (PREP or PREQ)
239 * 239 *
@@ -246,11 +246,11 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
246 * Notes: this function is the only place (besides user-provided info) where 246 * Notes: this function is the only place (besides user-provided info) where
247 * path routing information is updated. 247 * path routing information is updated.
248 */ 248 */
249static u32 hwmp_route_info_get(struct net_device *dev, 249static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
250 struct ieee80211_mgmt *mgmt, 250 struct ieee80211_mgmt *mgmt,
251 u8 *hwmp_ie) 251 u8 *hwmp_ie)
252{ 252{
253 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 253 struct ieee80211_local *local = sdata->local;
254 struct mesh_path *mpath; 254 struct mesh_path *mpath;
255 struct sta_info *sta; 255 struct sta_info *sta;
256 bool fresh_info; 256 bool fresh_info;
@@ -301,14 +301,14 @@ static u32 hwmp_route_info_get(struct net_device *dev,
301 new_metric = MAX_METRIC; 301 new_metric = MAX_METRIC;
302 exp_time = TU_TO_EXP_TIME(orig_lifetime); 302 exp_time = TU_TO_EXP_TIME(orig_lifetime);
303 303
304 if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) { 304 if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
305 /* This MP is the originator, we are not interested in this 305 /* This MP is the originator, we are not interested in this
306 * frame, except for updating transmitter's path info. 306 * frame, except for updating transmitter's path info.
307 */ 307 */
308 process = false; 308 process = false;
309 fresh_info = false; 309 fresh_info = false;
310 } else { 310 } else {
311 mpath = mesh_path_lookup(orig_addr, dev); 311 mpath = mesh_path_lookup(orig_addr, sdata);
312 if (mpath) { 312 if (mpath) {
313 spin_lock_bh(&mpath->state_lock); 313 spin_lock_bh(&mpath->state_lock);
314 if (mpath->flags & MESH_PATH_FIXED) 314 if (mpath->flags & MESH_PATH_FIXED)
@@ -324,8 +324,8 @@ static u32 hwmp_route_info_get(struct net_device *dev,
324 } 324 }
325 } 325 }
326 } else { 326 } else {
327 mesh_path_add(orig_addr, dev); 327 mesh_path_add(orig_addr, sdata);
328 mpath = mesh_path_lookup(orig_addr, dev); 328 mpath = mesh_path_lookup(orig_addr, sdata);
329 if (!mpath) { 329 if (!mpath) {
330 rcu_read_unlock(); 330 rcu_read_unlock();
331 return 0; 331 return 0;
@@ -357,7 +357,7 @@ static u32 hwmp_route_info_get(struct net_device *dev,
357 else { 357 else {
358 fresh_info = true; 358 fresh_info = true;
359 359
360 mpath = mesh_path_lookup(ta, dev); 360 mpath = mesh_path_lookup(ta, sdata);
361 if (mpath) { 361 if (mpath) {
362 spin_lock_bh(&mpath->state_lock); 362 spin_lock_bh(&mpath->state_lock);
363 if ((mpath->flags & MESH_PATH_FIXED) || 363 if ((mpath->flags & MESH_PATH_FIXED) ||
@@ -365,8 +365,8 @@ static u32 hwmp_route_info_get(struct net_device *dev,
365 (last_hop_metric > mpath->metric))) 365 (last_hop_metric > mpath->metric)))
366 fresh_info = false; 366 fresh_info = false;
367 } else { 367 } else {
368 mesh_path_add(ta, dev); 368 mesh_path_add(ta, sdata);
369 mpath = mesh_path_lookup(ta, dev); 369 mpath = mesh_path_lookup(ta, sdata);
370 if (!mpath) { 370 if (!mpath) {
371 rcu_read_unlock(); 371 rcu_read_unlock();
372 return 0; 372 return 0;
@@ -392,11 +392,10 @@ static u32 hwmp_route_info_get(struct net_device *dev,
392 return process ? new_metric : 0; 392 return process ? new_metric : 0;
393} 393}
394 394
395static void hwmp_preq_frame_process(struct net_device *dev, 395static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
396 struct ieee80211_mgmt *mgmt, 396 struct ieee80211_mgmt *mgmt,
397 u8 *preq_elem, u32 metric) { 397 u8 *preq_elem, u32 metric) {
398 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 398 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
399 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
400 struct mesh_path *mpath; 399 struct mesh_path *mpath;
401 u8 *dst_addr, *orig_addr; 400 u8 *dst_addr, *orig_addr;
402 u8 dst_flags, ttl; 401 u8 dst_flags, ttl;
@@ -411,19 +410,19 @@ static void hwmp_preq_frame_process(struct net_device *dev,
411 orig_dsn = PREQ_IE_ORIG_DSN(preq_elem); 410 orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
412 dst_flags = PREQ_IE_DST_F(preq_elem); 411 dst_flags = PREQ_IE_DST_F(preq_elem);
413 412
414 if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) { 413 if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
415 forward = false; 414 forward = false;
416 reply = true; 415 reply = true;
417 metric = 0; 416 metric = 0;
418 if (time_after(jiffies, ifsta->last_dsn_update + 417 if (time_after(jiffies, ifmsh->last_dsn_update +
419 net_traversal_jiffies(sdata)) || 418 net_traversal_jiffies(sdata)) ||
420 time_before(jiffies, ifsta->last_dsn_update)) { 419 time_before(jiffies, ifmsh->last_dsn_update)) {
421 dst_dsn = ++ifsta->dsn; 420 dst_dsn = ++ifmsh->dsn;
422 ifsta->last_dsn_update = jiffies; 421 ifmsh->last_dsn_update = jiffies;
423 } 422 }
424 } else { 423 } else {
425 rcu_read_lock(); 424 rcu_read_lock();
426 mpath = mesh_path_lookup(dst_addr, dev); 425 mpath = mesh_path_lookup(dst_addr, sdata);
427 if (mpath) { 426 if (mpath) {
428 if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || 427 if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
429 DSN_LT(mpath->dsn, dst_dsn)) { 428 DSN_LT(mpath->dsn, dst_dsn)) {
@@ -445,15 +444,15 @@ static void hwmp_preq_frame_process(struct net_device *dev,
445 444
446 if (reply) { 445 if (reply) {
447 lifetime = PREQ_IE_LIFETIME(preq_elem); 446 lifetime = PREQ_IE_LIFETIME(preq_elem);
448 ttl = ifsta->mshcfg.dot11MeshTTL; 447 ttl = ifmsh->mshcfg.dot11MeshTTL;
449 if (ttl != 0) 448 if (ttl != 0)
450 mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr, 449 mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr,
451 cpu_to_le32(dst_dsn), 0, orig_addr, 450 cpu_to_le32(dst_dsn), 0, orig_addr,
452 cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl, 451 cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
453 cpu_to_le32(lifetime), cpu_to_le32(metric), 452 cpu_to_le32(lifetime), cpu_to_le32(metric),
454 0, dev); 453 0, sdata);
455 else 454 else
456 ifsta->mshstats.dropped_frames_ttl++; 455 ifmsh->mshstats.dropped_frames_ttl++;
457 } 456 }
458 457
459 if (forward) { 458 if (forward) {
@@ -463,7 +462,7 @@ static void hwmp_preq_frame_process(struct net_device *dev,
463 ttl = PREQ_IE_TTL(preq_elem); 462 ttl = PREQ_IE_TTL(preq_elem);
464 lifetime = PREQ_IE_LIFETIME(preq_elem); 463 lifetime = PREQ_IE_LIFETIME(preq_elem);
465 if (ttl <= 1) { 464 if (ttl <= 1) {
466 ifsta->mshstats.dropped_frames_ttl++; 465 ifmsh->mshstats.dropped_frames_ttl++;
467 return; 466 return;
468 } 467 }
469 --ttl; 468 --ttl;
@@ -472,20 +471,19 @@ static void hwmp_preq_frame_process(struct net_device *dev,
472 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; 471 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
473 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, 472 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
474 cpu_to_le32(orig_dsn), dst_flags, dst_addr, 473 cpu_to_le32(orig_dsn), dst_flags, dst_addr,
475 cpu_to_le32(dst_dsn), dev->broadcast, 474 cpu_to_le32(dst_dsn), sdata->dev->broadcast,
476 hopcount, ttl, cpu_to_le32(lifetime), 475 hopcount, ttl, cpu_to_le32(lifetime),
477 cpu_to_le32(metric), cpu_to_le32(preq_id), 476 cpu_to_le32(metric), cpu_to_le32(preq_id),
478 dev); 477 sdata);
479 ifsta->mshstats.fwded_frames++; 478 ifmsh->mshstats.fwded_frames++;
480 } 479 }
481} 480}
482 481
483 482
484static void hwmp_prep_frame_process(struct net_device *dev, 483static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
485 struct ieee80211_mgmt *mgmt, 484 struct ieee80211_mgmt *mgmt,
486 u8 *prep_elem, u32 metric) 485 u8 *prep_elem, u32 metric)
487{ 486{
488 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
489 struct mesh_path *mpath; 487 struct mesh_path *mpath;
490 u8 *dst_addr, *orig_addr; 488 u8 *dst_addr, *orig_addr;
491 u8 ttl, hopcount, flags; 489 u8 ttl, hopcount, flags;
@@ -499,18 +497,18 @@ static void hwmp_prep_frame_process(struct net_device *dev,
499 * replies 497 * replies
500 */ 498 */
501 dst_addr = PREP_IE_DST_ADDR(prep_elem); 499 dst_addr = PREP_IE_DST_ADDR(prep_elem);
502 if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) 500 if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
503 /* destination, no forwarding required */ 501 /* destination, no forwarding required */
504 return; 502 return;
505 503
506 ttl = PREP_IE_TTL(prep_elem); 504 ttl = PREP_IE_TTL(prep_elem);
507 if (ttl <= 1) { 505 if (ttl <= 1) {
508 sdata->u.sta.mshstats.dropped_frames_ttl++; 506 sdata->u.mesh.mshstats.dropped_frames_ttl++;
509 return; 507 return;
510 } 508 }
511 509
512 rcu_read_lock(); 510 rcu_read_lock();
513 mpath = mesh_path_lookup(dst_addr, dev); 511 mpath = mesh_path_lookup(dst_addr, sdata);
514 if (mpath) 512 if (mpath)
515 spin_lock_bh(&mpath->state_lock); 513 spin_lock_bh(&mpath->state_lock);
516 else 514 else
@@ -519,7 +517,7 @@ static void hwmp_prep_frame_process(struct net_device *dev,
519 spin_unlock_bh(&mpath->state_lock); 517 spin_unlock_bh(&mpath->state_lock);
520 goto fail; 518 goto fail;
521 } 519 }
522 memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN); 520 memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
523 spin_unlock_bh(&mpath->state_lock); 521 spin_unlock_bh(&mpath->state_lock);
524 --ttl; 522 --ttl;
525 flags = PREP_IE_FLAGS(prep_elem); 523 flags = PREP_IE_FLAGS(prep_elem);
@@ -531,20 +529,20 @@ static void hwmp_prep_frame_process(struct net_device *dev,
531 529
532 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, 530 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
533 cpu_to_le32(orig_dsn), 0, dst_addr, 531 cpu_to_le32(orig_dsn), 0, dst_addr,
534 cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl, 532 cpu_to_le32(dst_dsn), mpath->next_hop->sta.addr, hopcount, ttl,
535 cpu_to_le32(lifetime), cpu_to_le32(metric), 533 cpu_to_le32(lifetime), cpu_to_le32(metric),
536 0, dev); 534 0, sdata);
537 rcu_read_unlock(); 535 rcu_read_unlock();
538 sdata->u.sta.mshstats.fwded_frames++; 536 sdata->u.mesh.mshstats.fwded_frames++;
539 return; 537 return;
540 538
541fail: 539fail:
542 rcu_read_unlock(); 540 rcu_read_unlock();
543 sdata->u.sta.mshstats.dropped_frames_no_route++; 541 sdata->u.mesh.mshstats.dropped_frames_no_route++;
544 return; 542 return;
545} 543}
546 544
547static void hwmp_perr_frame_process(struct net_device *dev, 545static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
548 struct ieee80211_mgmt *mgmt, u8 *perr_elem) 546 struct ieee80211_mgmt *mgmt, u8 *perr_elem)
549{ 547{
550 struct mesh_path *mpath; 548 struct mesh_path *mpath;
@@ -555,18 +553,18 @@ static void hwmp_perr_frame_process(struct net_device *dev,
555 dst_addr = PERR_IE_DST_ADDR(perr_elem); 553 dst_addr = PERR_IE_DST_ADDR(perr_elem);
556 dst_dsn = PERR_IE_DST_DSN(perr_elem); 554 dst_dsn = PERR_IE_DST_DSN(perr_elem);
557 rcu_read_lock(); 555 rcu_read_lock();
558 mpath = mesh_path_lookup(dst_addr, dev); 556 mpath = mesh_path_lookup(dst_addr, sdata);
559 if (mpath) { 557 if (mpath) {
560 spin_lock_bh(&mpath->state_lock); 558 spin_lock_bh(&mpath->state_lock);
561 if (mpath->flags & MESH_PATH_ACTIVE && 559 if (mpath->flags & MESH_PATH_ACTIVE &&
562 memcmp(ta, mpath->next_hop->addr, ETH_ALEN) == 0 && 560 memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 &&
563 (!(mpath->flags & MESH_PATH_DSN_VALID) || 561 (!(mpath->flags & MESH_PATH_DSN_VALID) ||
564 DSN_GT(dst_dsn, mpath->dsn))) { 562 DSN_GT(dst_dsn, mpath->dsn))) {
565 mpath->flags &= ~MESH_PATH_ACTIVE; 563 mpath->flags &= ~MESH_PATH_ACTIVE;
566 mpath->dsn = dst_dsn; 564 mpath->dsn = dst_dsn;
567 spin_unlock_bh(&mpath->state_lock); 565 spin_unlock_bh(&mpath->state_lock);
568 mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn), 566 mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn),
569 dev->broadcast, dev); 567 sdata->dev->broadcast, sdata);
570 } else 568 } else
571 spin_unlock_bh(&mpath->state_lock); 569 spin_unlock_bh(&mpath->state_lock);
572 } 570 }
@@ -575,7 +573,7 @@ static void hwmp_perr_frame_process(struct net_device *dev,
575 573
576 574
577 575
578void mesh_rx_path_sel_frame(struct net_device *dev, 576void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
579 struct ieee80211_mgmt *mgmt, 577 struct ieee80211_mgmt *mgmt,
580 size_t len) 578 size_t len)
581{ 579{
@@ -583,6 +581,10 @@ void mesh_rx_path_sel_frame(struct net_device *dev,
583 size_t baselen; 581 size_t baselen;
584 u32 last_hop_metric; 582 u32 last_hop_metric;
585 583
584 /* need action_code */
585 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
586 return;
587
586 baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; 588 baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
587 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, 589 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
588 len - baselen, &elems); 590 len - baselen, &elems);
@@ -592,25 +594,25 @@ void mesh_rx_path_sel_frame(struct net_device *dev,
592 if (!elems.preq || elems.preq_len != 37) 594 if (!elems.preq || elems.preq_len != 37)
593 /* Right now we support just 1 destination and no AE */ 595 /* Right now we support just 1 destination and no AE */
594 return; 596 return;
595 last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.preq); 597 last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq);
596 if (!last_hop_metric) 598 if (!last_hop_metric)
597 return; 599 return;
598 hwmp_preq_frame_process(dev, mgmt, elems.preq, last_hop_metric); 600 hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric);
599 break; 601 break;
600 case MPATH_PREP: 602 case MPATH_PREP:
601 if (!elems.prep || elems.prep_len != 31) 603 if (!elems.prep || elems.prep_len != 31)
602 /* Right now we support no AE */ 604 /* Right now we support no AE */
603 return; 605 return;
604 last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.prep); 606 last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep);
605 if (!last_hop_metric) 607 if (!last_hop_metric)
606 return; 608 return;
607 hwmp_prep_frame_process(dev, mgmt, elems.prep, last_hop_metric); 609 hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric);
608 break; 610 break;
609 case MPATH_PERR: 611 case MPATH_PERR:
610 if (!elems.perr || elems.perr_len != 12) 612 if (!elems.perr || elems.perr_len != 12)
611 /* Right now we support only one destination per PERR */ 613 /* Right now we support only one destination per PERR */
612 return; 614 return;
613 hwmp_perr_frame_process(dev, mgmt, elems.perr); 615 hwmp_perr_frame_process(sdata, mgmt, elems.perr);
614 default: 616 default:
615 return; 617 return;
616 } 618 }
@@ -628,9 +630,8 @@ void mesh_rx_path_sel_frame(struct net_device *dev,
628 */ 630 */
629static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) 631static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
630{ 632{
631 struct ieee80211_sub_if_data *sdata = 633 struct ieee80211_sub_if_data *sdata = mpath->sdata;
632 IEEE80211_DEV_TO_SUB_IF(mpath->dev); 634 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
633 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
634 struct mesh_preq_queue *preq_node; 635 struct mesh_preq_queue *preq_node;
635 636
636 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL); 637 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL);
@@ -639,9 +640,9 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
639 return; 640 return;
640 } 641 }
641 642
642 spin_lock(&ifsta->mesh_preq_queue_lock); 643 spin_lock(&ifmsh->mesh_preq_queue_lock);
643 if (ifsta->preq_queue_len == MAX_PREQ_QUEUE_LEN) { 644 if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
644 spin_unlock(&ifsta->mesh_preq_queue_lock); 645 spin_unlock(&ifmsh->mesh_preq_queue_lock);
645 kfree(preq_node); 646 kfree(preq_node);
646 if (printk_ratelimit()) 647 if (printk_ratelimit())
647 printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n"); 648 printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n");
@@ -651,55 +652,53 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
651 memcpy(preq_node->dst, mpath->dst, ETH_ALEN); 652 memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
652 preq_node->flags = flags; 653 preq_node->flags = flags;
653 654
654 list_add_tail(&preq_node->list, &ifsta->preq_queue.list); 655 list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
655 ++ifsta->preq_queue_len; 656 ++ifmsh->preq_queue_len;
656 spin_unlock(&ifsta->mesh_preq_queue_lock); 657 spin_unlock(&ifmsh->mesh_preq_queue_lock);
657 658
658 if (time_after(jiffies, ifsta->last_preq + min_preq_int_jiff(sdata))) 659 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
659 queue_work(sdata->local->hw.workqueue, &ifsta->work); 660 queue_work(sdata->local->hw.workqueue, &ifmsh->work);
660 661
661 else if (time_before(jiffies, ifsta->last_preq)) { 662 else if (time_before(jiffies, ifmsh->last_preq)) {
662 /* avoid long wait if did not send preqs for a long time 663 /* avoid long wait if did not send preqs for a long time
663 * and jiffies wrapped around 664 * and jiffies wrapped around
664 */ 665 */
665 ifsta->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; 666 ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
666 queue_work(sdata->local->hw.workqueue, &ifsta->work); 667 queue_work(sdata->local->hw.workqueue, &ifmsh->work);
667 } else 668 } else
668 mod_timer(&ifsta->mesh_path_timer, ifsta->last_preq + 669 mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
669 min_preq_int_jiff(sdata)); 670 min_preq_int_jiff(sdata));
670} 671}
671 672
672/** 673/**
673 * mesh_path_start_discovery - launch a path discovery from the PREQ queue 674 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
674 * 675 *
675 * @dev: local mesh interface 676 * @sdata: local mesh subif
676 */ 677 */
677void mesh_path_start_discovery(struct net_device *dev) 678void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
678{ 679{
679 struct ieee80211_sub_if_data *sdata = 680 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
680 IEEE80211_DEV_TO_SUB_IF(dev);
681 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
682 struct mesh_preq_queue *preq_node; 681 struct mesh_preq_queue *preq_node;
683 struct mesh_path *mpath; 682 struct mesh_path *mpath;
684 u8 ttl, dst_flags; 683 u8 ttl, dst_flags;
685 u32 lifetime; 684 u32 lifetime;
686 685
687 spin_lock(&ifsta->mesh_preq_queue_lock); 686 spin_lock(&ifmsh->mesh_preq_queue_lock);
688 if (!ifsta->preq_queue_len || 687 if (!ifmsh->preq_queue_len ||
689 time_before(jiffies, ifsta->last_preq + 688 time_before(jiffies, ifmsh->last_preq +
690 min_preq_int_jiff(sdata))) { 689 min_preq_int_jiff(sdata))) {
691 spin_unlock(&ifsta->mesh_preq_queue_lock); 690 spin_unlock(&ifmsh->mesh_preq_queue_lock);
692 return; 691 return;
693 } 692 }
694 693
695 preq_node = list_first_entry(&ifsta->preq_queue.list, 694 preq_node = list_first_entry(&ifmsh->preq_queue.list,
696 struct mesh_preq_queue, list); 695 struct mesh_preq_queue, list);
697 list_del(&preq_node->list); 696 list_del(&preq_node->list);
698 --ifsta->preq_queue_len; 697 --ifmsh->preq_queue_len;
699 spin_unlock(&ifsta->mesh_preq_queue_lock); 698 spin_unlock(&ifmsh->mesh_preq_queue_lock);
700 699
701 rcu_read_lock(); 700 rcu_read_lock();
702 mpath = mesh_path_lookup(preq_node->dst, dev); 701 mpath = mesh_path_lookup(preq_node->dst, sdata);
703 if (!mpath) 702 if (!mpath)
704 goto enddiscovery; 703 goto enddiscovery;
705 704
@@ -721,18 +720,18 @@ void mesh_path_start_discovery(struct net_device *dev)
721 goto enddiscovery; 720 goto enddiscovery;
722 } 721 }
723 722
724 ifsta->last_preq = jiffies; 723 ifmsh->last_preq = jiffies;
725 724
726 if (time_after(jiffies, ifsta->last_dsn_update + 725 if (time_after(jiffies, ifmsh->last_dsn_update +
727 net_traversal_jiffies(sdata)) || 726 net_traversal_jiffies(sdata)) ||
728 time_before(jiffies, ifsta->last_dsn_update)) { 727 time_before(jiffies, ifmsh->last_dsn_update)) {
729 ++ifsta->dsn; 728 ++ifmsh->dsn;
730 sdata->u.sta.last_dsn_update = jiffies; 729 sdata->u.mesh.last_dsn_update = jiffies;
731 } 730 }
732 lifetime = default_lifetime(sdata); 731 lifetime = default_lifetime(sdata);
733 ttl = sdata->u.sta.mshcfg.dot11MeshTTL; 732 ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
734 if (ttl == 0) { 733 if (ttl == 0) {
735 sdata->u.sta.mshstats.dropped_frames_ttl++; 734 sdata->u.mesh.mshstats.dropped_frames_ttl++;
736 spin_unlock_bh(&mpath->state_lock); 735 spin_unlock_bh(&mpath->state_lock);
737 goto enddiscovery; 736 goto enddiscovery;
738 } 737 }
@@ -743,11 +742,11 @@ void mesh_path_start_discovery(struct net_device *dev)
743 dst_flags = MP_F_RF; 742 dst_flags = MP_F_RF;
744 743
745 spin_unlock_bh(&mpath->state_lock); 744 spin_unlock_bh(&mpath->state_lock);
746 mesh_path_sel_frame_tx(MPATH_PREQ, 0, dev->dev_addr, 745 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr,
747 cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst, 746 cpu_to_le32(ifmsh->dsn), dst_flags, mpath->dst,
748 cpu_to_le32(mpath->dsn), dev->broadcast, 0, 747 cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0,
749 ttl, cpu_to_le32(lifetime), 0, 748 ttl, cpu_to_le32(lifetime), 0,
750 cpu_to_le32(ifsta->preq_id++), dev); 749 cpu_to_le32(ifmsh->preq_id++), sdata);
751 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 750 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
752 751
753enddiscovery: 752enddiscovery:
@@ -759,7 +758,7 @@ enddiscovery:
759 * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame 758 * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame
760 * 759 *
761 * @skb: 802.11 frame to be sent 760 * @skb: 802.11 frame to be sent
762 * @dev: network device the frame will be sent through 761 * @sdata: network subif the frame will be sent through
763 * @fwd_frame: true if this frame was originally from a different host 762 * @fwd_frame: true if this frame was originally from a different host
764 * 763 *
765 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is 764 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
@@ -767,9 +766,9 @@ enddiscovery:
767 * sent when the path is resolved. This means the caller must not free the skb 766 * sent when the path is resolved. This means the caller must not free the skb
768 * in this case. 767 * in this case.
769 */ 768 */
770int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) 769int mesh_nexthop_lookup(struct sk_buff *skb,
770 struct ieee80211_sub_if_data *sdata)
771{ 771{
772 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
773 struct sk_buff *skb_to_free = NULL; 772 struct sk_buff *skb_to_free = NULL;
774 struct mesh_path *mpath; 773 struct mesh_path *mpath;
775 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 774 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -777,14 +776,14 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev)
777 int err = 0; 776 int err = 0;
778 777
779 rcu_read_lock(); 778 rcu_read_lock();
780 mpath = mesh_path_lookup(dst_addr, dev); 779 mpath = mesh_path_lookup(dst_addr, sdata);
781 780
782 if (!mpath) { 781 if (!mpath) {
783 mesh_path_add(dst_addr, dev); 782 mesh_path_add(dst_addr, sdata);
784 mpath = mesh_path_lookup(dst_addr, dev); 783 mpath = mesh_path_lookup(dst_addr, sdata);
785 if (!mpath) { 784 if (!mpath) {
786 dev_kfree_skb(skb); 785 dev_kfree_skb(skb);
787 sdata->u.sta.mshstats.dropped_frames_no_route++; 786 sdata->u.mesh.mshstats.dropped_frames_no_route++;
788 err = -ENOSPC; 787 err = -ENOSPC;
789 goto endlookup; 788 goto endlookup;
790 } 789 }
@@ -792,14 +791,15 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev)
792 791
793 if (mpath->flags & MESH_PATH_ACTIVE) { 792 if (mpath->flags & MESH_PATH_ACTIVE) {
794 if (time_after(jiffies, mpath->exp_time - 793 if (time_after(jiffies, mpath->exp_time -
795 msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time)) 794 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
796 && !memcmp(dev->dev_addr, hdr->addr4, ETH_ALEN) 795 && !memcmp(sdata->dev->dev_addr, hdr->addr4,
796 ETH_ALEN)
797 && !(mpath->flags & MESH_PATH_RESOLVING) 797 && !(mpath->flags & MESH_PATH_RESOLVING)
798 && !(mpath->flags & MESH_PATH_FIXED)) { 798 && !(mpath->flags & MESH_PATH_FIXED)) {
799 mesh_queue_preq(mpath, 799 mesh_queue_preq(mpath,
800 PREQ_Q_F_START | PREQ_Q_F_REFRESH); 800 PREQ_Q_F_START | PREQ_Q_F_REFRESH);
801 } 801 }
802 memcpy(hdr->addr1, mpath->next_hop->addr, 802 memcpy(hdr->addr1, mpath->next_hop->sta.addr,
803 ETH_ALEN); 803 ETH_ALEN);
804 } else { 804 } else {
805 if (!(mpath->flags & MESH_PATH_RESOLVING)) { 805 if (!(mpath->flags & MESH_PATH_RESOLVING)) {
@@ -815,7 +815,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev)
815 815
816 skb_queue_tail(&mpath->frame_queue, skb); 816 skb_queue_tail(&mpath->frame_queue, skb);
817 if (skb_to_free) 817 if (skb_to_free)
818 mesh_path_discard_frame(skb_to_free, dev); 818 mesh_path_discard_frame(skb_to_free, sdata);
819 err = -ENOENT; 819 err = -ENOENT;
820 } 820 }
821 821
@@ -835,7 +835,7 @@ void mesh_path_timer(unsigned long data)
835 if (!mpath) 835 if (!mpath)
836 goto endmpathtimer; 836 goto endmpathtimer;
837 spin_lock_bh(&mpath->state_lock); 837 spin_lock_bh(&mpath->state_lock);
838 sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev); 838 sdata = mpath->sdata;
839 if (mpath->flags & MESH_PATH_RESOLVED || 839 if (mpath->flags & MESH_PATH_RESOLVED ||
840 (!(mpath->flags & MESH_PATH_RESOLVING))) 840 (!(mpath->flags & MESH_PATH_RESOLVING)))
841 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); 841 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 838ee60492ad..e4fa2905fadc 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/etherdevice.h> 10#include <linux/etherdevice.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/netdevice.h>
13#include <linux/random.h> 12#include <linux/random.h>
14#include <linux/spinlock.h> 13#include <linux/spinlock.h>
15#include <linux/string.h> 14#include <linux/string.h>
@@ -62,13 +61,13 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
62/** 61/**
63 * mesh_path_lookup - look up a path in the mesh path table 62 * mesh_path_lookup - look up a path in the mesh path table
64 * @dst: hardware address (ETH_ALEN length) of destination 63 * @dst: hardware address (ETH_ALEN length) of destination
65 * @dev: local interface 64 * @sdata: local subif
66 * 65 *
67 * Returns: pointer to the mesh path structure, or NULL if not found 66 * Returns: pointer to the mesh path structure, or NULL if not found
68 * 67 *
69 * Locking: must be called within a read rcu section. 68 * Locking: must be called within a read rcu section.
70 */ 69 */
71struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev) 70struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
72{ 71{
73 struct mesh_path *mpath; 72 struct mesh_path *mpath;
74 struct hlist_node *n; 73 struct hlist_node *n;
@@ -78,10 +77,10 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
78 77
79 tbl = rcu_dereference(mesh_paths); 78 tbl = rcu_dereference(mesh_paths);
80 79
81 bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)]; 80 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
82 hlist_for_each_entry_rcu(node, n, bucket, list) { 81 hlist_for_each_entry_rcu(node, n, bucket, list) {
83 mpath = node->mpath; 82 mpath = node->mpath;
84 if (mpath->dev == dev && 83 if (mpath->sdata == sdata &&
85 memcmp(dst, mpath->dst, ETH_ALEN) == 0) { 84 memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
86 if (MPATH_EXPIRED(mpath)) { 85 if (MPATH_EXPIRED(mpath)) {
87 spin_lock_bh(&mpath->state_lock); 86 spin_lock_bh(&mpath->state_lock);
@@ -98,13 +97,13 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
98/** 97/**
99 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index 98 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
100 * @idx: index 99 * @idx: index
101 * @dev: local interface, or NULL for all entries 100 * @sdata: local subif, or NULL for all entries
102 * 101 *
103 * Returns: pointer to the mesh path structure, or NULL if not found. 102 * Returns: pointer to the mesh path structure, or NULL if not found.
104 * 103 *
105 * Locking: must be called within a read rcu section. 104 * Locking: must be called within a read rcu section.
106 */ 105 */
107struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev) 106struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
108{ 107{
109 struct mpath_node *node; 108 struct mpath_node *node;
110 struct hlist_node *p; 109 struct hlist_node *p;
@@ -112,7 +111,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
112 int j = 0; 111 int j = 0;
113 112
114 for_each_mesh_entry(mesh_paths, p, node, i) { 113 for_each_mesh_entry(mesh_paths, p, node, i) {
115 if (dev && node->mpath->dev != dev) 114 if (sdata && node->mpath->sdata != sdata)
116 continue; 115 continue;
117 if (j++ == idx) { 116 if (j++ == idx) {
118 if (MPATH_EXPIRED(node->mpath)) { 117 if (MPATH_EXPIRED(node->mpath)) {
@@ -131,15 +130,14 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
131/** 130/**
132 * mesh_path_add - allocate and add a new path to the mesh path table 131 * mesh_path_add - allocate and add a new path to the mesh path table
133 * @addr: destination address of the path (ETH_ALEN length) 132 * @addr: destination address of the path (ETH_ALEN length)
134 * @dev: local interface 133 * @sdata: local subif
135 * 134 *
136 * Returns: 0 on sucess 135 * Returns: 0 on sucess
137 * 136 *
138 * State: the initial state of the new path is set to 0 137 * State: the initial state of the new path is set to 0
139 */ 138 */
140int mesh_path_add(u8 *dst, struct net_device *dev) 139int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
141{ 140{
142 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
143 struct mesh_path *mpath, *new_mpath; 141 struct mesh_path *mpath, *new_mpath;
144 struct mpath_node *node, *new_node; 142 struct mpath_node *node, *new_node;
145 struct hlist_head *bucket; 143 struct hlist_head *bucket;
@@ -148,14 +146,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
148 int err = 0; 146 int err = 0;
149 u32 hash_idx; 147 u32 hash_idx;
150 148
151 if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0) 149 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
152 /* never add ourselves as neighbours */ 150 /* never add ourselves as neighbours */
153 return -ENOTSUPP; 151 return -ENOTSUPP;
154 152
155 if (is_multicast_ether_addr(dst)) 153 if (is_multicast_ether_addr(dst))
156 return -ENOTSUPP; 154 return -ENOTSUPP;
157 155
158 if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) 156 if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
159 return -ENOSPC; 157 return -ENOSPC;
160 158
161 err = -ENOMEM; 159 err = -ENOMEM;
@@ -169,7 +167,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
169 167
170 read_lock(&pathtbl_resize_lock); 168 read_lock(&pathtbl_resize_lock);
171 memcpy(new_mpath->dst, dst, ETH_ALEN); 169 memcpy(new_mpath->dst, dst, ETH_ALEN);
172 new_mpath->dev = dev; 170 new_mpath->sdata = sdata;
173 new_mpath->flags = 0; 171 new_mpath->flags = 0;
174 skb_queue_head_init(&new_mpath->frame_queue); 172 skb_queue_head_init(&new_mpath->frame_queue);
175 new_node->mpath = new_mpath; 173 new_node->mpath = new_mpath;
@@ -179,7 +177,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
179 spin_lock_init(&new_mpath->state_lock); 177 spin_lock_init(&new_mpath->state_lock);
180 init_timer(&new_mpath->timer); 178 init_timer(&new_mpath->timer);
181 179
182 hash_idx = mesh_table_hash(dst, dev, mesh_paths); 180 hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
183 bucket = &mesh_paths->hash_buckets[hash_idx]; 181 bucket = &mesh_paths->hash_buckets[hash_idx];
184 182
185 spin_lock(&mesh_paths->hashwlock[hash_idx]); 183 spin_lock(&mesh_paths->hashwlock[hash_idx]);
@@ -187,7 +185,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
187 err = -EEXIST; 185 err = -EEXIST;
188 hlist_for_each_entry(node, n, bucket, list) { 186 hlist_for_each_entry(node, n, bucket, list) {
189 mpath = node->mpath; 187 mpath = node->mpath;
190 if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0) 188 if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
191 goto err_exists; 189 goto err_exists;
192 } 190 }
193 191
@@ -223,7 +221,7 @@ err_exists:
223err_node_alloc: 221err_node_alloc:
224 kfree(new_mpath); 222 kfree(new_mpath);
225err_path_alloc: 223err_path_alloc:
226 atomic_dec(&sdata->u.sta.mpaths); 224 atomic_dec(&sdata->u.mesh.mpaths);
227 return err; 225 return err;
228} 226}
229 227
@@ -241,7 +239,7 @@ void mesh_plink_broken(struct sta_info *sta)
241 struct mesh_path *mpath; 239 struct mesh_path *mpath;
242 struct mpath_node *node; 240 struct mpath_node *node;
243 struct hlist_node *p; 241 struct hlist_node *p;
244 struct net_device *dev = sta->sdata->dev; 242 struct ieee80211_sub_if_data *sdata = sta->sdata;
245 int i; 243 int i;
246 244
247 rcu_read_lock(); 245 rcu_read_lock();
@@ -256,7 +254,7 @@ void mesh_plink_broken(struct sta_info *sta)
256 spin_unlock_bh(&mpath->state_lock); 254 spin_unlock_bh(&mpath->state_lock);
257 mesh_path_error_tx(mpath->dst, 255 mesh_path_error_tx(mpath->dst,
258 cpu_to_le32(mpath->dsn), 256 cpu_to_le32(mpath->dsn),
259 dev->broadcast, dev); 257 sdata->dev->broadcast, sdata);
260 } else 258 } else
261 spin_unlock_bh(&mpath->state_lock); 259 spin_unlock_bh(&mpath->state_lock);
262 } 260 }
@@ -284,11 +282,11 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
284 for_each_mesh_entry(mesh_paths, p, node, i) { 282 for_each_mesh_entry(mesh_paths, p, node, i) {
285 mpath = node->mpath; 283 mpath = node->mpath;
286 if (mpath->next_hop == sta) 284 if (mpath->next_hop == sta)
287 mesh_path_del(mpath->dst, mpath->dev); 285 mesh_path_del(mpath->dst, mpath->sdata);
288 } 286 }
289} 287}
290 288
291void mesh_path_flush(struct net_device *dev) 289void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
292{ 290{
293 struct mesh_path *mpath; 291 struct mesh_path *mpath;
294 struct mpath_node *node; 292 struct mpath_node *node;
@@ -297,19 +295,18 @@ void mesh_path_flush(struct net_device *dev)
297 295
298 for_each_mesh_entry(mesh_paths, p, node, i) { 296 for_each_mesh_entry(mesh_paths, p, node, i) {
299 mpath = node->mpath; 297 mpath = node->mpath;
300 if (mpath->dev == dev) 298 if (mpath->sdata == sdata)
301 mesh_path_del(mpath->dst, mpath->dev); 299 mesh_path_del(mpath->dst, mpath->sdata);
302 } 300 }
303} 301}
304 302
305static void mesh_path_node_reclaim(struct rcu_head *rp) 303static void mesh_path_node_reclaim(struct rcu_head *rp)
306{ 304{
307 struct mpath_node *node = container_of(rp, struct mpath_node, rcu); 305 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
308 struct ieee80211_sub_if_data *sdata = 306 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
309 IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);
310 307
311 del_timer_sync(&node->mpath->timer); 308 del_timer_sync(&node->mpath->timer);
312 atomic_dec(&sdata->u.sta.mpaths); 309 atomic_dec(&sdata->u.mesh.mpaths);
313 kfree(node->mpath); 310 kfree(node->mpath);
314 kfree(node); 311 kfree(node);
315} 312}
@@ -318,11 +315,11 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
318 * mesh_path_del - delete a mesh path from the table 315 * mesh_path_del - delete a mesh path from the table
319 * 316 *
320 * @addr: dst address (ETH_ALEN length) 317 * @addr: dst address (ETH_ALEN length)
321 * @dev: local interface 318 * @sdata: local subif
322 * 319 *
323 * Returns: 0 if succesful 320 * Returns: 0 if succesful
324 */ 321 */
325int mesh_path_del(u8 *addr, struct net_device *dev) 322int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
326{ 323{
327 struct mesh_path *mpath; 324 struct mesh_path *mpath;
328 struct mpath_node *node; 325 struct mpath_node *node;
@@ -332,13 +329,13 @@ int mesh_path_del(u8 *addr, struct net_device *dev)
332 int err = 0; 329 int err = 0;
333 330
334 read_lock(&pathtbl_resize_lock); 331 read_lock(&pathtbl_resize_lock);
335 hash_idx = mesh_table_hash(addr, dev, mesh_paths); 332 hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
336 bucket = &mesh_paths->hash_buckets[hash_idx]; 333 bucket = &mesh_paths->hash_buckets[hash_idx];
337 334
338 spin_lock(&mesh_paths->hashwlock[hash_idx]); 335 spin_lock(&mesh_paths->hashwlock[hash_idx]);
339 hlist_for_each_entry(node, n, bucket, list) { 336 hlist_for_each_entry(node, n, bucket, list) {
340 mpath = node->mpath; 337 mpath = node->mpath;
341 if (mpath->dev == dev && 338 if (mpath->sdata == sdata &&
342 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 339 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
343 spin_lock_bh(&mpath->state_lock); 340 spin_lock_bh(&mpath->state_lock);
344 mpath->flags |= MESH_PATH_RESOLVING; 341 mpath->flags |= MESH_PATH_RESOLVING;
@@ -378,33 +375,33 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
378 * mesh_path_discard_frame - discard a frame whose path could not be resolved 375 * mesh_path_discard_frame - discard a frame whose path could not be resolved
379 * 376 *
380 * @skb: frame to discard 377 * @skb: frame to discard
381 * @dev: network device the frame was to be sent through 378 * @sdata: network subif the frame was to be sent through
382 * 379 *
383 * If the frame was beign forwarded from another MP, a PERR frame will be sent 380 * If the frame was beign forwarded from another MP, a PERR frame will be sent
384 * to the precursor. 381 * to the precursor.
385 * 382 *
386 * Locking: the function must me called within a rcu_read_lock region 383 * Locking: the function must me called within a rcu_read_lock region
387 */ 384 */
388void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev) 385void mesh_path_discard_frame(struct sk_buff *skb,
386 struct ieee80211_sub_if_data *sdata)
389{ 387{
390 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
391 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 388 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
392 struct mesh_path *mpath; 389 struct mesh_path *mpath;
393 u32 dsn = 0; 390 u32 dsn = 0;
394 391
395 if (memcmp(hdr->addr4, dev->dev_addr, ETH_ALEN) != 0) { 392 if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
396 u8 *ra, *da; 393 u8 *ra, *da;
397 394
398 da = hdr->addr3; 395 da = hdr->addr3;
399 ra = hdr->addr2; 396 ra = hdr->addr2;
400 mpath = mesh_path_lookup(da, dev); 397 mpath = mesh_path_lookup(da, sdata);
401 if (mpath) 398 if (mpath)
402 dsn = ++mpath->dsn; 399 dsn = ++mpath->dsn;
403 mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev); 400 mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata);
404 } 401 }
405 402
406 kfree_skb(skb); 403 kfree_skb(skb);
407 sdata->u.sta.mshstats.dropped_frames_no_route++; 404 sdata->u.mesh.mshstats.dropped_frames_no_route++;
408} 405}
409 406
410/** 407/**
@@ -416,14 +413,11 @@ void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
416 */ 413 */
417void mesh_path_flush_pending(struct mesh_path *mpath) 414void mesh_path_flush_pending(struct mesh_path *mpath)
418{ 415{
419 struct ieee80211_sub_if_data *sdata;
420 struct sk_buff *skb; 416 struct sk_buff *skb;
421 417
422 sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);
423
424 while ((skb = skb_dequeue(&mpath->frame_queue)) && 418 while ((skb = skb_dequeue(&mpath->frame_queue)) &&
425 (mpath->flags & MESH_PATH_ACTIVE)) 419 (mpath->flags & MESH_PATH_ACTIVE))
426 mesh_path_discard_frame(skb, mpath->dev); 420 mesh_path_discard_frame(skb, mpath->sdata);
427} 421}
428 422
429/** 423/**
@@ -472,7 +466,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
472 node = hlist_entry(p, struct mpath_node, list); 466 node = hlist_entry(p, struct mpath_node, list);
473 mpath = node->mpath; 467 mpath = node->mpath;
474 new_node->mpath = mpath; 468 new_node->mpath = mpath;
475 hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl); 469 hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
476 hlist_add_head(&new_node->list, 470 hlist_add_head(&new_node->list,
477 &newtbl->hash_buckets[hash_idx]); 471 &newtbl->hash_buckets[hash_idx]);
478 return 0; 472 return 0;
@@ -489,7 +483,7 @@ int mesh_pathtbl_init(void)
489 return 0; 483 return 0;
490} 484}
491 485
492void mesh_path_expire(struct net_device *dev) 486void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
493{ 487{
494 struct mesh_path *mpath; 488 struct mesh_path *mpath;
495 struct mpath_node *node; 489 struct mpath_node *node;
@@ -498,7 +492,7 @@ void mesh_path_expire(struct net_device *dev)
498 492
499 read_lock(&pathtbl_resize_lock); 493 read_lock(&pathtbl_resize_lock);
500 for_each_mesh_entry(mesh_paths, p, node, i) { 494 for_each_mesh_entry(mesh_paths, p, node, i) {
501 if (node->mpath->dev != dev) 495 if (node->mpath->sdata != sdata)
502 continue; 496 continue;
503 mpath = node->mpath; 497 mpath = node->mpath;
504 spin_lock_bh(&mpath->state_lock); 498 spin_lock_bh(&mpath->state_lock);
@@ -507,7 +501,7 @@ void mesh_path_expire(struct net_device *dev)
507 time_after(jiffies, 501 time_after(jiffies,
508 mpath->exp_time + MESH_PATH_EXPIRE)) { 502 mpath->exp_time + MESH_PATH_EXPIRE)) {
509 spin_unlock_bh(&mpath->state_lock); 503 spin_unlock_bh(&mpath->state_lock);
510 mesh_path_del(mpath->dst, mpath->dev); 504 mesh_path_del(mpath->dst, mpath->sdata);
511 } else 505 } else
512 spin_unlock_bh(&mpath->state_lock); 506 spin_unlock_bh(&mpath->state_lock);
513 } 507 }
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 9efeb1f07025..faac101c0f85 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -36,11 +36,11 @@
36#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9 36#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9
37#define MESH_SECURITY_FAILED_VERIFICATION 10 37#define MESH_SECURITY_FAILED_VERIFICATION 10
38 38
39#define dot11MeshMaxRetries(s) (s->u.sta.mshcfg.dot11MeshMaxRetries) 39#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries)
40#define dot11MeshRetryTimeout(s) (s->u.sta.mshcfg.dot11MeshRetryTimeout) 40#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout)
41#define dot11MeshConfirmTimeout(s) (s->u.sta.mshcfg.dot11MeshConfirmTimeout) 41#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout)
42#define dot11MeshHoldingTimeout(s) (s->u.sta.mshcfg.dot11MeshHoldingTimeout) 42#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
43#define dot11MeshMaxPeerLinks(s) (s->u.sta.mshcfg.dot11MeshMaxPeerLinks) 43#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
44 44
45enum plink_frame_type { 45enum plink_frame_type {
46 PLINK_OPEN = 0, 46 PLINK_OPEN = 0,
@@ -63,14 +63,14 @@ enum plink_event {
63static inline 63static inline
64void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) 64void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
65{ 65{
66 atomic_inc(&sdata->u.sta.mshstats.estab_plinks); 66 atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
67 mesh_accept_plinks_update(sdata); 67 mesh_accept_plinks_update(sdata);
68} 68}
69 69
70static inline 70static inline
71void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) 71void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
72{ 72{
73 atomic_dec(&sdata->u.sta.mshstats.estab_plinks); 73 atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
74 mesh_accept_plinks_update(sdata); 74 mesh_accept_plinks_update(sdata);
75} 75}
76 76
@@ -106,7 +106,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
106 return NULL; 106 return NULL;
107 107
108 sta->flags = WLAN_STA_AUTHORIZED; 108 sta->flags = WLAN_STA_AUTHORIZED;
109 sta->supp_rates[local->hw.conf.channel->band] = rates; 109 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
110 110
111 return sta; 111 return sta;
112} 112}
@@ -144,10 +144,10 @@ void mesh_plink_deactivate(struct sta_info *sta)
144 spin_unlock_bh(&sta->lock); 144 spin_unlock_bh(&sta->lock);
145} 145}
146 146
147static int mesh_plink_frame_tx(struct net_device *dev, 147static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
148 enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, 148 enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid,
149 __le16 reason) { 149 __le16 reason) {
150 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 150 struct ieee80211_local *local = sdata->local;
151 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 151 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
152 struct ieee80211_mgmt *mgmt; 152 struct ieee80211_mgmt *mgmt;
153 bool include_plid = false; 153 bool include_plid = false;
@@ -163,10 +163,10 @@ static int mesh_plink_frame_tx(struct net_device *dev,
163 mgmt = (struct ieee80211_mgmt *) 163 mgmt = (struct ieee80211_mgmt *)
164 skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); 164 skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action));
165 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); 165 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action));
166 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 166 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
167 IEEE80211_STYPE_ACTION); 167 IEEE80211_STYPE_ACTION);
168 memcpy(mgmt->da, da, ETH_ALEN); 168 memcpy(mgmt->da, da, ETH_ALEN);
169 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 169 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
170 /* BSSID is left zeroed, wildcard value */ 170 /* BSSID is left zeroed, wildcard value */
171 mgmt->u.action.category = PLINK_CATEGORY; 171 mgmt->u.action.category = PLINK_CATEGORY;
172 mgmt->u.action.u.plink_action.action_code = action; 172 mgmt->u.action.u.plink_action.action_code = action;
@@ -180,7 +180,7 @@ static int mesh_plink_frame_tx(struct net_device *dev,
180 /* two-byte status code followed by two-byte AID */ 180 /* two-byte status code followed by two-byte AID */
181 memset(pos, 0, 4); 181 memset(pos, 0, 4);
182 } 182 }
183 mesh_mgmt_ies_add(skb, dev); 183 mesh_mgmt_ies_add(skb, sdata);
184 } 184 }
185 185
186 /* Add Peer Link Management element */ 186 /* Add Peer Link Management element */
@@ -217,15 +217,14 @@ static int mesh_plink_frame_tx(struct net_device *dev,
217 memcpy(pos, &reason, 2); 217 memcpy(pos, &reason, 2);
218 } 218 }
219 219
220 ieee80211_sta_tx(dev, skb, 0); 220 ieee80211_tx_skb(sdata, skb, 0);
221 return 0; 221 return 0;
222} 222}
223 223
224void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, 224void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct ieee80211_sub_if_data *sdata,
225 bool peer_accepting_plinks) 225 bool peer_accepting_plinks)
226{ 226{
227 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 227 struct ieee80211_local *local = sdata->local;
228 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
229 struct sta_info *sta; 228 struct sta_info *sta;
230 229
231 rcu_read_lock(); 230 rcu_read_lock();
@@ -244,10 +243,10 @@ void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev,
244 } 243 }
245 244
246 sta->last_rx = jiffies; 245 sta->last_rx = jiffies;
247 sta->supp_rates[local->hw.conf.channel->band] = rates; 246 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
248 if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN && 247 if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN &&
249 sdata->u.sta.accepting_plinks && 248 sdata->u.mesh.accepting_plinks &&
250 sdata->u.sta.mshcfg.auto_open_plinks) 249 sdata->u.mesh.mshcfg.auto_open_plinks)
251 mesh_plink_open(sta); 250 mesh_plink_open(sta);
252 251
253 rcu_read_unlock(); 252 rcu_read_unlock();
@@ -257,7 +256,6 @@ static void mesh_plink_timer(unsigned long data)
257{ 256{
258 struct sta_info *sta; 257 struct sta_info *sta;
259 __le16 llid, plid, reason; 258 __le16 llid, plid, reason;
260 struct net_device *dev = NULL;
261 struct ieee80211_sub_if_data *sdata; 259 struct ieee80211_sub_if_data *sdata;
262#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG 260#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
263 DECLARE_MAC_BUF(mac); 261 DECLARE_MAC_BUF(mac);
@@ -277,12 +275,11 @@ static void mesh_plink_timer(unsigned long data)
277 return; 275 return;
278 } 276 }
279 mpl_dbg("Mesh plink timer for %s fired on state %d\n", 277 mpl_dbg("Mesh plink timer for %s fired on state %d\n",
280 print_mac(mac, sta->addr), sta->plink_state); 278 print_mac(mac, sta->sta.addr), sta->plink_state);
281 reason = 0; 279 reason = 0;
282 llid = sta->llid; 280 llid = sta->llid;
283 plid = sta->plid; 281 plid = sta->plid;
284 sdata = sta->sdata; 282 sdata = sta->sdata;
285 dev = sdata->dev;
286 283
287 switch (sta->plink_state) { 284 switch (sta->plink_state) {
288 case PLINK_OPN_RCVD: 285 case PLINK_OPN_RCVD:
@@ -291,7 +288,7 @@ static void mesh_plink_timer(unsigned long data)
291 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { 288 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
292 u32 rand; 289 u32 rand;
293 mpl_dbg("Mesh plink for %s (retry, timeout): %d %d\n", 290 mpl_dbg("Mesh plink for %s (retry, timeout): %d %d\n",
294 print_mac(mac, sta->addr), 291 print_mac(mac, sta->sta.addr),
295 sta->plink_retries, sta->plink_timeout); 292 sta->plink_retries, sta->plink_timeout);
296 get_random_bytes(&rand, sizeof(u32)); 293 get_random_bytes(&rand, sizeof(u32));
297 sta->plink_timeout = sta->plink_timeout + 294 sta->plink_timeout = sta->plink_timeout +
@@ -299,7 +296,7 @@ static void mesh_plink_timer(unsigned long data)
299 ++sta->plink_retries; 296 ++sta->plink_retries;
300 mod_plink_timer(sta, sta->plink_timeout); 297 mod_plink_timer(sta, sta->plink_timeout);
301 spin_unlock_bh(&sta->lock); 298 spin_unlock_bh(&sta->lock);
302 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 299 mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
303 0, 0); 300 0, 0);
304 break; 301 break;
305 } 302 }
@@ -312,7 +309,7 @@ static void mesh_plink_timer(unsigned long data)
312 sta->plink_state = PLINK_HOLDING; 309 sta->plink_state = PLINK_HOLDING;
313 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 310 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
314 spin_unlock_bh(&sta->lock); 311 spin_unlock_bh(&sta->lock);
315 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, 312 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid,
316 reason); 313 reason);
317 break; 314 break;
318 case PLINK_HOLDING: 315 case PLINK_HOLDING:
@@ -355,10 +352,10 @@ int mesh_plink_open(struct sta_info *sta)
355 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 352 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
356 spin_unlock_bh(&sta->lock); 353 spin_unlock_bh(&sta->lock);
357 mpl_dbg("Mesh plink: starting establishment with %s\n", 354 mpl_dbg("Mesh plink: starting establishment with %s\n",
358 print_mac(mac, sta->addr)); 355 print_mac(mac, sta->sta.addr));
359 356
360 return mesh_plink_frame_tx(sdata->dev, PLINK_OPEN, 357 return mesh_plink_frame_tx(sdata, PLINK_OPEN,
361 sta->addr, llid, 0, 0); 358 sta->sta.addr, llid, 0, 0);
362} 359}
363 360
364void mesh_plink_block(struct sta_info *sta) 361void mesh_plink_block(struct sta_info *sta)
@@ -382,7 +379,7 @@ int mesh_plink_close(struct sta_info *sta)
382#endif 379#endif
383 380
384 mpl_dbg("Mesh plink: closing link with %s\n", 381 mpl_dbg("Mesh plink: closing link with %s\n",
385 print_mac(mac, sta->addr)); 382 print_mac(mac, sta->sta.addr));
386 spin_lock_bh(&sta->lock); 383 spin_lock_bh(&sta->lock);
387 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED); 384 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED);
388 reason = sta->reason; 385 reason = sta->reason;
@@ -403,15 +400,14 @@ int mesh_plink_close(struct sta_info *sta)
403 llid = sta->llid; 400 llid = sta->llid;
404 plid = sta->plid; 401 plid = sta->plid;
405 spin_unlock_bh(&sta->lock); 402 spin_unlock_bh(&sta->lock);
406 mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, 403 mesh_plink_frame_tx(sta->sdata, PLINK_CLOSE, sta->sta.addr, llid,
407 plid, reason); 404 plid, reason);
408 return 0; 405 return 0;
409} 406}
410 407
411void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, 408void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
412 size_t len, struct ieee80211_rx_status *rx_status) 409 size_t len, struct ieee80211_rx_status *rx_status)
413{ 410{
414 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
415 struct ieee80211_local *local = sdata->local; 411 struct ieee80211_local *local = sdata->local;
416 struct ieee802_11_elems elems; 412 struct ieee802_11_elems elems;
417 struct sta_info *sta; 413 struct sta_info *sta;
@@ -425,6 +421,10 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
425 DECLARE_MAC_BUF(mac); 421 DECLARE_MAC_BUF(mac);
426#endif 422#endif
427 423
424 /* need action_code, aux */
425 if (len < IEEE80211_MIN_ACTION_SIZE + 3)
426 return;
427
428 if (is_multicast_ether_addr(mgmt->da)) { 428 if (is_multicast_ether_addr(mgmt->da)) {
429 mpl_dbg("Mesh plink: ignore frame from multicast address"); 429 mpl_dbg("Mesh plink: ignore frame from multicast address");
430 return; 430 return;
@@ -478,7 +478,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
478 478
479 /* Now we will figure out the appropriate event... */ 479 /* Now we will figure out the appropriate event... */
480 event = PLINK_UNDEFINED; 480 event = PLINK_UNDEFINED;
481 if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, dev))) { 481 if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
482 switch (ftype) { 482 switch (ftype) {
483 case PLINK_OPEN: 483 case PLINK_OPEN:
484 event = OPN_RJCT; 484 event = OPN_RJCT;
@@ -577,9 +577,9 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
577 sta->llid = llid; 577 sta->llid = llid;
578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
579 spin_unlock_bh(&sta->lock); 579 spin_unlock_bh(&sta->lock);
580 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 580 mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
581 0, 0); 581 0, 0);
582 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, 582 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr,
583 llid, plid, 0); 583 llid, plid, 0);
584 break; 584 break;
585 default: 585 default:
@@ -604,7 +604,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
604 604
605 llid = sta->llid; 605 llid = sta->llid;
606 spin_unlock_bh(&sta->lock); 606 spin_unlock_bh(&sta->lock);
607 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 607 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
608 plid, reason); 608 plid, reason);
609 break; 609 break;
610 case OPN_ACPT: 610 case OPN_ACPT:
@@ -613,7 +613,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
613 sta->plid = plid; 613 sta->plid = plid;
614 llid = sta->llid; 614 llid = sta->llid;
615 spin_unlock_bh(&sta->lock); 615 spin_unlock_bh(&sta->lock);
616 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 616 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
617 plid, 0); 617 plid, 0);
618 break; 618 break;
619 case CNF_ACPT: 619 case CNF_ACPT:
@@ -646,13 +646,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
646 646
647 llid = sta->llid; 647 llid = sta->llid;
648 spin_unlock_bh(&sta->lock); 648 spin_unlock_bh(&sta->lock);
649 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 649 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
650 plid, reason); 650 plid, reason);
651 break; 651 break;
652 case OPN_ACPT: 652 case OPN_ACPT:
653 llid = sta->llid; 653 llid = sta->llid;
654 spin_unlock_bh(&sta->lock); 654 spin_unlock_bh(&sta->lock);
655 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 655 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
656 plid, 0); 656 plid, 0);
657 break; 657 break;
658 case CNF_ACPT: 658 case CNF_ACPT:
@@ -661,7 +661,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
661 mesh_plink_inc_estab_count(sdata); 661 mesh_plink_inc_estab_count(sdata);
662 spin_unlock_bh(&sta->lock); 662 spin_unlock_bh(&sta->lock);
663 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 663 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
664 print_mac(mac, sta->addr)); 664 print_mac(mac, sta->sta.addr));
665 break; 665 break;
666 default: 666 default:
667 spin_unlock_bh(&sta->lock); 667 spin_unlock_bh(&sta->lock);
@@ -685,7 +685,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
685 685
686 llid = sta->llid; 686 llid = sta->llid;
687 spin_unlock_bh(&sta->lock); 687 spin_unlock_bh(&sta->lock);
688 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 688 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
689 plid, reason); 689 plid, reason);
690 break; 690 break;
691 case OPN_ACPT: 691 case OPN_ACPT:
@@ -694,8 +694,8 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
694 mesh_plink_inc_estab_count(sdata); 694 mesh_plink_inc_estab_count(sdata);
695 spin_unlock_bh(&sta->lock); 695 spin_unlock_bh(&sta->lock);
696 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 696 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
697 print_mac(mac, sta->addr)); 697 print_mac(mac, sta->sta.addr));
698 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 698 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
699 plid, 0); 699 plid, 0);
700 break; 700 break;
701 default: 701 default:
@@ -714,13 +714,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
714 llid = sta->llid; 714 llid = sta->llid;
715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
716 spin_unlock_bh(&sta->lock); 716 spin_unlock_bh(&sta->lock);
717 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 717 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
718 plid, reason); 718 plid, reason);
719 break; 719 break;
720 case OPN_ACPT: 720 case OPN_ACPT:
721 llid = sta->llid; 721 llid = sta->llid;
722 spin_unlock_bh(&sta->lock); 722 spin_unlock_bh(&sta->lock);
723 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 723 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
724 plid, 0); 724 plid, 0);
725 break; 725 break;
726 default: 726 default:
@@ -743,8 +743,8 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
743 llid = sta->llid; 743 llid = sta->llid;
744 reason = sta->reason; 744 reason = sta->reason;
745 spin_unlock_bh(&sta->lock); 745 spin_unlock_bh(&sta->lock);
746 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 746 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr,
747 plid, reason); 747 llid, plid, reason);
748 break; 748 break;
749 default: 749 default:
750 spin_unlock_bh(&sta->lock); 750 spin_unlock_bh(&sta->lock);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 9bb68c6a8f44..8611a8318c9c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -11,11 +11,6 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14/* TODO:
15 * order BSS list by RSSI(?) ("quality of AP")
16 * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE,
17 * SSID)
18 */
19#include <linux/delay.h> 14#include <linux/delay.h>
20#include <linux/if_ether.h> 15#include <linux/if_ether.h>
21#include <linux/skbuff.h> 16#include <linux/skbuff.h>
@@ -26,577 +21,184 @@
26#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
27#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
28#include <net/iw_handler.h> 23#include <net/iw_handler.h>
29#include <asm/types.h>
30
31#include <net/mac80211.h> 24#include <net/mac80211.h>
25#include <asm/unaligned.h>
26
32#include "ieee80211_i.h" 27#include "ieee80211_i.h"
33#include "rate.h" 28#include "rate.h"
34#include "led.h" 29#include "led.h"
35#include "mesh.h"
36 30
31#define IEEE80211_ASSOC_SCANS_MAX_TRIES 2
37#define IEEE80211_AUTH_TIMEOUT (HZ / 5) 32#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
38#define IEEE80211_AUTH_MAX_TRIES 3 33#define IEEE80211_AUTH_MAX_TRIES 3
39#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 34#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
40#define IEEE80211_ASSOC_MAX_TRIES 3 35#define IEEE80211_ASSOC_MAX_TRIES 3
41#define IEEE80211_MONITORING_INTERVAL (2 * HZ) 36#define IEEE80211_MONITORING_INTERVAL (2 * HZ)
42#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
43#define IEEE80211_PROBE_INTERVAL (60 * HZ) 37#define IEEE80211_PROBE_INTERVAL (60 * HZ)
44#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) 38#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
45#define IEEE80211_SCAN_INTERVAL (2 * HZ) 39#define IEEE80211_SCAN_INTERVAL (2 * HZ)
46#define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) 40#define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ)
47#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) 41#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ)
48 42
49#define IEEE80211_PROBE_DELAY (HZ / 33)
50#define IEEE80211_CHANNEL_TIME (HZ / 33)
51#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5)
52#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
53#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) 43#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
54#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) 44#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
55#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
56 45
57#define IEEE80211_IBSS_MAX_STA_ENTRIES 128 46#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
58 47
59 48
60#define ERP_INFO_USE_PROTECTION BIT(1) 49/* utils */
61
62/* mgmt header + 1 byte action code */
63#define IEEE80211_MIN_ACTION_SIZE (24 + 1)
64
65#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
66#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
67#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
68#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
69#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
70
71/* next values represent the buffer size for A-MPDU frame.
72 * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */
73#define IEEE80211_MIN_AMPDU_BUF 0x8
74#define IEEE80211_MAX_AMPDU_BUF 0x40
75
76static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
77 u8 *ssid, size_t ssid_len);
78static struct ieee80211_sta_bss *
79ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
80 u8 *ssid, u8 ssid_len);
81static void ieee80211_rx_bss_put(struct ieee80211_local *local,
82 struct ieee80211_sta_bss *bss);
83static int ieee80211_sta_find_ibss(struct net_device *dev,
84 struct ieee80211_if_sta *ifsta);
85static int ieee80211_sta_wep_configured(struct net_device *dev);
86static int ieee80211_sta_start_scan(struct net_device *dev,
87 u8 *ssid, size_t ssid_len);
88static int ieee80211_sta_config_auth(struct net_device *dev,
89 struct ieee80211_if_sta *ifsta);
90static void sta_rx_agg_session_timer_expired(unsigned long data);
91
92
93void ieee802_11_parse_elems(u8 *start, size_t len,
94 struct ieee802_11_elems *elems)
95{
96 size_t left = len;
97 u8 *pos = start;
98
99 memset(elems, 0, sizeof(*elems));
100
101 while (left >= 2) {
102 u8 id, elen;
103
104 id = *pos++;
105 elen = *pos++;
106 left -= 2;
107
108 if (elen > left)
109 return;
110
111 switch (id) {
112 case WLAN_EID_SSID:
113 elems->ssid = pos;
114 elems->ssid_len = elen;
115 break;
116 case WLAN_EID_SUPP_RATES:
117 elems->supp_rates = pos;
118 elems->supp_rates_len = elen;
119 break;
120 case WLAN_EID_FH_PARAMS:
121 elems->fh_params = pos;
122 elems->fh_params_len = elen;
123 break;
124 case WLAN_EID_DS_PARAMS:
125 elems->ds_params = pos;
126 elems->ds_params_len = elen;
127 break;
128 case WLAN_EID_CF_PARAMS:
129 elems->cf_params = pos;
130 elems->cf_params_len = elen;
131 break;
132 case WLAN_EID_TIM:
133 elems->tim = pos;
134 elems->tim_len = elen;
135 break;
136 case WLAN_EID_IBSS_PARAMS:
137 elems->ibss_params = pos;
138 elems->ibss_params_len = elen;
139 break;
140 case WLAN_EID_CHALLENGE:
141 elems->challenge = pos;
142 elems->challenge_len = elen;
143 break;
144 case WLAN_EID_WPA:
145 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
146 pos[2] == 0xf2) {
147 /* Microsoft OUI (00:50:F2) */
148 if (pos[3] == 1) {
149 /* OUI Type 1 - WPA IE */
150 elems->wpa = pos;
151 elems->wpa_len = elen;
152 } else if (elen >= 5 && pos[3] == 2) {
153 if (pos[4] == 0) {
154 elems->wmm_info = pos;
155 elems->wmm_info_len = elen;
156 } else if (pos[4] == 1) {
157 elems->wmm_param = pos;
158 elems->wmm_param_len = elen;
159 }
160 }
161 }
162 break;
163 case WLAN_EID_RSN:
164 elems->rsn = pos;
165 elems->rsn_len = elen;
166 break;
167 case WLAN_EID_ERP_INFO:
168 elems->erp_info = pos;
169 elems->erp_info_len = elen;
170 break;
171 case WLAN_EID_EXT_SUPP_RATES:
172 elems->ext_supp_rates = pos;
173 elems->ext_supp_rates_len = elen;
174 break;
175 case WLAN_EID_HT_CAPABILITY:
176 elems->ht_cap_elem = pos;
177 elems->ht_cap_elem_len = elen;
178 break;
179 case WLAN_EID_HT_EXTRA_INFO:
180 elems->ht_info_elem = pos;
181 elems->ht_info_elem_len = elen;
182 break;
183 case WLAN_EID_MESH_ID:
184 elems->mesh_id = pos;
185 elems->mesh_id_len = elen;
186 break;
187 case WLAN_EID_MESH_CONFIG:
188 elems->mesh_config = pos;
189 elems->mesh_config_len = elen;
190 break;
191 case WLAN_EID_PEER_LINK:
192 elems->peer_link = pos;
193 elems->peer_link_len = elen;
194 break;
195 case WLAN_EID_PREQ:
196 elems->preq = pos;
197 elems->preq_len = elen;
198 break;
199 case WLAN_EID_PREP:
200 elems->prep = pos;
201 elems->prep_len = elen;
202 break;
203 case WLAN_EID_PERR:
204 elems->perr = pos;
205 elems->perr_len = elen;
206 break;
207 case WLAN_EID_CHANNEL_SWITCH:
208 elems->ch_switch_elem = pos;
209 elems->ch_switch_elem_len = elen;
210 break;
211 case WLAN_EID_QUIET:
212 if (!elems->quiet_elem) {
213 elems->quiet_elem = pos;
214 elems->quiet_elem_len = elen;
215 }
216 elems->num_of_quiet_elem++;
217 break;
218 case WLAN_EID_COUNTRY:
219 elems->country_elem = pos;
220 elems->country_elem_len = elen;
221 break;
222 case WLAN_EID_PWR_CONSTRAINT:
223 elems->pwr_constr_elem = pos;
224 elems->pwr_constr_elem_len = elen;
225 break;
226 default:
227 break;
228 }
229
230 left -= elen;
231 pos += elen;
232 }
233}
234
235
236static int ecw2cw(int ecw) 50static int ecw2cw(int ecw)
237{ 51{
238 return (1 << ecw) - 1; 52 return (1 << ecw) - 1;
239} 53}
240 54
241 55static u8 *ieee80211_bss_get_ie(struct ieee80211_bss *bss, u8 ie)
242static void ieee80211_sta_def_wmm_params(struct net_device *dev,
243 struct ieee80211_sta_bss *bss,
244 int ibss)
245{ 56{
246 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 57 u8 *end, *pos;
247 struct ieee80211_local *local = sdata->local;
248 int i, have_higher_than_11mbit = 0;
249
250 58
251 /* cf. IEEE 802.11 9.2.12 */ 59 pos = bss->ies;
252 for (i = 0; i < bss->supp_rates_len; i++) 60 if (pos == NULL)
253 if ((bss->supp_rates[i] & 0x7f) * 5 > 110) 61 return NULL;
254 have_higher_than_11mbit = 1; 62 end = pos + bss->ies_len;
255
256 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
257 have_higher_than_11mbit)
258 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
259 else
260 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
261
262
263 if (local->ops->conf_tx) {
264 struct ieee80211_tx_queue_params qparam;
265
266 memset(&qparam, 0, sizeof(qparam));
267
268 qparam.aifs = 2;
269
270 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
271 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE))
272 qparam.cw_min = 31;
273 else
274 qparam.cw_min = 15;
275
276 qparam.cw_max = 1023;
277 qparam.txop = 0;
278
279 for (i = 0; i < local_to_hw(local)->queues; i++)
280 local->ops->conf_tx(local_to_hw(local), i, &qparam);
281 }
282}
283
284static void ieee80211_sta_wmm_params(struct net_device *dev,
285 struct ieee80211_if_sta *ifsta,
286 u8 *wmm_param, size_t wmm_param_len)
287{
288 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
289 struct ieee80211_tx_queue_params params;
290 size_t left;
291 int count;
292 u8 *pos;
293
294 if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED))
295 return;
296
297 if (!wmm_param)
298 return;
299
300 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
301 return;
302 count = wmm_param[6] & 0x0f;
303 if (count == ifsta->wmm_last_param_set)
304 return;
305 ifsta->wmm_last_param_set = count;
306
307 pos = wmm_param + 8;
308 left = wmm_param_len - 8;
309
310 memset(&params, 0, sizeof(params));
311
312 if (!local->ops->conf_tx)
313 return;
314
315 local->wmm_acm = 0;
316 for (; left >= 4; left -= 4, pos += 4) {
317 int aci = (pos[0] >> 5) & 0x03;
318 int acm = (pos[0] >> 4) & 0x01;
319 int queue;
320 63
321 switch (aci) { 64 while (pos + 1 < end) {
322 case 1: 65 if (pos + 2 + pos[1] > end)
323 queue = 3;
324 if (acm)
325 local->wmm_acm |= BIT(0) | BIT(3);
326 break;
327 case 2:
328 queue = 1;
329 if (acm)
330 local->wmm_acm |= BIT(4) | BIT(5);
331 break;
332 case 3:
333 queue = 0;
334 if (acm)
335 local->wmm_acm |= BIT(6) | BIT(7);
336 break; 66 break;
337 case 0: 67 if (pos[0] == ie)
338 default: 68 return pos;
339 queue = 2; 69 pos += 2 + pos[1];
340 if (acm)
341 local->wmm_acm |= BIT(1) | BIT(2);
342 break;
343 }
344
345 params.aifs = pos[0] & 0x0f;
346 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
347 params.cw_min = ecw2cw(pos[1] & 0x0f);
348 params.txop = get_unaligned_le16(pos + 2);
349#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
350 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
351 "cWmin=%d cWmax=%d txop=%d\n",
352 dev->name, queue, aci, acm, params.aifs, params.cw_min,
353 params.cw_max, params.txop);
354#endif
355 /* TODO: handle ACM (block TX, fallback to next lowest allowed
356 * AC for now) */
357 if (local->ops->conf_tx(local_to_hw(local), queue, &params)) {
358 printk(KERN_DEBUG "%s: failed to set TX queue "
359 "parameters for queue %d\n", dev->name, queue);
360 }
361 } 70 }
362}
363 71
364static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata, 72 return NULL;
365 bool use_protection,
366 bool use_short_preamble)
367{
368 struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf;
369#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
370 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
371 DECLARE_MAC_BUF(mac);
372#endif
373 u32 changed = 0;
374
375 if (use_protection != bss_conf->use_cts_prot) {
376#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
377 if (net_ratelimit()) {
378 printk(KERN_DEBUG "%s: CTS protection %s (BSSID="
379 "%s)\n",
380 sdata->dev->name,
381 use_protection ? "enabled" : "disabled",
382 print_mac(mac, ifsta->bssid));
383 }
384#endif
385 bss_conf->use_cts_prot = use_protection;
386 changed |= BSS_CHANGED_ERP_CTS_PROT;
387 }
388
389 if (use_short_preamble != bss_conf->use_short_preamble) {
390#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
391 if (net_ratelimit()) {
392 printk(KERN_DEBUG "%s: switched to %s barker preamble"
393 " (BSSID=%s)\n",
394 sdata->dev->name,
395 use_short_preamble ? "short" : "long",
396 print_mac(mac, ifsta->bssid));
397 }
398#endif
399 bss_conf->use_short_preamble = use_short_preamble;
400 changed |= BSS_CHANGED_ERP_PREAMBLE;
401 }
402
403 return changed;
404} 73}
405 74
406static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata, 75static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
407 u8 erp_value) 76 struct ieee80211_supported_band *sband,
408{ 77 u64 *rates)
409 bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
410 bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0;
411
412 return ieee80211_handle_protect_preamb(sdata,
413 use_protection, use_short_preamble);
414}
415
416static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
417 struct ieee80211_sta_bss *bss)
418{
419 u32 changed = 0;
420
421 if (bss->has_erp_value)
422 changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value);
423 else {
424 u16 capab = bss->capability;
425 changed |= ieee80211_handle_protect_preamb(sdata, false,
426 (capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0);
427 }
428
429 return changed;
430}
431
432int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
433 struct ieee80211_ht_info *ht_info)
434{
435
436 if (ht_info == NULL)
437 return -EINVAL;
438
439 memset(ht_info, 0, sizeof(*ht_info));
440
441 if (ht_cap_ie) {
442 u8 ampdu_info = ht_cap_ie->ampdu_params_info;
443
444 ht_info->ht_supported = 1;
445 ht_info->cap = le16_to_cpu(ht_cap_ie->cap_info);
446 ht_info->ampdu_factor =
447 ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR;
448 ht_info->ampdu_density =
449 (ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2;
450 memcpy(ht_info->supp_mcs_set, ht_cap_ie->supp_mcs_set, 16);
451 } else
452 ht_info->ht_supported = 0;
453
454 return 0;
455}
456
457int ieee80211_ht_addt_info_ie_to_ht_bss_info(
458 struct ieee80211_ht_addt_info *ht_add_info_ie,
459 struct ieee80211_ht_bss_info *bss_info)
460{ 78{
461 if (bss_info == NULL) 79 int i, j, count;
462 return -EINVAL; 80 *rates = 0;
463 81 count = 0;
464 memset(bss_info, 0, sizeof(*bss_info)); 82 for (i = 0; i < bss->supp_rates_len; i++) {
465 83 int rate = (bss->supp_rates[i] & 0x7F) * 5;
466 if (ht_add_info_ie) {
467 u16 op_mode;
468 op_mode = le16_to_cpu(ht_add_info_ie->operation_mode);
469 84
470 bss_info->primary_channel = ht_add_info_ie->control_chan; 85 for (j = 0; j < sband->n_bitrates; j++)
471 bss_info->bss_cap = ht_add_info_ie->ht_param; 86 if (sband->bitrates[j].bitrate == rate) {
472 bss_info->bss_op_mode = (u8)(op_mode & 0xff); 87 *rates |= BIT(j);
88 count++;
89 break;
90 }
473 } 91 }
474 92
475 return 0; 93 return count;
476} 94}
477 95
478static void ieee80211_sta_send_associnfo(struct net_device *dev, 96/* also used by mesh code */
479 struct ieee80211_if_sta *ifsta) 97u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
98 struct ieee802_11_elems *elems,
99 enum ieee80211_band band)
480{ 100{
481 union iwreq_data wrqu; 101 struct ieee80211_supported_band *sband;
102 struct ieee80211_rate *bitrates;
103 size_t num_rates;
104 u64 supp_rates;
105 int i, j;
106 sband = local->hw.wiphy->bands[band];
482 107
483 if (ifsta->assocreq_ies) { 108 if (!sband) {
484 memset(&wrqu, 0, sizeof(wrqu)); 109 WARN_ON(1);
485 wrqu.data.length = ifsta->assocreq_ies_len; 110 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
486 wireless_send_event(dev, IWEVASSOCREQIE, &wrqu,
487 ifsta->assocreq_ies);
488 } 111 }
489 112
490 if (ifsta->assocresp_ies) { 113 bitrates = sband->bitrates;
491 memset(&wrqu, 0, sizeof(wrqu)); 114 num_rates = sband->n_bitrates;
492 wrqu.data.length = ifsta->assocresp_ies_len; 115 supp_rates = 0;
493 wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, 116 for (i = 0; i < elems->supp_rates_len +
494 ifsta->assocresp_ies); 117 elems->ext_supp_rates_len; i++) {
118 u8 rate = 0;
119 int own_rate;
120 if (i < elems->supp_rates_len)
121 rate = elems->supp_rates[i];
122 else if (elems->ext_supp_rates)
123 rate = elems->ext_supp_rates
124 [i - elems->supp_rates_len];
125 own_rate = 5 * (rate & 0x7f);
126 for (j = 0; j < num_rates; j++)
127 if (bitrates[j].bitrate == own_rate)
128 supp_rates |= BIT(j);
495 } 129 }
130 return supp_rates;
496} 131}
497 132
133/* frame sending functions */
498 134
499static void ieee80211_set_associated(struct net_device *dev, 135/* also used by scanning code */
500 struct ieee80211_if_sta *ifsta, 136void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
501 bool assoc) 137 u8 *ssid, size_t ssid_len)
502{ 138{
503 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
504 struct ieee80211_local *local = sdata->local; 139 struct ieee80211_local *local = sdata->local;
505 struct ieee80211_conf *conf = &local_to_hw(local)->conf; 140 struct ieee80211_supported_band *sband;
506 union iwreq_data wrqu; 141 struct sk_buff *skb;
507 u32 changed = BSS_CHANGED_ASSOC; 142 struct ieee80211_mgmt *mgmt;
508 143 u8 *pos, *supp_rates, *esupp_rates = NULL;
509 if (assoc) { 144 int i;
510 struct ieee80211_sta_bss *bss;
511
512 ifsta->flags |= IEEE80211_STA_ASSOCIATED;
513
514 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
515 return;
516
517 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
518 conf->channel->center_freq,
519 ifsta->ssid, ifsta->ssid_len);
520 if (bss) {
521 /* set timing information */
522 sdata->bss_conf.beacon_int = bss->beacon_int;
523 sdata->bss_conf.timestamp = bss->timestamp;
524 sdata->bss_conf.dtim_period = bss->dtim_period;
525
526 changed |= ieee80211_handle_bss_capability(sdata, bss);
527
528 ieee80211_rx_bss_put(local, bss);
529 }
530 145
531 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { 146 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200);
532 changed |= BSS_CHANGED_HT; 147 if (!skb) {
533 sdata->bss_conf.assoc_ht = 1; 148 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
534 sdata->bss_conf.ht_conf = &conf->ht_conf; 149 "request\n", sdata->dev->name);
535 sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf; 150 return;
536 } 151 }
152 skb_reserve(skb, local->hw.extra_tx_headroom);
537 153
538 ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET; 154 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
539 memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN); 155 memset(mgmt, 0, 24);
540 memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN); 156 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
541 ieee80211_sta_send_associnfo(dev, ifsta); 157 IEEE80211_STYPE_PROBE_REQ);
158 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
159 if (dst) {
160 memcpy(mgmt->da, dst, ETH_ALEN);
161 memcpy(mgmt->bssid, dst, ETH_ALEN);
542 } else { 162 } else {
543 netif_carrier_off(dev); 163 memset(mgmt->da, 0xff, ETH_ALEN);
544 ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid); 164 memset(mgmt->bssid, 0xff, ETH_ALEN);
545 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
546 changed |= ieee80211_reset_erp_info(dev);
547
548 sdata->bss_conf.assoc_ht = 0;
549 sdata->bss_conf.ht_conf = NULL;
550 sdata->bss_conf.ht_bss_conf = NULL;
551
552 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
553 } 165 }
554 ifsta->last_probe = jiffies; 166 pos = skb_put(skb, 2 + ssid_len);
555 ieee80211_led_assoc(local, assoc); 167 *pos++ = WLAN_EID_SSID;
556 168 *pos++ = ssid_len;
557 sdata->bss_conf.assoc = assoc; 169 memcpy(pos, ssid, ssid_len);
558 ieee80211_bss_info_change_notify(sdata, changed);
559
560 if (assoc)
561 netif_carrier_on(dev);
562
563 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
564 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
565}
566
567static void ieee80211_set_disassoc(struct net_device *dev,
568 struct ieee80211_if_sta *ifsta, int deauth)
569{
570 if (deauth)
571 ifsta->auth_tries = 0;
572 ifsta->assoc_tries = 0;
573 ieee80211_set_associated(dev, ifsta, 0);
574}
575
576void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
577 int encrypt)
578{
579 struct ieee80211_sub_if_data *sdata;
580 170
581 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 171 supp_rates = skb_put(skb, 2);
582 skb->dev = sdata->local->mdev; 172 supp_rates[0] = WLAN_EID_SUPP_RATES;
583 skb_set_mac_header(skb, 0); 173 supp_rates[1] = 0;
584 skb_set_network_header(skb, 0); 174 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
585 skb_set_transport_header(skb, 0);
586 175
587 skb->iif = sdata->dev->ifindex; 176 for (i = 0; i < sband->n_bitrates; i++) {
588 skb->do_not_encrypt = !encrypt; 177 struct ieee80211_rate *rate = &sband->bitrates[i];
178 if (esupp_rates) {
179 pos = skb_put(skb, 1);
180 esupp_rates[1]++;
181 } else if (supp_rates[1] == 8) {
182 esupp_rates = skb_put(skb, 3);
183 esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES;
184 esupp_rates[1] = 1;
185 pos = &esupp_rates[2];
186 } else {
187 pos = skb_put(skb, 1);
188 supp_rates[1]++;
189 }
190 *pos = rate->bitrate / 5;
191 }
589 192
590 dev_queue_xmit(skb); 193 ieee80211_tx_skb(sdata, skb, 0);
591} 194}
592 195
593 196static void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
594static void ieee80211_send_auth(struct net_device *dev,
595 struct ieee80211_if_sta *ifsta, 197 struct ieee80211_if_sta *ifsta,
596 int transaction, u8 *extra, size_t extra_len, 198 int transaction, u8 *extra, size_t extra_len,
597 int encrypt) 199 int encrypt)
598{ 200{
599 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 201 struct ieee80211_local *local = sdata->local;
600 struct sk_buff *skb; 202 struct sk_buff *skb;
601 struct ieee80211_mgmt *mgmt; 203 struct ieee80211_mgmt *mgmt;
602 204
@@ -604,19 +206,19 @@ static void ieee80211_send_auth(struct net_device *dev,
604 sizeof(*mgmt) + 6 + extra_len); 206 sizeof(*mgmt) + 6 + extra_len);
605 if (!skb) { 207 if (!skb) {
606 printk(KERN_DEBUG "%s: failed to allocate buffer for auth " 208 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
607 "frame\n", dev->name); 209 "frame\n", sdata->dev->name);
608 return; 210 return;
609 } 211 }
610 skb_reserve(skb, local->hw.extra_tx_headroom); 212 skb_reserve(skb, local->hw.extra_tx_headroom);
611 213
612 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); 214 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
613 memset(mgmt, 0, 24 + 6); 215 memset(mgmt, 0, 24 + 6);
614 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 216 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
615 IEEE80211_STYPE_AUTH); 217 IEEE80211_STYPE_AUTH);
616 if (encrypt) 218 if (encrypt)
617 mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 219 mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
618 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 220 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
619 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 221 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
620 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 222 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
621 mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg); 223 mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg);
622 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); 224 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
@@ -625,64 +227,19 @@ static void ieee80211_send_auth(struct net_device *dev,
625 if (extra) 227 if (extra)
626 memcpy(skb_put(skb, extra_len), extra, extra_len); 228 memcpy(skb_put(skb, extra_len), extra, extra_len);
627 229
628 ieee80211_sta_tx(dev, skb, encrypt); 230 ieee80211_tx_skb(sdata, skb, encrypt);
629}
630
631
632static void ieee80211_authenticate(struct net_device *dev,
633 struct ieee80211_if_sta *ifsta)
634{
635 DECLARE_MAC_BUF(mac);
636
637 ifsta->auth_tries++;
638 if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
639 printk(KERN_DEBUG "%s: authentication with AP %s"
640 " timed out\n",
641 dev->name, print_mac(mac, ifsta->bssid));
642 ifsta->state = IEEE80211_DISABLED;
643 return;
644 }
645
646 ifsta->state = IEEE80211_AUTHENTICATE;
647 printk(KERN_DEBUG "%s: authenticate with AP %s\n",
648 dev->name, print_mac(mac, ifsta->bssid));
649
650 ieee80211_send_auth(dev, ifsta, 1, NULL, 0, 0);
651
652 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
653} 231}
654 232
655static int ieee80211_compatible_rates(struct ieee80211_sta_bss *bss, 233static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
656 struct ieee80211_supported_band *sband,
657 u64 *rates)
658{
659 int i, j, count;
660 *rates = 0;
661 count = 0;
662 for (i = 0; i < bss->supp_rates_len; i++) {
663 int rate = (bss->supp_rates[i] & 0x7F) * 5;
664
665 for (j = 0; j < sband->n_bitrates; j++)
666 if (sband->bitrates[j].bitrate == rate) {
667 *rates |= BIT(j);
668 count++;
669 break;
670 }
671 }
672
673 return count;
674}
675
676static void ieee80211_send_assoc(struct net_device *dev,
677 struct ieee80211_if_sta *ifsta) 234 struct ieee80211_if_sta *ifsta)
678{ 235{
679 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 236 struct ieee80211_local *local = sdata->local;
680 struct sk_buff *skb; 237 struct sk_buff *skb;
681 struct ieee80211_mgmt *mgmt; 238 struct ieee80211_mgmt *mgmt;
682 u8 *pos, *ies; 239 u8 *pos, *ies, *ht_add_ie;
683 int i, len, count, rates_len, supp_rates_len; 240 int i, len, count, rates_len, supp_rates_len;
684 u16 capab; 241 u16 capab;
685 struct ieee80211_sta_bss *bss; 242 struct ieee80211_bss *bss;
686 int wmm = 0; 243 int wmm = 0;
687 struct ieee80211_supported_band *sband; 244 struct ieee80211_supported_band *sband;
688 u64 rates = 0; 245 u64 rates = 0;
@@ -692,7 +249,7 @@ static void ieee80211_send_assoc(struct net_device *dev,
692 ifsta->ssid_len); 249 ifsta->ssid_len);
693 if (!skb) { 250 if (!skb) {
694 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " 251 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
695 "frame\n", dev->name); 252 "frame\n", sdata->dev->name);
696 return; 253 return;
697 } 254 }
698 skb_reserve(skb, local->hw.extra_tx_headroom); 255 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -708,13 +265,13 @@ static void ieee80211_send_assoc(struct net_device *dev,
708 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; 265 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
709 } 266 }
710 267
711 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 268 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
712 local->hw.conf.channel->center_freq, 269 local->hw.conf.channel->center_freq,
713 ifsta->ssid, ifsta->ssid_len); 270 ifsta->ssid, ifsta->ssid_len);
714 if (bss) { 271 if (bss) {
715 if (bss->capability & WLAN_CAPABILITY_PRIVACY) 272 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
716 capab |= WLAN_CAPABILITY_PRIVACY; 273 capab |= WLAN_CAPABILITY_PRIVACY;
717 if (bss->wmm_ie) 274 if (bss->wmm_used)
718 wmm = 1; 275 wmm = 1;
719 276
720 /* get all rates supported by the device and the AP as 277 /* get all rates supported by the device and the AP as
@@ -736,13 +293,13 @@ static void ieee80211_send_assoc(struct net_device *dev,
736 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 293 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
737 memset(mgmt, 0, 24); 294 memset(mgmt, 0, 24);
738 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 295 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
739 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 296 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
740 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 297 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
741 298
742 if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) { 299 if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) {
743 skb_put(skb, 10); 300 skb_put(skb, 10);
744 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 301 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
745 IEEE80211_STYPE_REASSOC_REQ); 302 IEEE80211_STYPE_REASSOC_REQ);
746 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); 303 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
747 mgmt->u.reassoc_req.listen_interval = 304 mgmt->u.reassoc_req.listen_interval =
748 cpu_to_le16(local->hw.conf.listen_interval); 305 cpu_to_le16(local->hw.conf.listen_interval);
@@ -750,8 +307,8 @@ static void ieee80211_send_assoc(struct net_device *dev,
750 ETH_ALEN); 307 ETH_ALEN);
751 } else { 308 } else {
752 skb_put(skb, 4); 309 skb_put(skb, 4);
753 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 310 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
754 IEEE80211_STYPE_ASSOC_REQ); 311 IEEE80211_STYPE_ASSOC_REQ);
755 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); 312 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
756 mgmt->u.reassoc_req.listen_interval = 313 mgmt->u.reassoc_req.listen_interval =
757 cpu_to_le16(local->hw.conf.listen_interval); 314 cpu_to_le16(local->hw.conf.listen_interval);
@@ -836,9 +393,10 @@ static void ieee80211_send_assoc(struct net_device *dev,
836 393
837 /* wmm support is a must to HT */ 394 /* wmm support is a must to HT */
838 if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) && 395 if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) &&
839 sband->ht_info.ht_supported && bss->ht_add_ie) { 396 sband->ht_info.ht_supported &&
397 (ht_add_ie = ieee80211_bss_get_ie(bss, WLAN_EID_HT_EXTRA_INFO))) {
840 struct ieee80211_ht_addt_info *ht_add_info = 398 struct ieee80211_ht_addt_info *ht_add_info =
841 (struct ieee80211_ht_addt_info *)bss->ht_add_ie; 399 (struct ieee80211_ht_addt_info *)ht_add_ie;
842 u16 cap = sband->ht_info.cap; 400 u16 cap = sband->ht_info.cap;
843 __le16 tmp; 401 __le16 tmp;
844 u32 flags = local->hw.conf.channel->flags; 402 u32 flags = local->hw.conf.channel->flags;
@@ -877,21 +435,22 @@ static void ieee80211_send_assoc(struct net_device *dev,
877 if (ifsta->assocreq_ies) 435 if (ifsta->assocreq_ies)
878 memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len); 436 memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len);
879 437
880 ieee80211_sta_tx(dev, skb, 0); 438 ieee80211_tx_skb(sdata, skb, 0);
881} 439}
882 440
883 441
884static void ieee80211_send_deauth(struct net_device *dev, 442static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
885 struct ieee80211_if_sta *ifsta, u16 reason) 443 u16 stype, u16 reason)
886{ 444{
887 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 445 struct ieee80211_local *local = sdata->local;
446 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
888 struct sk_buff *skb; 447 struct sk_buff *skb;
889 struct ieee80211_mgmt *mgmt; 448 struct ieee80211_mgmt *mgmt;
890 449
891 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 450 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
892 if (!skb) { 451 if (!skb) {
893 printk(KERN_DEBUG "%s: failed to allocate buffer for deauth " 452 printk(KERN_DEBUG "%s: failed to allocate buffer for "
894 "frame\n", dev->name); 453 "deauth/disassoc frame\n", sdata->dev->name);
895 return; 454 return;
896 } 455 }
897 skb_reserve(skb, local->hw.extra_tx_headroom); 456 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -899,940 +458,561 @@ static void ieee80211_send_deauth(struct net_device *dev,
899 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 458 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
900 memset(mgmt, 0, 24); 459 memset(mgmt, 0, 24);
901 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 460 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
902 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 461 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
903 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 462 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
904 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 463 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
905 IEEE80211_STYPE_DEAUTH);
906 skb_put(skb, 2); 464 skb_put(skb, 2);
465 /* u.deauth.reason_code == u.disassoc.reason_code */
907 mgmt->u.deauth.reason_code = cpu_to_le16(reason); 466 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
908 467
909 ieee80211_sta_tx(dev, skb, 0); 468 ieee80211_tx_skb(sdata, skb, 0);
910} 469}
911 470
912 471/* MLME */
913static void ieee80211_send_disassoc(struct net_device *dev, 472static void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
914 struct ieee80211_if_sta *ifsta, u16 reason) 473 struct ieee80211_bss *bss)
915{ 474{
916 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 475 struct ieee80211_local *local = sdata->local;
917 struct sk_buff *skb; 476 int i, have_higher_than_11mbit = 0;
918 struct ieee80211_mgmt *mgmt;
919 477
920 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 478 /* cf. IEEE 802.11 9.2.12 */
921 if (!skb) { 479 for (i = 0; i < bss->supp_rates_len; i++)
922 printk(KERN_DEBUG "%s: failed to allocate buffer for disassoc " 480 if ((bss->supp_rates[i] & 0x7f) * 5 > 110)
923 "frame\n", dev->name); 481 have_higher_than_11mbit = 1;
924 return;
925 }
926 skb_reserve(skb, local->hw.extra_tx_headroom);
927 482
928 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 483 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
929 memset(mgmt, 0, 24); 484 have_higher_than_11mbit)
930 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 485 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
931 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 486 else
932 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 487 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
933 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
934 IEEE80211_STYPE_DISASSOC);
935 skb_put(skb, 2);
936 mgmt->u.disassoc.reason_code = cpu_to_le16(reason);
937 488
938 ieee80211_sta_tx(dev, skb, 0); 489 ieee80211_set_wmm_default(sdata);
939} 490}
940 491
941 492static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
942static int ieee80211_privacy_mismatch(struct net_device *dev, 493 struct ieee80211_if_sta *ifsta,
943 struct ieee80211_if_sta *ifsta) 494 u8 *wmm_param, size_t wmm_param_len)
944{ 495{
945 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 496 struct ieee80211_tx_queue_params params;
946 struct ieee80211_sta_bss *bss; 497 size_t left;
947 int bss_privacy; 498 int count;
948 int wep_privacy; 499 u8 *pos;
949 int privacy_invoked;
950
951 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL))
952 return 0;
953
954 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
955 local->hw.conf.channel->center_freq,
956 ifsta->ssid, ifsta->ssid_len);
957 if (!bss)
958 return 0;
959
960 bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY);
961 wep_privacy = !!ieee80211_sta_wep_configured(dev);
962 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED);
963 500
964 ieee80211_rx_bss_put(local, bss); 501 if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED))
502 return;
965 503
966 if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked)) 504 if (!wmm_param)
967 return 0; 505 return;
968 506
969 return 1; 507 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
970} 508 return;
509 count = wmm_param[6] & 0x0f;
510 if (count == ifsta->wmm_last_param_set)
511 return;
512 ifsta->wmm_last_param_set = count;
971 513
514 pos = wmm_param + 8;
515 left = wmm_param_len - 8;
972 516
973static void ieee80211_associate(struct net_device *dev, 517 memset(&params, 0, sizeof(params));
974 struct ieee80211_if_sta *ifsta)
975{
976 DECLARE_MAC_BUF(mac);
977 518
978 ifsta->assoc_tries++; 519 if (!local->ops->conf_tx)
979 if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) {
980 printk(KERN_DEBUG "%s: association with AP %s"
981 " timed out\n",
982 dev->name, print_mac(mac, ifsta->bssid));
983 ifsta->state = IEEE80211_DISABLED;
984 return; 520 return;
985 }
986 521
987 ifsta->state = IEEE80211_ASSOCIATE; 522 local->wmm_acm = 0;
988 printk(KERN_DEBUG "%s: associate with AP %s\n", 523 for (; left >= 4; left -= 4, pos += 4) {
989 dev->name, print_mac(mac, ifsta->bssid)); 524 int aci = (pos[0] >> 5) & 0x03;
990 if (ieee80211_privacy_mismatch(dev, ifsta)) { 525 int acm = (pos[0] >> 4) & 0x01;
991 printk(KERN_DEBUG "%s: mismatch in privacy configuration and " 526 int queue;
992 "mixed-cell disabled - abort association\n", dev->name);
993 ifsta->state = IEEE80211_DISABLED;
994 return;
995 }
996 527
997 ieee80211_send_assoc(dev, ifsta); 528 switch (aci) {
529 case 1:
530 queue = 3;
531 if (acm)
532 local->wmm_acm |= BIT(0) | BIT(3);
533 break;
534 case 2:
535 queue = 1;
536 if (acm)
537 local->wmm_acm |= BIT(4) | BIT(5);
538 break;
539 case 3:
540 queue = 0;
541 if (acm)
542 local->wmm_acm |= BIT(6) | BIT(7);
543 break;
544 case 0:
545 default:
546 queue = 2;
547 if (acm)
548 local->wmm_acm |= BIT(1) | BIT(2);
549 break;
550 }
998 551
999 mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); 552 params.aifs = pos[0] & 0x0f;
553 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
554 params.cw_min = ecw2cw(pos[1] & 0x0f);
555 params.txop = get_unaligned_le16(pos + 2);
556#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
557 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
558 "cWmin=%d cWmax=%d txop=%d\n",
559 local->mdev->name, queue, aci, acm, params.aifs, params.cw_min,
560 params.cw_max, params.txop);
561#endif
562 /* TODO: handle ACM (block TX, fallback to next lowest allowed
563 * AC for now) */
564 if (local->ops->conf_tx(local_to_hw(local), queue, &params)) {
565 printk(KERN_DEBUG "%s: failed to set TX queue "
566 "parameters for queue %d\n", local->mdev->name, queue);
567 }
568 }
1000} 569}
1001 570
1002 571static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
1003static void ieee80211_associated(struct net_device *dev, 572 bool use_protection,
1004 struct ieee80211_if_sta *ifsta) 573 bool use_short_preamble)
1005{ 574{
1006 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 575 struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf;
1007 struct sta_info *sta; 576#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1008 int disassoc; 577 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1009 DECLARE_MAC_BUF(mac); 578 DECLARE_MAC_BUF(mac);
579#endif
580 u32 changed = 0;
1010 581
1011 /* TODO: start monitoring current AP signal quality and number of 582 if (use_protection != bss_conf->use_cts_prot) {
1012 * missed beacons. Scan other channels every now and then and search 583#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1013 * for better APs. */ 584 if (net_ratelimit()) {
1014 /* TODO: remove expired BSSes */ 585 printk(KERN_DEBUG "%s: CTS protection %s (BSSID="
1015 586 "%s)\n",
1016 ifsta->state = IEEE80211_ASSOCIATED; 587 sdata->dev->name,
1017 588 use_protection ? "enabled" : "disabled",
1018 rcu_read_lock(); 589 print_mac(mac, ifsta->bssid));
1019
1020 sta = sta_info_get(local, ifsta->bssid);
1021 if (!sta) {
1022 printk(KERN_DEBUG "%s: No STA entry for own AP %s\n",
1023 dev->name, print_mac(mac, ifsta->bssid));
1024 disassoc = 1;
1025 } else {
1026 disassoc = 0;
1027 if (time_after(jiffies,
1028 sta->last_rx + IEEE80211_MONITORING_INTERVAL)) {
1029 if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) {
1030 printk(KERN_DEBUG "%s: No ProbeResp from "
1031 "current AP %s - assume out of "
1032 "range\n",
1033 dev->name, print_mac(mac, ifsta->bssid));
1034 disassoc = 1;
1035 sta_info_unlink(&sta);
1036 } else
1037 ieee80211_send_probe_req(dev, ifsta->bssid,
1038 local->scan_ssid,
1039 local->scan_ssid_len);
1040 ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL;
1041 } else {
1042 ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1043 if (time_after(jiffies, ifsta->last_probe +
1044 IEEE80211_PROBE_INTERVAL)) {
1045 ifsta->last_probe = jiffies;
1046 ieee80211_send_probe_req(dev, ifsta->bssid,
1047 ifsta->ssid,
1048 ifsta->ssid_len);
1049 }
1050 } 590 }
591#endif
592 bss_conf->use_cts_prot = use_protection;
593 changed |= BSS_CHANGED_ERP_CTS_PROT;
1051 } 594 }
1052 595
1053 rcu_read_unlock(); 596 if (use_short_preamble != bss_conf->use_short_preamble) {
1054 597#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1055 if (disassoc && sta) 598 if (net_ratelimit()) {
1056 sta_info_destroy(sta); 599 printk(KERN_DEBUG "%s: switched to %s barker preamble"
1057 600 " (BSSID=%s)\n",
1058 if (disassoc) { 601 sdata->dev->name,
1059 ifsta->state = IEEE80211_DISABLED; 602 use_short_preamble ? "short" : "long",
1060 ieee80211_set_associated(dev, ifsta, 0); 603 print_mac(mac, ifsta->bssid));
1061 } else { 604 }
1062 mod_timer(&ifsta->timer, jiffies + 605#endif
1063 IEEE80211_MONITORING_INTERVAL); 606 bss_conf->use_short_preamble = use_short_preamble;
607 changed |= BSS_CHANGED_ERP_PREAMBLE;
1064 } 608 }
1065}
1066 609
610 return changed;
611}
1067 612
1068static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, 613static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata,
1069 u8 *ssid, size_t ssid_len) 614 u8 erp_value)
1070{ 615{
1071 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 616 bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
1072 struct ieee80211_supported_band *sband; 617 bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0;
1073 struct sk_buff *skb;
1074 struct ieee80211_mgmt *mgmt;
1075 u8 *pos, *supp_rates, *esupp_rates = NULL;
1076 int i;
1077
1078 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200);
1079 if (!skb) {
1080 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
1081 "request\n", dev->name);
1082 return;
1083 }
1084 skb_reserve(skb, local->hw.extra_tx_headroom);
1085 618
1086 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 619 return ieee80211_handle_protect_preamb(sdata,
1087 memset(mgmt, 0, 24); 620 use_protection, use_short_preamble);
1088 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 621}
1089 IEEE80211_STYPE_PROBE_REQ);
1090 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
1091 if (dst) {
1092 memcpy(mgmt->da, dst, ETH_ALEN);
1093 memcpy(mgmt->bssid, dst, ETH_ALEN);
1094 } else {
1095 memset(mgmt->da, 0xff, ETH_ALEN);
1096 memset(mgmt->bssid, 0xff, ETH_ALEN);
1097 }
1098 pos = skb_put(skb, 2 + ssid_len);
1099 *pos++ = WLAN_EID_SSID;
1100 *pos++ = ssid_len;
1101 memcpy(pos, ssid, ssid_len);
1102 622
1103 supp_rates = skb_put(skb, 2); 623static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
1104 supp_rates[0] = WLAN_EID_SUPP_RATES; 624 struct ieee80211_bss *bss)
1105 supp_rates[1] = 0; 625{
1106 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 626 u32 changed = 0;
1107 627
1108 for (i = 0; i < sband->n_bitrates; i++) { 628 if (bss->has_erp_value)
1109 struct ieee80211_rate *rate = &sband->bitrates[i]; 629 changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value);
1110 if (esupp_rates) { 630 else {
1111 pos = skb_put(skb, 1); 631 u16 capab = bss->capability;
1112 esupp_rates[1]++; 632 changed |= ieee80211_handle_protect_preamb(sdata, false,
1113 } else if (supp_rates[1] == 8) { 633 (capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0);
1114 esupp_rates = skb_put(skb, 3);
1115 esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES;
1116 esupp_rates[1] = 1;
1117 pos = &esupp_rates[2];
1118 } else {
1119 pos = skb_put(skb, 1);
1120 supp_rates[1]++;
1121 }
1122 *pos = rate->bitrate / 5;
1123 } 634 }
1124 635
1125 ieee80211_sta_tx(dev, skb, 0); 636 return changed;
1126} 637}
1127 638
639static void ieee80211_sta_send_apinfo(struct ieee80211_sub_if_data *sdata,
640 struct ieee80211_if_sta *ifsta)
641{
642 union iwreq_data wrqu;
643 memset(&wrqu, 0, sizeof(wrqu));
644 if (ifsta->flags & IEEE80211_STA_ASSOCIATED)
645 memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN);
646 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
647 wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
648}
1128 649
1129static int ieee80211_sta_wep_configured(struct net_device *dev) 650static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata,
651 struct ieee80211_if_sta *ifsta)
1130{ 652{
1131 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 653 union iwreq_data wrqu;
1132 if (!sdata || !sdata->default_key || 654
1133 sdata->default_key->conf.alg != ALG_WEP) 655 if (ifsta->assocreq_ies) {
1134 return 0; 656 memset(&wrqu, 0, sizeof(wrqu));
1135 return 1; 657 wrqu.data.length = ifsta->assocreq_ies_len;
658 wireless_send_event(sdata->dev, IWEVASSOCREQIE, &wrqu,
659 ifsta->assocreq_ies);
660 }
661 if (ifsta->assocresp_ies) {
662 memset(&wrqu, 0, sizeof(wrqu));
663 wrqu.data.length = ifsta->assocresp_ies_len;
664 wireless_send_event(sdata->dev, IWEVASSOCRESPIE, &wrqu,
665 ifsta->assocresp_ies);
666 }
1136} 667}
1137 668
1138 669
1139static void ieee80211_auth_completed(struct net_device *dev, 670static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1140 struct ieee80211_if_sta *ifsta) 671 struct ieee80211_if_sta *ifsta)
1141{ 672{
1142 printk(KERN_DEBUG "%s: authenticated\n", dev->name); 673 struct ieee80211_local *local = sdata->local;
1143 ifsta->flags |= IEEE80211_STA_AUTHENTICATED; 674 struct ieee80211_conf *conf = &local_to_hw(local)->conf;
1144 ieee80211_associate(dev, ifsta); 675 u32 changed = BSS_CHANGED_ASSOC;
1145}
1146 676
677 struct ieee80211_bss *bss;
1147 678
1148static void ieee80211_auth_challenge(struct net_device *dev, 679 ifsta->flags |= IEEE80211_STA_ASSOCIATED;
1149 struct ieee80211_if_sta *ifsta,
1150 struct ieee80211_mgmt *mgmt,
1151 size_t len)
1152{
1153 u8 *pos;
1154 struct ieee802_11_elems elems;
1155 680
1156 pos = mgmt->u.auth.variable; 681 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1157 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1158 if (!elems.challenge)
1159 return; 682 return;
1160 ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2,
1161 elems.challenge_len + 2, 1);
1162}
1163 683
1164static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid, 684 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
1165 u8 dialog_token, u16 status, u16 policy, 685 conf->channel->center_freq,
1166 u16 buf_size, u16 timeout) 686 ifsta->ssid, ifsta->ssid_len);
1167{ 687 if (bss) {
1168 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 688 /* set timing information */
1169 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 689 sdata->bss_conf.beacon_int = bss->beacon_int;
1170 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 690 sdata->bss_conf.timestamp = bss->timestamp;
1171 struct sk_buff *skb; 691 sdata->bss_conf.dtim_period = bss->dtim_period;
1172 struct ieee80211_mgmt *mgmt;
1173 u16 capab;
1174 692
1175 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); 693 changed |= ieee80211_handle_bss_capability(sdata, bss);
1176 694
1177 if (!skb) { 695 ieee80211_rx_bss_put(local, bss);
1178 printk(KERN_DEBUG "%s: failed to allocate buffer "
1179 "for addba resp frame\n", dev->name);
1180 return;
1181 } 696 }
1182 697
1183 skb_reserve(skb, local->hw.extra_tx_headroom); 698 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
1184 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 699 changed |= BSS_CHANGED_HT;
1185 memset(mgmt, 0, 24); 700 sdata->bss_conf.assoc_ht = 1;
1186 memcpy(mgmt->da, da, ETH_ALEN); 701 sdata->bss_conf.ht_conf = &conf->ht_conf;
1187 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 702 sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf;
1188 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) 703 }
1189 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN);
1190 else
1191 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1192 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1193 IEEE80211_STYPE_ACTION);
1194 704
1195 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); 705 ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET;
1196 mgmt->u.action.category = WLAN_CATEGORY_BACK; 706 memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN);
1197 mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; 707 ieee80211_sta_send_associnfo(sdata, ifsta);
1198 mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
1199 708
1200 capab = (u16)(policy << 1); /* bit 1 aggregation policy */ 709 ifsta->last_probe = jiffies;
1201 capab |= (u16)(tid << 2); /* bit 5:2 TID number */ 710 ieee80211_led_assoc(local, 1);
1202 capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */
1203 711
1204 mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); 712 sdata->bss_conf.assoc = 1;
1205 mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); 713 /*
1206 mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); 714 * For now just always ask the driver to update the basic rateset
715 * when we have associated, we aren't checking whether it actually
716 * changed or not.
717 */
718 changed |= BSS_CHANGED_BASIC_RATES;
719 ieee80211_bss_info_change_notify(sdata, changed);
1207 720
1208 ieee80211_sta_tx(dev, skb, 0); 721 netif_tx_start_all_queues(sdata->dev);
722 netif_carrier_on(sdata->dev);
1209 723
1210 return; 724 ieee80211_sta_send_apinfo(sdata, ifsta);
1211} 725}
1212 726
1213void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, 727static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
1214 u16 tid, u8 dialog_token, u16 start_seq_num, 728 struct ieee80211_if_sta *ifsta)
1215 u16 agg_size, u16 timeout)
1216{ 729{
1217 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 730 DECLARE_MAC_BUF(mac);
1218 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1219 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1220 struct sk_buff *skb;
1221 struct ieee80211_mgmt *mgmt;
1222 u16 capab;
1223
1224 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1225 731
1226 if (!skb) { 732 ifsta->direct_probe_tries++;
1227 printk(KERN_ERR "%s: failed to allocate buffer " 733 if (ifsta->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) {
1228 "for addba request frame\n", dev->name); 734 printk(KERN_DEBUG "%s: direct probe to AP %s timed out\n",
735 sdata->dev->name, print_mac(mac, ifsta->bssid));
736 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1229 return; 737 return;
1230 } 738 }
1231 skb_reserve(skb, local->hw.extra_tx_headroom);
1232 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
1233 memset(mgmt, 0, 24);
1234 memcpy(mgmt->da, da, ETH_ALEN);
1235 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
1236 if (sdata->vif.type == IEEE80211_IF_TYPE_AP)
1237 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN);
1238 else
1239 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1240
1241 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1242 IEEE80211_STYPE_ACTION);
1243 739
1244 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); 740 printk(KERN_DEBUG "%s: direct probe to AP %s try %d\n",
741 sdata->dev->name, print_mac(mac, ifsta->bssid),
742 ifsta->direct_probe_tries);
1245 743
1246 mgmt->u.action.category = WLAN_CATEGORY_BACK; 744 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
1247 mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
1248 745
1249 mgmt->u.action.u.addba_req.dialog_token = dialog_token; 746 set_bit(IEEE80211_STA_REQ_DIRECT_PROBE, &ifsta->request);
1250 capab = (u16)(1 << 1); /* bit 1 aggregation policy */
1251 capab |= (u16)(tid << 2); /* bit 5:2 TID number */
1252 capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */
1253 747
1254 mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab); 748 /* Direct probe is sent to broadcast address as some APs
1255 749 * will not answer to direct packet in unassociated state.
1256 mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout); 750 */
1257 mgmt->u.action.u.addba_req.start_seq_num = 751 ieee80211_send_probe_req(sdata, NULL,
1258 cpu_to_le16(start_seq_num << 4); 752 ifsta->ssid, ifsta->ssid_len);
1259 753
1260 ieee80211_sta_tx(dev, skb, 0); 754 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
1261} 755}
1262 756
1263static void ieee80211_sta_process_addba_request(struct net_device *dev, 757
1264 struct ieee80211_mgmt *mgmt, 758static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
1265 size_t len) 759 struct ieee80211_if_sta *ifsta)
1266{ 760{
1267 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1268 struct ieee80211_hw *hw = &local->hw;
1269 struct ieee80211_conf *conf = &hw->conf;
1270 struct sta_info *sta;
1271 struct tid_ampdu_rx *tid_agg_rx;
1272 u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
1273 u8 dialog_token;
1274 int ret = -EOPNOTSUPP;
1275 DECLARE_MAC_BUF(mac); 761 DECLARE_MAC_BUF(mac);
1276 762
1277 rcu_read_lock(); 763 ifsta->auth_tries++;
1278 764 if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
1279 sta = sta_info_get(local, mgmt->sa); 765 printk(KERN_DEBUG "%s: authentication with AP %s"
1280 if (!sta) { 766 " timed out\n",
1281 rcu_read_unlock(); 767 sdata->dev->name, print_mac(mac, ifsta->bssid));
768 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1282 return; 769 return;
1283 } 770 }
1284 771
1285 /* extract session parameters from addba request frame */ 772 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
1286 dialog_token = mgmt->u.action.u.addba_req.dialog_token; 773 printk(KERN_DEBUG "%s: authenticate with AP %s\n",
1287 timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); 774 sdata->dev->name, print_mac(mac, ifsta->bssid));
1288 start_seq_num =
1289 le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
1290
1291 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
1292 ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
1293 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
1294 buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
1295
1296 status = WLAN_STATUS_REQUEST_DECLINED;
1297
1298 /* sanity check for incoming parameters:
1299 * check if configuration can support the BA policy
1300 * and if buffer size does not exceeds max value */
1301 if (((ba_policy != 1)
1302 && (!(conf->ht_conf.cap & IEEE80211_HT_CAP_DELAY_BA)))
1303 || (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
1304 status = WLAN_STATUS_INVALID_QOS_PARAM;
1305#ifdef CONFIG_MAC80211_HT_DEBUG
1306 if (net_ratelimit())
1307 printk(KERN_DEBUG "AddBA Req with bad params from "
1308 "%s on tid %u. policy %d, buffer size %d\n",
1309 print_mac(mac, mgmt->sa), tid, ba_policy,
1310 buf_size);
1311#endif /* CONFIG_MAC80211_HT_DEBUG */
1312 goto end_no_lock;
1313 }
1314 /* determine default buffer size */
1315 if (buf_size == 0) {
1316 struct ieee80211_supported_band *sband;
1317
1318 sband = local->hw.wiphy->bands[conf->channel->band];
1319 buf_size = IEEE80211_MIN_AMPDU_BUF;
1320 buf_size = buf_size << sband->ht_info.ampdu_factor;
1321 }
1322
1323 775
1324 /* examine state machine */ 776 ieee80211_send_auth(sdata, ifsta, 1, NULL, 0, 0);
1325 spin_lock_bh(&sta->lock);
1326 777
1327 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { 778 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
1328#ifdef CONFIG_MAC80211_HT_DEBUG
1329 if (net_ratelimit())
1330 printk(KERN_DEBUG "unexpected AddBA Req from "
1331 "%s on tid %u\n",
1332 print_mac(mac, mgmt->sa), tid);
1333#endif /* CONFIG_MAC80211_HT_DEBUG */
1334 goto end;
1335 }
1336
1337 /* prepare A-MPDU MLME for Rx aggregation */
1338 sta->ampdu_mlme.tid_rx[tid] =
1339 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
1340 if (!sta->ampdu_mlme.tid_rx[tid]) {
1341#ifdef CONFIG_MAC80211_HT_DEBUG
1342 if (net_ratelimit())
1343 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
1344 tid);
1345#endif
1346 goto end;
1347 }
1348 /* rx timer */
1349 sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
1350 sta_rx_agg_session_timer_expired;
1351 sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
1352 (unsigned long)&sta->timer_to_tid[tid];
1353 init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
1354
1355 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
1356
1357 /* prepare reordering buffer */
1358 tid_agg_rx->reorder_buf =
1359 kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
1360 if (!tid_agg_rx->reorder_buf) {
1361#ifdef CONFIG_MAC80211_HT_DEBUG
1362 if (net_ratelimit())
1363 printk(KERN_ERR "can not allocate reordering buffer "
1364 "to tid %d\n", tid);
1365#endif
1366 kfree(sta->ampdu_mlme.tid_rx[tid]);
1367 goto end;
1368 }
1369 memset(tid_agg_rx->reorder_buf, 0,
1370 buf_size * sizeof(struct sk_buff *));
1371
1372 if (local->ops->ampdu_action)
1373 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
1374 sta->addr, tid, &start_seq_num);
1375#ifdef CONFIG_MAC80211_HT_DEBUG
1376 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
1377#endif /* CONFIG_MAC80211_HT_DEBUG */
1378
1379 if (ret) {
1380 kfree(tid_agg_rx->reorder_buf);
1381 kfree(tid_agg_rx);
1382 sta->ampdu_mlme.tid_rx[tid] = NULL;
1383 goto end;
1384 }
1385
1386 /* change state and send addba resp */
1387 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
1388 tid_agg_rx->dialog_token = dialog_token;
1389 tid_agg_rx->ssn = start_seq_num;
1390 tid_agg_rx->head_seq_num = start_seq_num;
1391 tid_agg_rx->buf_size = buf_size;
1392 tid_agg_rx->timeout = timeout;
1393 tid_agg_rx->stored_mpdu_num = 0;
1394 status = WLAN_STATUS_SUCCESS;
1395end:
1396 spin_unlock_bh(&sta->lock);
1397
1398end_no_lock:
1399 ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid,
1400 dialog_token, status, 1, buf_size, timeout);
1401 rcu_read_unlock();
1402} 779}
1403 780
1404static void ieee80211_sta_process_addba_resp(struct net_device *dev, 781static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1405 struct ieee80211_mgmt *mgmt, 782 struct ieee80211_if_sta *ifsta, bool deauth,
1406 size_t len) 783 bool self_disconnected, u16 reason)
1407{ 784{
1408 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 785 struct ieee80211_local *local = sdata->local;
1409 struct ieee80211_hw *hw = &local->hw;
1410 struct sta_info *sta; 786 struct sta_info *sta;
1411 u16 capab; 787 u32 changed = BSS_CHANGED_ASSOC;
1412 u16 tid;
1413 u8 *state;
1414 788
1415 rcu_read_lock(); 789 rcu_read_lock();
1416 790
1417 sta = sta_info_get(local, mgmt->sa); 791 sta = sta_info_get(local, ifsta->bssid);
1418 if (!sta) { 792 if (!sta) {
1419 rcu_read_unlock(); 793 rcu_read_unlock();
1420 return; 794 return;
1421 } 795 }
1422 796
1423 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); 797 if (deauth) {
1424 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; 798 ifsta->direct_probe_tries = 0;
799 ifsta->auth_tries = 0;
800 }
801 ifsta->assoc_scan_tries = 0;
802 ifsta->assoc_tries = 0;
1425 803
1426 state = &sta->ampdu_mlme.tid_state_tx[tid]; 804 netif_tx_stop_all_queues(sdata->dev);
805 netif_carrier_off(sdata->dev);
1427 806
1428 spin_lock_bh(&sta->lock); 807 ieee80211_sta_tear_down_BA_sessions(sdata, sta->sta.addr);
1429 808
1430 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 809 if (self_disconnected) {
1431 spin_unlock_bh(&sta->lock); 810 if (deauth)
1432 goto addba_resp_exit; 811 ieee80211_send_deauth_disassoc(sdata,
812 IEEE80211_STYPE_DEAUTH, reason);
813 else
814 ieee80211_send_deauth_disassoc(sdata,
815 IEEE80211_STYPE_DISASSOC, reason);
1433 } 816 }
1434 817
1435 if (mgmt->u.action.u.addba_resp.dialog_token != 818 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
1436 sta->ampdu_mlme.tid_tx[tid]->dialog_token) { 819 changed |= ieee80211_reset_erp_info(sdata);
1437 spin_unlock_bh(&sta->lock);
1438#ifdef CONFIG_MAC80211_HT_DEBUG
1439 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
1440#endif /* CONFIG_MAC80211_HT_DEBUG */
1441 goto addba_resp_exit;
1442 }
1443 820
1444 del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); 821 if (sdata->bss_conf.assoc_ht)
1445#ifdef CONFIG_MAC80211_HT_DEBUG 822 changed |= BSS_CHANGED_HT;
1446 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
1447#endif /* CONFIG_MAC80211_HT_DEBUG */
1448 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
1449 == WLAN_STATUS_SUCCESS) {
1450 *state |= HT_ADDBA_RECEIVED_MSK;
1451 sta->ampdu_mlme.addba_req_num[tid] = 0;
1452 823
1453 if (*state == HT_AGG_STATE_OPERATIONAL) 824 sdata->bss_conf.assoc_ht = 0;
1454 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); 825 sdata->bss_conf.ht_conf = NULL;
826 sdata->bss_conf.ht_bss_conf = NULL;
1455 827
1456 spin_unlock_bh(&sta->lock); 828 ieee80211_led_assoc(local, 0);
1457 } else { 829 sdata->bss_conf.assoc = 0;
1458 sta->ampdu_mlme.addba_req_num[tid]++; 830
1459 /* this will allow the state check in stop_BA_session */ 831 ieee80211_sta_send_apinfo(sdata, ifsta);
1460 *state = HT_AGG_STATE_OPERATIONAL; 832
1461 spin_unlock_bh(&sta->lock); 833 if (self_disconnected)
1462 ieee80211_stop_tx_ba_session(hw, sta->addr, tid, 834 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1463 WLAN_BACK_INITIATOR); 835
1464 } 836 sta_info_unlink(&sta);
1465 837
1466addba_resp_exit:
1467 rcu_read_unlock(); 838 rcu_read_unlock();
839
840 sta_info_destroy(sta);
1468} 841}
1469 842
1470void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, 843static int ieee80211_sta_wep_configured(struct ieee80211_sub_if_data *sdata)
1471 u16 initiator, u16 reason_code)
1472{ 844{
1473 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 845 if (!sdata || !sdata->default_key ||
1474 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 846 sdata->default_key->conf.alg != ALG_WEP)
1475 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 847 return 0;
1476 struct sk_buff *skb; 848 return 1;
1477 struct ieee80211_mgmt *mgmt; 849}
1478 u16 params;
1479
1480 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1481
1482 if (!skb) {
1483 printk(KERN_ERR "%s: failed to allocate buffer "
1484 "for delba frame\n", dev->name);
1485 return;
1486 }
1487 850
1488 skb_reserve(skb, local->hw.extra_tx_headroom); 851static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata,
1489 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 852 struct ieee80211_if_sta *ifsta)
1490 memset(mgmt, 0, 24); 853{
1491 memcpy(mgmt->da, da, ETH_ALEN); 854 struct ieee80211_local *local = sdata->local;
1492 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 855 struct ieee80211_bss *bss;
1493 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) 856 int bss_privacy;
1494 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); 857 int wep_privacy;
1495 else 858 int privacy_invoked;
1496 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1497 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1498 IEEE80211_STYPE_ACTION);
1499 859
1500 skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); 860 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL))
861 return 0;
1501 862
1502 mgmt->u.action.category = WLAN_CATEGORY_BACK; 863 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
1503 mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; 864 local->hw.conf.channel->center_freq,
1504 params = (u16)(initiator << 11); /* bit 11 initiator */ 865 ifsta->ssid, ifsta->ssid_len);
1505 params |= (u16)(tid << 12); /* bit 15:12 TID number */ 866 if (!bss)
867 return 0;
1506 868
1507 mgmt->u.action.u.delba.params = cpu_to_le16(params); 869 bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY);
1508 mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); 870 wep_privacy = !!ieee80211_sta_wep_configured(sdata);
871 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED);
1509 872
1510 ieee80211_sta_tx(dev, skb, 0); 873 ieee80211_rx_bss_put(local, bss);
1511}
1512 874
1513void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn) 875 if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked))
1514{ 876 return 0;
1515 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1516 struct sk_buff *skb;
1517 struct ieee80211_bar *bar;
1518 u16 bar_control = 0;
1519 877
1520 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); 878 return 1;
1521 if (!skb) {
1522 printk(KERN_ERR "%s: failed to allocate buffer for "
1523 "bar frame\n", dev->name);
1524 return;
1525 }
1526 skb_reserve(skb, local->hw.extra_tx_headroom);
1527 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
1528 memset(bar, 0, sizeof(*bar));
1529 bar->frame_control = IEEE80211_FC(IEEE80211_FTYPE_CTL,
1530 IEEE80211_STYPE_BACK_REQ);
1531 memcpy(bar->ra, ra, ETH_ALEN);
1532 memcpy(bar->ta, dev->dev_addr, ETH_ALEN);
1533 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
1534 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
1535 bar_control |= (u16)(tid << 12);
1536 bar->control = cpu_to_le16(bar_control);
1537 bar->start_seq_num = cpu_to_le16(ssn);
1538
1539 ieee80211_sta_tx(dev, skb, 0);
1540} 879}
1541 880
1542void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, 881static void ieee80211_associate(struct ieee80211_sub_if_data *sdata,
1543 u16 initiator, u16 reason) 882 struct ieee80211_if_sta *ifsta)
1544{ 883{
1545 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1546 struct ieee80211_hw *hw = &local->hw;
1547 struct sta_info *sta;
1548 int ret, i;
1549 DECLARE_MAC_BUF(mac); 884 DECLARE_MAC_BUF(mac);
1550 885
1551 rcu_read_lock(); 886 ifsta->assoc_tries++;
1552 887 if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) {
1553 sta = sta_info_get(local, ra); 888 printk(KERN_DEBUG "%s: association with AP %s"
1554 if (!sta) { 889 " timed out\n",
1555 rcu_read_unlock(); 890 sdata->dev->name, print_mac(mac, ifsta->bssid));
891 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1556 return; 892 return;
1557 } 893 }
1558 894
1559 /* check if TID is in operational state */ 895 ifsta->state = IEEE80211_STA_MLME_ASSOCIATE;
1560 spin_lock_bh(&sta->lock); 896 printk(KERN_DEBUG "%s: associate with AP %s\n",
1561 if (sta->ampdu_mlme.tid_state_rx[tid] 897 sdata->dev->name, print_mac(mac, ifsta->bssid));
1562 != HT_AGG_STATE_OPERATIONAL) { 898 if (ieee80211_privacy_mismatch(sdata, ifsta)) {
1563 spin_unlock_bh(&sta->lock); 899 printk(KERN_DEBUG "%s: mismatch in privacy configuration and "
1564 rcu_read_unlock(); 900 "mixed-cell disabled - abort association\n", sdata->dev->name);
901 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1565 return; 902 return;
1566 } 903 }
1567 sta->ampdu_mlme.tid_state_rx[tid] =
1568 HT_AGG_STATE_REQ_STOP_BA_MSK |
1569 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
1570 spin_unlock_bh(&sta->lock);
1571
1572 /* stop HW Rx aggregation. ampdu_action existence
1573 * already verified in session init so we add the BUG_ON */
1574 BUG_ON(!local->ops->ampdu_action);
1575
1576#ifdef CONFIG_MAC80211_HT_DEBUG
1577 printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n",
1578 print_mac(mac, ra), tid);
1579#endif /* CONFIG_MAC80211_HT_DEBUG */
1580
1581 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
1582 ra, tid, NULL);
1583 if (ret)
1584 printk(KERN_DEBUG "HW problem - can not stop rx "
1585 "aggregation for tid %d\n", tid);
1586
1587 /* shutdown timer has not expired */
1588 if (initiator != WLAN_BACK_TIMER)
1589 del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
1590
1591 /* check if this is a self generated aggregation halt */
1592 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
1593 ieee80211_send_delba(dev, ra, tid, 0, reason);
1594
1595 /* free the reordering buffer */
1596 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
1597 if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
1598 /* release the reordered frames */
1599 dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
1600 sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
1601 sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
1602 }
1603 }
1604 /* free resources */
1605 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
1606 kfree(sta->ampdu_mlme.tid_rx[tid]);
1607 sta->ampdu_mlme.tid_rx[tid] = NULL;
1608 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
1609 904
1610 rcu_read_unlock(); 905 ieee80211_send_assoc(sdata, ifsta);
906
907 mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT);
1611} 908}
1612 909
1613 910
1614static void ieee80211_sta_process_delba(struct net_device *dev, 911static void ieee80211_associated(struct ieee80211_sub_if_data *sdata,
1615 struct ieee80211_mgmt *mgmt, size_t len) 912 struct ieee80211_if_sta *ifsta)
1616{ 913{
1617 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 914 struct ieee80211_local *local = sdata->local;
1618 struct sta_info *sta; 915 struct sta_info *sta;
1619 u16 tid, params; 916 int disassoc;
1620 u16 initiator;
1621 DECLARE_MAC_BUF(mac); 917 DECLARE_MAC_BUF(mac);
1622 918
1623 rcu_read_lock(); 919 /* TODO: start monitoring current AP signal quality and number of
1624 920 * missed beacons. Scan other channels every now and then and search
1625 sta = sta_info_get(local, mgmt->sa); 921 * for better APs. */
1626 if (!sta) { 922 /* TODO: remove expired BSSes */
1627 rcu_read_unlock();
1628 return;
1629 }
1630
1631 params = le16_to_cpu(mgmt->u.action.u.delba.params);
1632 tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
1633 initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
1634
1635#ifdef CONFIG_MAC80211_HT_DEBUG
1636 if (net_ratelimit())
1637 printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n",
1638 print_mac(mac, mgmt->sa),
1639 initiator ? "initiator" : "recipient", tid,
1640 mgmt->u.action.u.delba.reason_code);
1641#endif /* CONFIG_MAC80211_HT_DEBUG */
1642
1643 if (initiator == WLAN_BACK_INITIATOR)
1644 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid,
1645 WLAN_BACK_INITIATOR, 0);
1646 else { /* WLAN_BACK_RECIPIENT */
1647 spin_lock_bh(&sta->lock);
1648 sta->ampdu_mlme.tid_state_tx[tid] =
1649 HT_AGG_STATE_OPERATIONAL;
1650 spin_unlock_bh(&sta->lock);
1651 ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
1652 WLAN_BACK_RECIPIENT);
1653 }
1654 rcu_read_unlock();
1655}
1656 923
1657/* 924 ifsta->state = IEEE80211_STA_MLME_ASSOCIATED;
1658 * After sending add Block Ack request we activated a timer until
1659 * add Block Ack response will arrive from the recipient.
1660 * If this timer expires sta_addba_resp_timer_expired will be executed.
1661 */
1662void sta_addba_resp_timer_expired(unsigned long data)
1663{
1664 /* not an elegant detour, but there is no choice as the timer passes
1665 * only one argument, and both sta_info and TID are needed, so init
1666 * flow in sta_info_create gives the TID as data, while the timer_to_id
1667 * array gives the sta through container_of */
1668 u16 tid = *(u8 *)data;
1669 struct sta_info *temp_sta = container_of((void *)data,
1670 struct sta_info, timer_to_tid[tid]);
1671
1672 struct ieee80211_local *local = temp_sta->local;
1673 struct ieee80211_hw *hw = &local->hw;
1674 struct sta_info *sta;
1675 u8 *state;
1676 925
1677 rcu_read_lock(); 926 rcu_read_lock();
1678 927
1679 sta = sta_info_get(local, temp_sta->addr); 928 sta = sta_info_get(local, ifsta->bssid);
1680 if (!sta) { 929 if (!sta) {
1681 rcu_read_unlock(); 930 printk(KERN_DEBUG "%s: No STA entry for own AP %s\n",
1682 return; 931 sdata->dev->name, print_mac(mac, ifsta->bssid));
1683 } 932 disassoc = 1;
1684 933 } else {
1685 state = &sta->ampdu_mlme.tid_state_tx[tid]; 934 disassoc = 0;
1686 /* check if the TID waits for addBA response */ 935 if (time_after(jiffies,
1687 spin_lock_bh(&sta->lock); 936 sta->last_rx + IEEE80211_MONITORING_INTERVAL)) {
1688 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 937 if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) {
1689 spin_unlock_bh(&sta->lock); 938 printk(KERN_DEBUG "%s: No ProbeResp from "
1690 *state = HT_AGG_STATE_IDLE; 939 "current AP %s - assume out of "
1691#ifdef CONFIG_MAC80211_HT_DEBUG 940 "range\n",
1692 printk(KERN_DEBUG "timer expired on tid %d but we are not " 941 sdata->dev->name, print_mac(mac, ifsta->bssid));
1693 "expecting addBA response there", tid); 942 disassoc = 1;
1694#endif 943 } else
1695 goto timer_expired_exit; 944 ieee80211_send_probe_req(sdata, ifsta->bssid,
945 local->scan_ssid,
946 local->scan_ssid_len);
947 ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL;
948 } else {
949 ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
950 if (time_after(jiffies, ifsta->last_probe +
951 IEEE80211_PROBE_INTERVAL)) {
952 ifsta->last_probe = jiffies;
953 ieee80211_send_probe_req(sdata, ifsta->bssid,
954 ifsta->ssid,
955 ifsta->ssid_len);
956 }
957 }
1696 } 958 }
1697 959
1698#ifdef CONFIG_MAC80211_HT_DEBUG
1699 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
1700#endif
1701
1702 /* go through the state check in stop_BA_session */
1703 *state = HT_AGG_STATE_OPERATIONAL;
1704 spin_unlock_bh(&sta->lock);
1705 ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid,
1706 WLAN_BACK_INITIATOR);
1707
1708timer_expired_exit:
1709 rcu_read_unlock(); 960 rcu_read_unlock();
1710}
1711 961
1712/* 962 if (disassoc)
1713 * After accepting the AddBA Request we activated a timer, 963 ieee80211_set_disassoc(sdata, ifsta, true, true,
1714 * resetting it after each frame that arrives from the originator. 964 WLAN_REASON_PREV_AUTH_NOT_VALID);
1715 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. 965 else
1716 */ 966 mod_timer(&ifsta->timer, jiffies +
1717static void sta_rx_agg_session_timer_expired(unsigned long data) 967 IEEE80211_MONITORING_INTERVAL);
1718{
1719 /* not an elegant detour, but there is no choice as the timer passes
1720 * only one argument, and various sta_info are needed here, so init
1721 * flow in sta_info_create gives the TID as data, while the timer_to_id
1722 * array gives the sta through container_of */
1723 u8 *ptid = (u8 *)data;
1724 u8 *timer_to_id = ptid - *ptid;
1725 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
1726 timer_to_tid[0]);
1727
1728#ifdef CONFIG_MAC80211_HT_DEBUG
1729 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
1730#endif
1731 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr,
1732 (u16)*ptid, WLAN_BACK_TIMER,
1733 WLAN_REASON_QSTA_TIMEOUT);
1734} 968}
1735 969
1736void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr)
1737{
1738 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1739 int i;
1740 970
1741 for (i = 0; i < STA_TID_NUM; i++) { 971static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
1742 ieee80211_stop_tx_ba_session(&local->hw, addr, i, 972 struct ieee80211_if_sta *ifsta)
1743 WLAN_BACK_INITIATOR); 973{
1744 ieee80211_sta_stop_rx_ba_session(dev, addr, i, 974 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
1745 WLAN_BACK_RECIPIENT, 975 ifsta->flags |= IEEE80211_STA_AUTHENTICATED;
1746 WLAN_REASON_QSTA_LEAVE_QBSS); 976 ieee80211_associate(sdata, ifsta);
1747 }
1748} 977}
1749 978
1750static void ieee80211_send_refuse_measurement_request(struct net_device *dev,
1751 struct ieee80211_msrment_ie *request_ie,
1752 const u8 *da, const u8 *bssid,
1753 u8 dialog_token)
1754{
1755 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1756 struct sk_buff *skb;
1757 struct ieee80211_mgmt *msr_report;
1758 979
1759 skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + 980static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1760 sizeof(struct ieee80211_msrment_ie)); 981 struct ieee80211_if_sta *ifsta,
982 struct ieee80211_mgmt *mgmt,
983 size_t len)
984{
985 u8 *pos;
986 struct ieee802_11_elems elems;
1761 987
1762 if (!skb) { 988 pos = mgmt->u.auth.variable;
1763 printk(KERN_ERR "%s: failed to allocate buffer for " 989 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1764 "measurement report frame\n", dev->name); 990 if (!elems.challenge)
1765 return; 991 return;
1766 } 992 ieee80211_send_auth(sdata, ifsta, 3, elems.challenge - 2,
1767 993 elems.challenge_len + 2, 1);
1768 skb_reserve(skb, local->hw.extra_tx_headroom);
1769 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
1770 memset(msr_report, 0, 24);
1771 memcpy(msr_report->da, da, ETH_ALEN);
1772 memcpy(msr_report->sa, dev->dev_addr, ETH_ALEN);
1773 memcpy(msr_report->bssid, bssid, ETH_ALEN);
1774 msr_report->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1775 IEEE80211_STYPE_ACTION);
1776
1777 skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
1778 msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
1779 msr_report->u.action.u.measurement.action_code =
1780 WLAN_ACTION_SPCT_MSR_RPRT;
1781 msr_report->u.action.u.measurement.dialog_token = dialog_token;
1782
1783 msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
1784 msr_report->u.action.u.measurement.length =
1785 sizeof(struct ieee80211_msrment_ie);
1786
1787 memset(&msr_report->u.action.u.measurement.msr_elem, 0,
1788 sizeof(struct ieee80211_msrment_ie));
1789 msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
1790 msr_report->u.action.u.measurement.msr_elem.mode |=
1791 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
1792 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
1793
1794 ieee80211_sta_tx(dev, skb, 0);
1795}
1796
1797static void ieee80211_sta_process_measurement_req(struct net_device *dev,
1798 struct ieee80211_mgmt *mgmt,
1799 size_t len)
1800{
1801 /*
1802 * Ignoring measurement request is spec violation.
1803 * Mandatory measurements must be reported optional
1804 * measurements might be refused or reported incapable
1805 * For now just refuse
1806 * TODO: Answer basic measurement as unmeasured
1807 */
1808 ieee80211_send_refuse_measurement_request(dev,
1809 &mgmt->u.action.u.measurement.msr_elem,
1810 mgmt->sa, mgmt->bssid,
1811 mgmt->u.action.u.measurement.dialog_token);
1812} 994}
1813 995
1814 996static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1815static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1816 struct ieee80211_if_sta *ifsta, 997 struct ieee80211_if_sta *ifsta,
1817 struct ieee80211_mgmt *mgmt, 998 struct ieee80211_mgmt *mgmt,
1818 size_t len) 999 size_t len)
1819{ 1000{
1820 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1821 u16 auth_alg, auth_transaction, status_code; 1001 u16 auth_alg, auth_transaction, status_code;
1822 DECLARE_MAC_BUF(mac); 1002 DECLARE_MAC_BUF(mac);
1823 1003
1824 if (ifsta->state != IEEE80211_AUTHENTICATE && 1004 if (ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE &&
1825 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) 1005 sdata->vif.type != NL80211_IFTYPE_ADHOC)
1826 return; 1006 return;
1827 1007
1828 if (len < 24 + 6) 1008 if (len < 24 + 6)
1829 return; 1009 return;
1830 1010
1831 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 1011 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1832 memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) 1012 memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0)
1833 return; 1013 return;
1834 1014
1835 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 1015 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1836 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) 1016 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
1837 return; 1017 return;
1838 1018
@@ -1840,7 +1020,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1840 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); 1020 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1841 status_code = le16_to_cpu(mgmt->u.auth.status_code); 1021 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1842 1022
1843 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 1023 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1844 /* 1024 /*
1845 * IEEE 802.11 standard does not require authentication in IBSS 1025 * IEEE 802.11 standard does not require authentication in IBSS
1846 * networks and most implementations do not seem to use it. 1026 * networks and most implementations do not seem to use it.
@@ -1849,7 +1029,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1849 */ 1029 */
1850 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) 1030 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
1851 return; 1031 return;
1852 ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0); 1032 ieee80211_send_auth(sdata, ifsta, 2, NULL, 0, 0);
1853 } 1033 }
1854 1034
1855 if (auth_alg != ifsta->auth_alg || 1035 if (auth_alg != ifsta->auth_alg ||
@@ -1882,7 +1062,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1882 algs[pos] == 0xff) 1062 algs[pos] == 0xff)
1883 continue; 1063 continue;
1884 if (algs[pos] == WLAN_AUTH_SHARED_KEY && 1064 if (algs[pos] == WLAN_AUTH_SHARED_KEY &&
1885 !ieee80211_sta_wep_configured(dev)) 1065 !ieee80211_sta_wep_configured(sdata))
1886 continue; 1066 continue;
1887 ifsta->auth_alg = algs[pos]; 1067 ifsta->auth_alg = algs[pos];
1888 break; 1068 break;
@@ -1894,19 +1074,19 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1894 switch (ifsta->auth_alg) { 1074 switch (ifsta->auth_alg) {
1895 case WLAN_AUTH_OPEN: 1075 case WLAN_AUTH_OPEN:
1896 case WLAN_AUTH_LEAP: 1076 case WLAN_AUTH_LEAP:
1897 ieee80211_auth_completed(dev, ifsta); 1077 ieee80211_auth_completed(sdata, ifsta);
1898 break; 1078 break;
1899 case WLAN_AUTH_SHARED_KEY: 1079 case WLAN_AUTH_SHARED_KEY:
1900 if (ifsta->auth_transaction == 4) 1080 if (ifsta->auth_transaction == 4)
1901 ieee80211_auth_completed(dev, ifsta); 1081 ieee80211_auth_completed(sdata, ifsta);
1902 else 1082 else
1903 ieee80211_auth_challenge(dev, ifsta, mgmt, len); 1083 ieee80211_auth_challenge(sdata, ifsta, mgmt, len);
1904 break; 1084 break;
1905 } 1085 }
1906} 1086}
1907 1087
1908 1088
1909static void ieee80211_rx_mgmt_deauth(struct net_device *dev, 1089static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1910 struct ieee80211_if_sta *ifsta, 1090 struct ieee80211_if_sta *ifsta,
1911 struct ieee80211_mgmt *mgmt, 1091 struct ieee80211_mgmt *mgmt,
1912 size_t len) 1092 size_t len)
@@ -1923,22 +1103,22 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev,
1923 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 1103 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1924 1104
1925 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) 1105 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED)
1926 printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); 1106 printk(KERN_DEBUG "%s: deauthenticated\n", sdata->dev->name);
1927 1107
1928 if (ifsta->state == IEEE80211_AUTHENTICATE || 1108 if (ifsta->state == IEEE80211_STA_MLME_AUTHENTICATE ||
1929 ifsta->state == IEEE80211_ASSOCIATE || 1109 ifsta->state == IEEE80211_STA_MLME_ASSOCIATE ||
1930 ifsta->state == IEEE80211_ASSOCIATED) { 1110 ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) {
1931 ifsta->state = IEEE80211_AUTHENTICATE; 1111 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
1932 mod_timer(&ifsta->timer, jiffies + 1112 mod_timer(&ifsta->timer, jiffies +
1933 IEEE80211_RETRY_AUTH_INTERVAL); 1113 IEEE80211_RETRY_AUTH_INTERVAL);
1934 } 1114 }
1935 1115
1936 ieee80211_set_disassoc(dev, ifsta, 1); 1116 ieee80211_set_disassoc(sdata, ifsta, true, false, 0);
1937 ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED; 1117 ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED;
1938} 1118}
1939 1119
1940 1120
1941static void ieee80211_rx_mgmt_disassoc(struct net_device *dev, 1121static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1942 struct ieee80211_if_sta *ifsta, 1122 struct ieee80211_if_sta *ifsta,
1943 struct ieee80211_mgmt *mgmt, 1123 struct ieee80211_mgmt *mgmt,
1944 size_t len) 1124 size_t len)
@@ -1955,15 +1135,15 @@ static void ieee80211_rx_mgmt_disassoc(struct net_device *dev,
1955 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 1135 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1956 1136
1957 if (ifsta->flags & IEEE80211_STA_ASSOCIATED) 1137 if (ifsta->flags & IEEE80211_STA_ASSOCIATED)
1958 printk(KERN_DEBUG "%s: disassociated\n", dev->name); 1138 printk(KERN_DEBUG "%s: disassociated\n", sdata->dev->name);
1959 1139
1960 if (ifsta->state == IEEE80211_ASSOCIATED) { 1140 if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) {
1961 ifsta->state = IEEE80211_ASSOCIATE; 1141 ifsta->state = IEEE80211_STA_MLME_ASSOCIATE;
1962 mod_timer(&ifsta->timer, jiffies + 1142 mod_timer(&ifsta->timer, jiffies +
1963 IEEE80211_RETRY_AUTH_INTERVAL); 1143 IEEE80211_RETRY_AUTH_INTERVAL);
1964 } 1144 }
1965 1145
1966 ieee80211_set_disassoc(dev, ifsta, 0); 1146 ieee80211_set_disassoc(sdata, ifsta, false, false, 0);
1967} 1147}
1968 1148
1969 1149
@@ -1974,7 +1154,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1974 int reassoc) 1154 int reassoc)
1975{ 1155{
1976 struct ieee80211_local *local = sdata->local; 1156 struct ieee80211_local *local = sdata->local;
1977 struct net_device *dev = sdata->dev;
1978 struct ieee80211_supported_band *sband; 1157 struct ieee80211_supported_band *sband;
1979 struct sta_info *sta; 1158 struct sta_info *sta;
1980 u64 rates, basic_rates; 1159 u64 rates, basic_rates;
@@ -1989,7 +1168,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1989 /* AssocResp and ReassocResp have identical structure, so process both 1168 /* AssocResp and ReassocResp have identical structure, so process both
1990 * of them in this function. */ 1169 * of them in this function. */
1991 1170
1992 if (ifsta->state != IEEE80211_ASSOCIATE) 1171 if (ifsta->state != IEEE80211_STA_MLME_ASSOCIATE)
1993 return; 1172 return;
1994 1173
1995 if (len < 24 + 6) 1174 if (len < 24 + 6)
@@ -2004,12 +1183,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2004 1183
2005 printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " 1184 printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x "
2006 "status=%d aid=%d)\n", 1185 "status=%d aid=%d)\n",
2007 dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), 1186 sdata->dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa),
2008 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); 1187 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
2009 1188
2010 if (status_code != WLAN_STATUS_SUCCESS) { 1189 if (status_code != WLAN_STATUS_SUCCESS) {
2011 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", 1190 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
2012 dev->name, status_code); 1191 sdata->dev->name, status_code);
2013 /* if this was a reassociation, ensure we try a "full" 1192 /* if this was a reassociation, ensure we try a "full"
2014 * association next time. This works around some broken APs 1193 * association next time. This works around some broken APs
2015 * which do not correctly reject reassociation requests. */ 1194 * which do not correctly reject reassociation requests. */
@@ -2019,7 +1198,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2019 1198
2020 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1199 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
2021 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 1200 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
2022 "set\n", dev->name, aid); 1201 "set\n", sdata->dev->name, aid);
2023 aid &= ~(BIT(15) | BIT(14)); 1202 aid &= ~(BIT(15) | BIT(14));
2024 1203
2025 pos = mgmt->u.assoc_resp.variable; 1204 pos = mgmt->u.assoc_resp.variable;
@@ -2027,11 +1206,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2027 1206
2028 if (!elems.supp_rates) { 1207 if (!elems.supp_rates) {
2029 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", 1208 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
2030 dev->name); 1209 sdata->dev->name);
2031 return; 1210 return;
2032 } 1211 }
2033 1212
2034 printk(KERN_DEBUG "%s: associated\n", dev->name); 1213 printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
2035 ifsta->aid = aid; 1214 ifsta->aid = aid;
2036 ifsta->ap_capab = capab_info; 1215 ifsta->ap_capab = capab_info;
2037 1216
@@ -2046,17 +1225,17 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2046 /* Add STA entry for the AP */ 1225 /* Add STA entry for the AP */
2047 sta = sta_info_get(local, ifsta->bssid); 1226 sta = sta_info_get(local, ifsta->bssid);
2048 if (!sta) { 1227 if (!sta) {
2049 struct ieee80211_sta_bss *bss; 1228 struct ieee80211_bss *bss;
2050 int err; 1229 int err;
2051 1230
2052 sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC); 1231 sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC);
2053 if (!sta) { 1232 if (!sta) {
2054 printk(KERN_DEBUG "%s: failed to alloc STA entry for" 1233 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
2055 " the AP\n", dev->name); 1234 " the AP\n", sdata->dev->name);
2056 rcu_read_unlock(); 1235 rcu_read_unlock();
2057 return; 1236 return;
2058 } 1237 }
2059 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 1238 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
2060 local->hw.conf.channel->center_freq, 1239 local->hw.conf.channel->center_freq,
2061 ifsta->ssid, ifsta->ssid_len); 1240 ifsta->ssid, ifsta->ssid_len);
2062 if (bss) { 1241 if (bss) {
@@ -2069,7 +1248,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2069 err = sta_info_insert(sta); 1248 err = sta_info_insert(sta);
2070 if (err) { 1249 if (err) {
2071 printk(KERN_DEBUG "%s: failed to insert STA entry for" 1250 printk(KERN_DEBUG "%s: failed to insert STA entry for"
2072 " the AP (error %d)\n", dev->name, err); 1251 " the AP (error %d)\n", sdata->dev->name, err);
2073 rcu_read_unlock(); 1252 rcu_read_unlock();
2074 return; 1253 return;
2075 } 1254 }
@@ -2122,8 +1301,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2122 } 1301 }
2123 } 1302 }
2124 1303
2125 sta->supp_rates[local->hw.conf.channel->band] = rates; 1304 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
2126 sdata->basic_rates = basic_rates; 1305 sdata->bss_conf.basic_rates = basic_rates;
2127 1306
2128 /* cf. IEEE 802.11 9.2.12 */ 1307 /* cf. IEEE 802.11 9.2.12 */
2129 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && 1308 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
@@ -2137,11 +1316,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2137 struct ieee80211_ht_bss_info bss_info; 1316 struct ieee80211_ht_bss_info bss_info;
2138 ieee80211_ht_cap_ie_to_ht_info( 1317 ieee80211_ht_cap_ie_to_ht_info(
2139 (struct ieee80211_ht_cap *) 1318 (struct ieee80211_ht_cap *)
2140 elems.ht_cap_elem, &sta->ht_info); 1319 elems.ht_cap_elem, &sta->sta.ht_info);
2141 ieee80211_ht_addt_info_ie_to_ht_bss_info( 1320 ieee80211_ht_addt_info_ie_to_ht_bss_info(
2142 (struct ieee80211_ht_addt_info *) 1321 (struct ieee80211_ht_addt_info *)
2143 elems.ht_info_elem, &bss_info); 1322 elems.ht_info_elem, &bss_info);
2144 ieee80211_handle_ht(local, 1, &sta->ht_info, &bss_info); 1323 ieee80211_handle_ht(local, 1, &sta->sta.ht_info, &bss_info);
2145 } 1324 }
2146 1325
2147 rate_control_rate_init(sta, local); 1326 rate_control_rate_init(sta, local);
@@ -2149,7 +1328,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2149 if (elems.wmm_param) { 1328 if (elems.wmm_param) {
2150 set_sta_flags(sta, WLAN_STA_WME); 1329 set_sta_flags(sta, WLAN_STA_WME);
2151 rcu_read_unlock(); 1330 rcu_read_unlock();
2152 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 1331 ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param,
2153 elems.wmm_param_len); 1332 elems.wmm_param_len);
2154 } else 1333 } else
2155 rcu_read_unlock(); 1334 rcu_read_unlock();
@@ -2158,234 +1337,26 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2158 * ieee80211_set_associated() will tell the driver */ 1337 * ieee80211_set_associated() will tell the driver */
2159 bss_conf->aid = aid; 1338 bss_conf->aid = aid;
2160 bss_conf->assoc_capability = capab_info; 1339 bss_conf->assoc_capability = capab_info;
2161 ieee80211_set_associated(dev, ifsta, 1); 1340 ieee80211_set_associated(sdata, ifsta);
2162
2163 ieee80211_associated(dev, ifsta);
2164}
2165
2166
2167/* Caller must hold local->sta_bss_lock */
2168static void __ieee80211_rx_bss_hash_add(struct net_device *dev,
2169 struct ieee80211_sta_bss *bss)
2170{
2171 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2172 u8 hash_idx;
2173
2174 if (bss_mesh_cfg(bss))
2175 hash_idx = mesh_id_hash(bss_mesh_id(bss),
2176 bss_mesh_id_len(bss));
2177 else
2178 hash_idx = STA_HASH(bss->bssid);
2179
2180 bss->hnext = local->sta_bss_hash[hash_idx];
2181 local->sta_bss_hash[hash_idx] = bss;
2182}
2183
2184
2185/* Caller must hold local->sta_bss_lock */
2186static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local,
2187 struct ieee80211_sta_bss *bss)
2188{
2189 struct ieee80211_sta_bss *b, *prev = NULL;
2190 b = local->sta_bss_hash[STA_HASH(bss->bssid)];
2191 while (b) {
2192 if (b == bss) {
2193 if (!prev)
2194 local->sta_bss_hash[STA_HASH(bss->bssid)] =
2195 bss->hnext;
2196 else
2197 prev->hnext = bss->hnext;
2198 break;
2199 }
2200 prev = b;
2201 b = b->hnext;
2202 }
2203}
2204
2205
2206static struct ieee80211_sta_bss *
2207ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int freq,
2208 u8 *ssid, u8 ssid_len)
2209{
2210 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2211 struct ieee80211_sta_bss *bss;
2212
2213 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
2214 if (!bss)
2215 return NULL;
2216 atomic_inc(&bss->users);
2217 atomic_inc(&bss->users);
2218 memcpy(bss->bssid, bssid, ETH_ALEN);
2219 bss->freq = freq;
2220 if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) {
2221 memcpy(bss->ssid, ssid, ssid_len);
2222 bss->ssid_len = ssid_len;
2223 }
2224
2225 spin_lock_bh(&local->sta_bss_lock);
2226 /* TODO: order by RSSI? */
2227 list_add_tail(&bss->list, &local->sta_bss_list);
2228 __ieee80211_rx_bss_hash_add(dev, bss);
2229 spin_unlock_bh(&local->sta_bss_lock);
2230 return bss;
2231}
2232 1341
2233static struct ieee80211_sta_bss * 1342 ieee80211_associated(sdata, ifsta);
2234ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
2235 u8 *ssid, u8 ssid_len)
2236{
2237 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2238 struct ieee80211_sta_bss *bss;
2239
2240 spin_lock_bh(&local->sta_bss_lock);
2241 bss = local->sta_bss_hash[STA_HASH(bssid)];
2242 while (bss) {
2243 if (!bss_mesh_cfg(bss) &&
2244 !memcmp(bss->bssid, bssid, ETH_ALEN) &&
2245 bss->freq == freq &&
2246 bss->ssid_len == ssid_len &&
2247 (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) {
2248 atomic_inc(&bss->users);
2249 break;
2250 }
2251 bss = bss->hnext;
2252 }
2253 spin_unlock_bh(&local->sta_bss_lock);
2254 return bss;
2255} 1343}
2256 1344
2257#ifdef CONFIG_MAC80211_MESH
2258static struct ieee80211_sta_bss *
2259ieee80211_rx_mesh_bss_get(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
2260 u8 *mesh_cfg, int freq)
2261{
2262 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2263 struct ieee80211_sta_bss *bss;
2264
2265 spin_lock_bh(&local->sta_bss_lock);
2266 bss = local->sta_bss_hash[mesh_id_hash(mesh_id, mesh_id_len)];
2267 while (bss) {
2268 if (bss_mesh_cfg(bss) &&
2269 !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) &&
2270 bss->freq == freq &&
2271 mesh_id_len == bss->mesh_id_len &&
2272 (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id,
2273 mesh_id_len))) {
2274 atomic_inc(&bss->users);
2275 break;
2276 }
2277 bss = bss->hnext;
2278 }
2279 spin_unlock_bh(&local->sta_bss_lock);
2280 return bss;
2281}
2282
2283static struct ieee80211_sta_bss *
2284ieee80211_rx_mesh_bss_add(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
2285 u8 *mesh_cfg, int mesh_config_len, int freq)
2286{
2287 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2288 struct ieee80211_sta_bss *bss;
2289
2290 if (mesh_config_len != MESH_CFG_LEN)
2291 return NULL;
2292
2293 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
2294 if (!bss)
2295 return NULL;
2296
2297 bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC);
2298 if (!bss->mesh_cfg) {
2299 kfree(bss);
2300 return NULL;
2301 }
2302
2303 if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) {
2304 bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC);
2305 if (!bss->mesh_id) {
2306 kfree(bss->mesh_cfg);
2307 kfree(bss);
2308 return NULL;
2309 }
2310 memcpy(bss->mesh_id, mesh_id, mesh_id_len);
2311 }
2312
2313 atomic_inc(&bss->users);
2314 atomic_inc(&bss->users);
2315 memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN);
2316 bss->mesh_id_len = mesh_id_len;
2317 bss->freq = freq;
2318 spin_lock_bh(&local->sta_bss_lock);
2319 /* TODO: order by RSSI? */
2320 list_add_tail(&bss->list, &local->sta_bss_list);
2321 __ieee80211_rx_bss_hash_add(dev, bss);
2322 spin_unlock_bh(&local->sta_bss_lock);
2323 return bss;
2324}
2325#endif
2326 1345
2327static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss) 1346static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
2328{
2329 kfree(bss->wpa_ie);
2330 kfree(bss->rsn_ie);
2331 kfree(bss->wmm_ie);
2332 kfree(bss->ht_ie);
2333 kfree(bss->ht_add_ie);
2334 kfree(bss_mesh_id(bss));
2335 kfree(bss_mesh_cfg(bss));
2336 kfree(bss);
2337}
2338
2339
2340static void ieee80211_rx_bss_put(struct ieee80211_local *local,
2341 struct ieee80211_sta_bss *bss)
2342{
2343 local_bh_disable();
2344 if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) {
2345 local_bh_enable();
2346 return;
2347 }
2348
2349 __ieee80211_rx_bss_hash_del(local, bss);
2350 list_del(&bss->list);
2351 spin_unlock_bh(&local->sta_bss_lock);
2352 ieee80211_rx_bss_free(bss);
2353}
2354
2355
2356void ieee80211_rx_bss_list_init(struct ieee80211_local *local)
2357{
2358 spin_lock_init(&local->sta_bss_lock);
2359 INIT_LIST_HEAD(&local->sta_bss_list);
2360}
2361
2362
2363void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local)
2364{
2365 struct ieee80211_sta_bss *bss, *tmp;
2366
2367 list_for_each_entry_safe(bss, tmp, &local->sta_bss_list, list)
2368 ieee80211_rx_bss_put(local, bss);
2369}
2370
2371
2372static int ieee80211_sta_join_ibss(struct net_device *dev,
2373 struct ieee80211_if_sta *ifsta, 1347 struct ieee80211_if_sta *ifsta,
2374 struct ieee80211_sta_bss *bss) 1348 struct ieee80211_bss *bss)
2375{ 1349{
2376 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1350 struct ieee80211_local *local = sdata->local;
2377 int res, rates, i, j; 1351 int res, rates, i, j;
2378 struct sk_buff *skb; 1352 struct sk_buff *skb;
2379 struct ieee80211_mgmt *mgmt; 1353 struct ieee80211_mgmt *mgmt;
2380 u8 *pos; 1354 u8 *pos;
2381 struct ieee80211_sub_if_data *sdata;
2382 struct ieee80211_supported_band *sband; 1355 struct ieee80211_supported_band *sband;
2383 union iwreq_data wrqu; 1356 union iwreq_data wrqu;
2384 1357
2385 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1358 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2386 1359
2387 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2388
2389 /* Remove possible STA entries from other IBSS networks. */ 1360 /* Remove possible STA entries from other IBSS networks. */
2390 sta_info_flush_delayed(sdata); 1361 sta_info_flush_delayed(sdata);
2391 1362
@@ -2403,7 +1374,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2403 sdata->drop_unencrypted = bss->capability & 1374 sdata->drop_unencrypted = bss->capability &
2404 WLAN_CAPABILITY_PRIVACY ? 1 : 0; 1375 WLAN_CAPABILITY_PRIVACY ? 1 : 0;
2405 1376
2406 res = ieee80211_set_freq(dev, bss->freq); 1377 res = ieee80211_set_freq(sdata, bss->freq);
2407 1378
2408 if (res) 1379 if (res)
2409 return res; 1380 return res;
@@ -2416,10 +1387,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2416 mgmt = (struct ieee80211_mgmt *) 1387 mgmt = (struct ieee80211_mgmt *)
2417 skb_put(skb, 24 + sizeof(mgmt->u.beacon)); 1388 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
2418 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); 1389 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
2419 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1390 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2420 IEEE80211_STYPE_PROBE_RESP); 1391 IEEE80211_STYPE_PROBE_RESP);
2421 memset(mgmt->da, 0xff, ETH_ALEN); 1392 memset(mgmt->da, 0xff, ETH_ALEN);
2422 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 1393 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
2423 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 1394 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
2424 mgmt->u.beacon.beacon_int = 1395 mgmt->u.beacon.beacon_int =
2425 cpu_to_le16(local->hw.conf.beacon_int); 1396 cpu_to_le16(local->hw.conf.beacon_int);
@@ -2476,108 +1447,36 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2476 } 1447 }
2477 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; 1448 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates;
2478 1449
2479 ieee80211_sta_def_wmm_params(dev, bss, 1); 1450 ieee80211_sta_def_wmm_params(sdata, bss);
2480 1451
2481 ifsta->state = IEEE80211_IBSS_JOINED; 1452 ifsta->state = IEEE80211_STA_MLME_IBSS_JOINED;
2482 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 1453 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
2483 1454
2484 memset(&wrqu, 0, sizeof(wrqu)); 1455 memset(&wrqu, 0, sizeof(wrqu));
2485 memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); 1456 memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN);
2486 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 1457 wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
2487 1458
2488 return res; 1459 return res;
2489} 1460}
2490 1461
2491u64 ieee80211_sta_get_rates(struct ieee80211_local *local, 1462static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2492 struct ieee802_11_elems *elems,
2493 enum ieee80211_band band)
2494{
2495 struct ieee80211_supported_band *sband;
2496 struct ieee80211_rate *bitrates;
2497 size_t num_rates;
2498 u64 supp_rates;
2499 int i, j;
2500 sband = local->hw.wiphy->bands[band];
2501
2502 if (!sband) {
2503 WARN_ON(1);
2504 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2505 }
2506
2507 bitrates = sband->bitrates;
2508 num_rates = sband->n_bitrates;
2509 supp_rates = 0;
2510 for (i = 0; i < elems->supp_rates_len +
2511 elems->ext_supp_rates_len; i++) {
2512 u8 rate = 0;
2513 int own_rate;
2514 if (i < elems->supp_rates_len)
2515 rate = elems->supp_rates[i];
2516 else if (elems->ext_supp_rates)
2517 rate = elems->ext_supp_rates
2518 [i - elems->supp_rates_len];
2519 own_rate = 5 * (rate & 0x7f);
2520 for (j = 0; j < num_rates; j++)
2521 if (bitrates[j].bitrate == own_rate)
2522 supp_rates |= BIT(j);
2523 }
2524 return supp_rates;
2525}
2526
2527
2528static void ieee80211_rx_bss_info(struct net_device *dev,
2529 struct ieee80211_mgmt *mgmt, 1463 struct ieee80211_mgmt *mgmt,
2530 size_t len, 1464 size_t len,
2531 struct ieee80211_rx_status *rx_status, 1465 struct ieee80211_rx_status *rx_status,
2532 struct ieee802_11_elems *elems, 1466 struct ieee802_11_elems *elems,
2533 int beacon) 1467 bool beacon)
2534{ 1468{
2535 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1469 struct ieee80211_local *local = sdata->local;
2536 int freq, clen; 1470 int freq;
2537 struct ieee80211_sta_bss *bss; 1471 struct ieee80211_bss *bss;
2538 struct sta_info *sta; 1472 struct sta_info *sta;
2539 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2540 u64 beacon_timestamp, rx_timestamp;
2541 struct ieee80211_channel *channel; 1473 struct ieee80211_channel *channel;
1474 u64 beacon_timestamp, rx_timestamp;
1475 u64 supp_rates = 0;
1476 enum ieee80211_band band = rx_status->band;
2542 DECLARE_MAC_BUF(mac); 1477 DECLARE_MAC_BUF(mac);
2543 DECLARE_MAC_BUF(mac2); 1478 DECLARE_MAC_BUF(mac2);
2544 1479
2545 if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN))
2546 return; /* ignore ProbeResp to foreign address */
2547
2548 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
2549
2550 if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id &&
2551 elems->mesh_config && mesh_matches_local(elems, dev)) {
2552 u64 rates = ieee80211_sta_get_rates(local, elems,
2553 rx_status->band);
2554
2555 mesh_neighbour_update(mgmt->sa, rates, dev,
2556 mesh_peer_accepts_plinks(elems, dev));
2557 }
2558
2559 rcu_read_lock();
2560
2561 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates &&
2562 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 &&
2563 (sta = sta_info_get(local, mgmt->sa))) {
2564 u64 prev_rates;
2565 u64 supp_rates = ieee80211_sta_get_rates(local, elems,
2566 rx_status->band);
2567
2568 prev_rates = sta->supp_rates[rx_status->band];
2569 sta->supp_rates[rx_status->band] &= supp_rates;
2570 if (sta->supp_rates[rx_status->band] == 0) {
2571 /* No matching rates - this should not really happen.
2572 * Make sure that at least one rate is marked
2573 * supported to avoid issues with TX rate ctrl. */
2574 sta->supp_rates[rx_status->band] =
2575 sdata->u.sta.supp_rates_bits[rx_status->band];
2576 }
2577 }
2578
2579 rcu_read_unlock();
2580
2581 if (elems->ds_params && elems->ds_params_len == 1) 1480 if (elems->ds_params && elems->ds_params_len == 1)
2582 freq = ieee80211_channel_to_frequency(elems->ds_params[0]); 1481 freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
2583 else 1482 else
@@ -2588,215 +1487,60 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2588 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) 1487 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
2589 return; 1488 return;
2590 1489
2591#ifdef CONFIG_MAC80211_MESH 1490 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && elems->supp_rates &&
2592 if (elems->mesh_config) 1491 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0) {
2593 bss = ieee80211_rx_mesh_bss_get(dev, elems->mesh_id, 1492 supp_rates = ieee80211_sta_get_rates(local, elems, band);
2594 elems->mesh_id_len, elems->mesh_config, freq);
2595 else
2596#endif
2597 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq,
2598 elems->ssid, elems->ssid_len);
2599 if (!bss) {
2600#ifdef CONFIG_MAC80211_MESH
2601 if (elems->mesh_config)
2602 bss = ieee80211_rx_mesh_bss_add(dev, elems->mesh_id,
2603 elems->mesh_id_len, elems->mesh_config,
2604 elems->mesh_config_len, freq);
2605 else
2606#endif
2607 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq,
2608 elems->ssid, elems->ssid_len);
2609 if (!bss)
2610 return;
2611 } else {
2612#if 0
2613 /* TODO: order by RSSI? */
2614 spin_lock_bh(&local->sta_bss_lock);
2615 list_move_tail(&bss->list, &local->sta_bss_list);
2616 spin_unlock_bh(&local->sta_bss_lock);
2617#endif
2618 }
2619 1493
2620 /* save the ERP value so that it is available at association time */ 1494 rcu_read_lock();
2621 if (elems->erp_info && elems->erp_info_len >= 1) {
2622 bss->erp_value = elems->erp_info[0];
2623 bss->has_erp_value = 1;
2624 }
2625 1495
2626 if (elems->ht_cap_elem && 1496 sta = sta_info_get(local, mgmt->sa);
2627 (!bss->ht_ie || bss->ht_ie_len != elems->ht_cap_elem_len || 1497 if (sta) {
2628 memcmp(bss->ht_ie, elems->ht_cap_elem, elems->ht_cap_elem_len))) { 1498 u64 prev_rates;
2629 kfree(bss->ht_ie);
2630 bss->ht_ie = kmalloc(elems->ht_cap_elem_len + 2, GFP_ATOMIC);
2631 if (bss->ht_ie) {
2632 memcpy(bss->ht_ie, elems->ht_cap_elem - 2,
2633 elems->ht_cap_elem_len + 2);
2634 bss->ht_ie_len = elems->ht_cap_elem_len + 2;
2635 } else
2636 bss->ht_ie_len = 0;
2637 } else if (!elems->ht_cap_elem && bss->ht_ie) {
2638 kfree(bss->ht_ie);
2639 bss->ht_ie = NULL;
2640 bss->ht_ie_len = 0;
2641 }
2642 1499
2643 if (elems->ht_info_elem && 1500 prev_rates = sta->sta.supp_rates[band];
2644 (!bss->ht_add_ie || 1501 /* make sure mandatory rates are always added */
2645 bss->ht_add_ie_len != elems->ht_info_elem_len || 1502 sta->sta.supp_rates[band] = supp_rates |
2646 memcmp(bss->ht_add_ie, elems->ht_info_elem, 1503 ieee80211_mandatory_rates(local, band);
2647 elems->ht_info_elem_len))) {
2648 kfree(bss->ht_add_ie);
2649 bss->ht_add_ie =
2650 kmalloc(elems->ht_info_elem_len + 2, GFP_ATOMIC);
2651 if (bss->ht_add_ie) {
2652 memcpy(bss->ht_add_ie, elems->ht_info_elem - 2,
2653 elems->ht_info_elem_len + 2);
2654 bss->ht_add_ie_len = elems->ht_info_elem_len + 2;
2655 } else
2656 bss->ht_add_ie_len = 0;
2657 } else if (!elems->ht_info_elem && bss->ht_add_ie) {
2658 kfree(bss->ht_add_ie);
2659 bss->ht_add_ie = NULL;
2660 bss->ht_add_ie_len = 0;
2661 }
2662 1504
2663 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); 1505#ifdef CONFIG_MAC80211_IBSS_DEBUG
2664 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); 1506 if (sta->sta.supp_rates[band] != prev_rates)
1507 printk(KERN_DEBUG "%s: updated supp_rates set "
1508 "for %s based on beacon info (0x%llx | "
1509 "0x%llx -> 0x%llx)\n",
1510 sdata->dev->name,
1511 print_mac(mac, sta->sta.addr),
1512 (unsigned long long) prev_rates,
1513 (unsigned long long) supp_rates,
1514 (unsigned long long) sta->sta.supp_rates[band]);
1515#endif
1516 } else {
1517 ieee80211_ibss_add_sta(sdata, NULL, mgmt->bssid,
1518 mgmt->sa, supp_rates);
1519 }
2665 1520
2666 if (elems->tim) { 1521 rcu_read_unlock();
2667 struct ieee80211_tim_ie *tim_ie =
2668 (struct ieee80211_tim_ie *)elems->tim;
2669 bss->dtim_period = tim_ie->dtim_period;
2670 } 1522 }
2671 1523
2672 /* set default value for buggy APs */ 1524 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
2673 if (!elems->tim || bss->dtim_period == 0) 1525 freq, beacon);
2674 bss->dtim_period = 1; 1526 if (!bss)
2675 1527 return;
2676 bss->supp_rates_len = 0;
2677 if (elems->supp_rates) {
2678 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
2679 if (clen > elems->supp_rates_len)
2680 clen = elems->supp_rates_len;
2681 memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
2682 clen);
2683 bss->supp_rates_len += clen;
2684 }
2685 if (elems->ext_supp_rates) {
2686 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
2687 if (clen > elems->ext_supp_rates_len)
2688 clen = elems->ext_supp_rates_len;
2689 memcpy(&bss->supp_rates[bss->supp_rates_len],
2690 elems->ext_supp_rates, clen);
2691 bss->supp_rates_len += clen;
2692 }
2693 1528
2694 bss->band = rx_status->band; 1529 /* was just updated in ieee80211_bss_info_update */
2695 1530 beacon_timestamp = bss->timestamp;
2696 bss->timestamp = beacon_timestamp;
2697 bss->last_update = jiffies;
2698 bss->signal = rx_status->signal;
2699 bss->noise = rx_status->noise;
2700 bss->qual = rx_status->qual;
2701 if (!beacon && !bss->probe_resp)
2702 bss->probe_resp = true;
2703 1531
2704 /* 1532 /*
2705 * In STA mode, the remaining parameters should not be overridden 1533 * In STA mode, the remaining parameters should not be overridden
2706 * by beacons because they're not necessarily accurate there. 1534 * by beacons because they're not necessarily accurate there.
2707 */ 1535 */
2708 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 1536 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2709 bss->probe_resp && beacon) { 1537 bss->last_probe_resp && beacon) {
2710 ieee80211_rx_bss_put(local, bss); 1538 ieee80211_rx_bss_put(local, bss);
2711 return; 1539 return;
2712 } 1540 }
2713 1541
2714 if (elems->wpa &&
2715 (!bss->wpa_ie || bss->wpa_ie_len != elems->wpa_len ||
2716 memcmp(bss->wpa_ie, elems->wpa, elems->wpa_len))) {
2717 kfree(bss->wpa_ie);
2718 bss->wpa_ie = kmalloc(elems->wpa_len + 2, GFP_ATOMIC);
2719 if (bss->wpa_ie) {
2720 memcpy(bss->wpa_ie, elems->wpa - 2, elems->wpa_len + 2);
2721 bss->wpa_ie_len = elems->wpa_len + 2;
2722 } else
2723 bss->wpa_ie_len = 0;
2724 } else if (!elems->wpa && bss->wpa_ie) {
2725 kfree(bss->wpa_ie);
2726 bss->wpa_ie = NULL;
2727 bss->wpa_ie_len = 0;
2728 }
2729
2730 if (elems->rsn &&
2731 (!bss->rsn_ie || bss->rsn_ie_len != elems->rsn_len ||
2732 memcmp(bss->rsn_ie, elems->rsn, elems->rsn_len))) {
2733 kfree(bss->rsn_ie);
2734 bss->rsn_ie = kmalloc(elems->rsn_len + 2, GFP_ATOMIC);
2735 if (bss->rsn_ie) {
2736 memcpy(bss->rsn_ie, elems->rsn - 2, elems->rsn_len + 2);
2737 bss->rsn_ie_len = elems->rsn_len + 2;
2738 } else
2739 bss->rsn_ie_len = 0;
2740 } else if (!elems->rsn && bss->rsn_ie) {
2741 kfree(bss->rsn_ie);
2742 bss->rsn_ie = NULL;
2743 bss->rsn_ie_len = 0;
2744 }
2745
2746 /*
2747 * Cf.
2748 * http://www.wipo.int/pctdb/en/wo.jsp?wo=2007047181&IA=WO2007047181&DISPLAY=DESC
2749 *
2750 * quoting:
2751 *
2752 * In particular, "Wi-Fi CERTIFIED for WMM - Support for Multimedia
2753 * Applications with Quality of Service in Wi-Fi Networks," Wi- Fi
2754 * Alliance (September 1, 2004) is incorporated by reference herein.
2755 * The inclusion of the WMM Parameters in probe responses and
2756 * association responses is mandatory for WMM enabled networks. The
2757 * inclusion of the WMM Parameters in beacons, however, is optional.
2758 */
2759
2760 if (elems->wmm_param &&
2761 (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_param_len ||
2762 memcmp(bss->wmm_ie, elems->wmm_param, elems->wmm_param_len))) {
2763 kfree(bss->wmm_ie);
2764 bss->wmm_ie = kmalloc(elems->wmm_param_len + 2, GFP_ATOMIC);
2765 if (bss->wmm_ie) {
2766 memcpy(bss->wmm_ie, elems->wmm_param - 2,
2767 elems->wmm_param_len + 2);
2768 bss->wmm_ie_len = elems->wmm_param_len + 2;
2769 } else
2770 bss->wmm_ie_len = 0;
2771 } else if (elems->wmm_info &&
2772 (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_info_len ||
2773 memcmp(bss->wmm_ie, elems->wmm_info,
2774 elems->wmm_info_len))) {
2775 /* As for certain AP's Fifth bit is not set in WMM IE in
2776 * beacon frames.So while parsing the beacon frame the
2777 * wmm_info structure is used instead of wmm_param.
2778 * wmm_info structure was never used to set bss->wmm_ie.
2779 * This code fixes this problem by copying the WME
2780 * information from wmm_info to bss->wmm_ie and enabling
2781 * n-band association.
2782 */
2783 kfree(bss->wmm_ie);
2784 bss->wmm_ie = kmalloc(elems->wmm_info_len + 2, GFP_ATOMIC);
2785 if (bss->wmm_ie) {
2786 memcpy(bss->wmm_ie, elems->wmm_info - 2,
2787 elems->wmm_info_len + 2);
2788 bss->wmm_ie_len = elems->wmm_info_len + 2;
2789 } else
2790 bss->wmm_ie_len = 0;
2791 } else if (!elems->wmm_param && !elems->wmm_info && bss->wmm_ie) {
2792 kfree(bss->wmm_ie);
2793 bss->wmm_ie = NULL;
2794 bss->wmm_ie_len = 0;
2795 }
2796
2797 /* check if we need to merge IBSS */ 1542 /* check if we need to merge IBSS */
2798 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon && 1543 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && beacon &&
2799 !local->sta_sw_scanning && !local->sta_hw_scanning &&
2800 bss->capability & WLAN_CAPABILITY_IBSS && 1544 bss->capability & WLAN_CAPABILITY_IBSS &&
2801 bss->freq == local->oper_channel->center_freq && 1545 bss->freq == local->oper_channel->center_freq &&
2802 elems->ssid_len == sdata->u.sta.ssid_len && 1546 elems->ssid_len == sdata->u.sta.ssid_len &&
@@ -2818,7 +1562,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2818 * e.g: at 1 MBit that means mactime is 192 usec earlier 1562 * e.g: at 1 MBit that means mactime is 192 usec earlier
2819 * (=24 bytes * 8 usecs/byte) than the beacon timestamp. 1563 * (=24 bytes * 8 usecs/byte) than the beacon timestamp.
2820 */ 1564 */
2821 int rate = local->hw.wiphy->bands[rx_status->band]-> 1565 int rate = local->hw.wiphy->bands[band]->
2822 bitrates[rx_status->rate_idx].bitrate; 1566 bitrates[rx_status->rate_idx].bitrate;
2823 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); 1567 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
2824 } else if (local && local->ops && local->ops->get_tsf) 1568 } else if (local && local->ops && local->ops->get_tsf)
@@ -2841,12 +1585,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2841#ifdef CONFIG_MAC80211_IBSS_DEBUG 1585#ifdef CONFIG_MAC80211_IBSS_DEBUG
2842 printk(KERN_DEBUG "%s: beacon TSF higher than " 1586 printk(KERN_DEBUG "%s: beacon TSF higher than "
2843 "local TSF - IBSS merge with BSSID %s\n", 1587 "local TSF - IBSS merge with BSSID %s\n",
2844 dev->name, print_mac(mac, mgmt->bssid)); 1588 sdata->dev->name, print_mac(mac, mgmt->bssid));
2845#endif 1589#endif
2846 ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss); 1590 ieee80211_sta_join_ibss(sdata, &sdata->u.sta, bss);
2847 ieee80211_ibss_add_sta(dev, NULL, 1591 ieee80211_ibss_add_sta(sdata, NULL,
2848 mgmt->bssid, mgmt->sa, 1592 mgmt->bssid, mgmt->sa,
2849 BIT(rx_status->rate_idx)); 1593 supp_rates);
2850 } 1594 }
2851 } 1595 }
2852 1596
@@ -2854,13 +1598,17 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2854} 1598}
2855 1599
2856 1600
2857static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev, 1601static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
2858 struct ieee80211_mgmt *mgmt, 1602 struct ieee80211_mgmt *mgmt,
2859 size_t len, 1603 size_t len,
2860 struct ieee80211_rx_status *rx_status) 1604 struct ieee80211_rx_status *rx_status)
2861{ 1605{
2862 size_t baselen; 1606 size_t baselen;
2863 struct ieee802_11_elems elems; 1607 struct ieee802_11_elems elems;
1608 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1609
1610 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
1611 return; /* ignore ProbeResp to foreign address */
2864 1612
2865 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 1613 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
2866 if (baselen > len) 1614 if (baselen > len)
@@ -2869,20 +1617,27 @@ static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev,
2869 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, 1617 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
2870 &elems); 1618 &elems);
2871 1619
2872 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 0); 1620 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1621
1622 /* direct probe may be part of the association flow */
1623 if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE,
1624 &ifsta->request)) {
1625 printk(KERN_DEBUG "%s direct probe responded\n",
1626 sdata->dev->name);
1627 ieee80211_authenticate(sdata, ifsta);
1628 }
2873} 1629}
2874 1630
2875 1631
2876static void ieee80211_rx_mgmt_beacon(struct net_device *dev, 1632static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2877 struct ieee80211_mgmt *mgmt, 1633 struct ieee80211_mgmt *mgmt,
2878 size_t len, 1634 size_t len,
2879 struct ieee80211_rx_status *rx_status) 1635 struct ieee80211_rx_status *rx_status)
2880{ 1636{
2881 struct ieee80211_sub_if_data *sdata;
2882 struct ieee80211_if_sta *ifsta; 1637 struct ieee80211_if_sta *ifsta;
2883 size_t baselen; 1638 size_t baselen;
2884 struct ieee802_11_elems elems; 1639 struct ieee802_11_elems elems;
2885 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1640 struct ieee80211_local *local = sdata->local;
2886 struct ieee80211_conf *conf = &local->hw.conf; 1641 struct ieee80211_conf *conf = &local->hw.conf;
2887 u32 changed = 0; 1642 u32 changed = 0;
2888 1643
@@ -2893,10 +1648,9 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2893 1648
2894 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); 1649 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2895 1650
2896 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 1); 1651 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
2897 1652
2898 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1653 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2899 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
2900 return; 1654 return;
2901 ifsta = &sdata->u.sta; 1655 ifsta = &sdata->u.sta;
2902 1656
@@ -2904,15 +1658,9 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2904 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) 1658 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
2905 return; 1659 return;
2906 1660
2907 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 1661 ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param,
2908 elems.wmm_param_len); 1662 elems.wmm_param_len);
2909 1663
2910 /* Do not send changes to driver if we are scanning. This removes
2911 * requirement that driver's bss_info_changed function needs to be
2912 * atomic. */
2913 if (local->sta_sw_scanning || local->sta_hw_scanning)
2914 return;
2915
2916 if (elems.erp_info && elems.erp_info_len >= 1) 1664 if (elems.erp_info && elems.erp_info_len >= 1)
2917 changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]); 1665 changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]);
2918 else { 1666 else {
@@ -2936,14 +1684,13 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2936} 1684}
2937 1685
2938 1686
2939static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, 1687static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
2940 struct ieee80211_if_sta *ifsta, 1688 struct ieee80211_if_sta *ifsta,
2941 struct ieee80211_mgmt *mgmt, 1689 struct ieee80211_mgmt *mgmt,
2942 size_t len, 1690 size_t len,
2943 struct ieee80211_rx_status *rx_status) 1691 struct ieee80211_rx_status *rx_status)
2944{ 1692{
2945 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1693 struct ieee80211_local *local = sdata->local;
2946 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2947 int tx_last_beacon; 1694 int tx_last_beacon;
2948 struct sk_buff *skb; 1695 struct sk_buff *skb;
2949 struct ieee80211_mgmt *resp; 1696 struct ieee80211_mgmt *resp;
@@ -2954,8 +1701,8 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
2954 DECLARE_MAC_BUF(mac3); 1701 DECLARE_MAC_BUF(mac3);
2955#endif 1702#endif
2956 1703
2957 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS || 1704 if (sdata->vif.type != NL80211_IFTYPE_ADHOC ||
2958 ifsta->state != IEEE80211_IBSS_JOINED || 1705 ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED ||
2959 len < 24 + 2 || !ifsta->probe_resp) 1706 len < 24 + 2 || !ifsta->probe_resp)
2960 return; 1707 return;
2961 1708
@@ -2967,7 +1714,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
2967#ifdef CONFIG_MAC80211_IBSS_DEBUG 1714#ifdef CONFIG_MAC80211_IBSS_DEBUG
2968 printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID=" 1715 printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID="
2969 "%s (tx_last_beacon=%d)\n", 1716 "%s (tx_last_beacon=%d)\n",
2970 dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da), 1717 sdata->dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da),
2971 print_mac(mac3, mgmt->bssid), tx_last_beacon); 1718 print_mac(mac3, mgmt->bssid), tx_last_beacon);
2972#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 1719#endif /* CONFIG_MAC80211_IBSS_DEBUG */
2973 1720
@@ -2985,7 +1732,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
2985#ifdef CONFIG_MAC80211_IBSS_DEBUG 1732#ifdef CONFIG_MAC80211_IBSS_DEBUG
2986 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " 1733 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
2987 "from %s\n", 1734 "from %s\n",
2988 dev->name, print_mac(mac, mgmt->sa)); 1735 sdata->dev->name, print_mac(mac, mgmt->sa));
2989#endif 1736#endif
2990 return; 1737 return;
2991 } 1738 }
@@ -3005,74 +1752,15 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
3005 memcpy(resp->da, mgmt->sa, ETH_ALEN); 1752 memcpy(resp->da, mgmt->sa, ETH_ALEN);
3006#ifdef CONFIG_MAC80211_IBSS_DEBUG 1753#ifdef CONFIG_MAC80211_IBSS_DEBUG
3007 printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n", 1754 printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n",
3008 dev->name, print_mac(mac, resp->da)); 1755 sdata->dev->name, print_mac(mac, resp->da));
3009#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 1756#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3010 ieee80211_sta_tx(dev, skb, 0); 1757 ieee80211_tx_skb(sdata, skb, 0);
3011} 1758}
3012 1759
3013static void ieee80211_rx_mgmt_action(struct net_device *dev, 1760void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
3014 struct ieee80211_if_sta *ifsta,
3015 struct ieee80211_mgmt *mgmt,
3016 size_t len,
3017 struct ieee80211_rx_status *rx_status)
3018{
3019 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3020 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3021
3022 if (len < IEEE80211_MIN_ACTION_SIZE)
3023 return;
3024
3025 switch (mgmt->u.action.category) {
3026 case WLAN_CATEGORY_SPECTRUM_MGMT:
3027 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
3028 break;
3029 switch (mgmt->u.action.u.chan_switch.action_code) {
3030 case WLAN_ACTION_SPCT_MSR_REQ:
3031 if (len < (IEEE80211_MIN_ACTION_SIZE +
3032 sizeof(mgmt->u.action.u.measurement)))
3033 break;
3034 ieee80211_sta_process_measurement_req(dev, mgmt, len);
3035 break;
3036 }
3037 break;
3038 case WLAN_CATEGORY_BACK:
3039 switch (mgmt->u.action.u.addba_req.action_code) {
3040 case WLAN_ACTION_ADDBA_REQ:
3041 if (len < (IEEE80211_MIN_ACTION_SIZE +
3042 sizeof(mgmt->u.action.u.addba_req)))
3043 break;
3044 ieee80211_sta_process_addba_request(dev, mgmt, len);
3045 break;
3046 case WLAN_ACTION_ADDBA_RESP:
3047 if (len < (IEEE80211_MIN_ACTION_SIZE +
3048 sizeof(mgmt->u.action.u.addba_resp)))
3049 break;
3050 ieee80211_sta_process_addba_resp(dev, mgmt, len);
3051 break;
3052 case WLAN_ACTION_DELBA:
3053 if (len < (IEEE80211_MIN_ACTION_SIZE +
3054 sizeof(mgmt->u.action.u.delba)))
3055 break;
3056 ieee80211_sta_process_delba(dev, mgmt, len);
3057 break;
3058 }
3059 break;
3060 case PLINK_CATEGORY:
3061 if (ieee80211_vif_is_mesh(&sdata->vif))
3062 mesh_rx_plink_frame(dev, mgmt, len, rx_status);
3063 break;
3064 case MESH_PATH_SEL_CATEGORY:
3065 if (ieee80211_vif_is_mesh(&sdata->vif))
3066 mesh_rx_path_sel_frame(dev, mgmt, len);
3067 break;
3068 }
3069}
3070
3071void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
3072 struct ieee80211_rx_status *rx_status) 1761 struct ieee80211_rx_status *rx_status)
3073{ 1762{
3074 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1763 struct ieee80211_local *local = sdata->local;
3075 struct ieee80211_sub_if_data *sdata;
3076 struct ieee80211_if_sta *ifsta; 1764 struct ieee80211_if_sta *ifsta;
3077 struct ieee80211_mgmt *mgmt; 1765 struct ieee80211_mgmt *mgmt;
3078 u16 fc; 1766 u16 fc;
@@ -3080,7 +1768,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
3080 if (skb->len < 24) 1768 if (skb->len < 24)
3081 goto fail; 1769 goto fail;
3082 1770
3083 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3084 ifsta = &sdata->u.sta; 1771 ifsta = &sdata->u.sta;
3085 1772
3086 mgmt = (struct ieee80211_mgmt *) skb->data; 1773 mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -3090,7 +1777,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
3090 case IEEE80211_STYPE_PROBE_REQ: 1777 case IEEE80211_STYPE_PROBE_REQ:
3091 case IEEE80211_STYPE_PROBE_RESP: 1778 case IEEE80211_STYPE_PROBE_RESP:
3092 case IEEE80211_STYPE_BEACON: 1779 case IEEE80211_STYPE_BEACON:
3093 case IEEE80211_STYPE_ACTION:
3094 memcpy(skb->cb, rx_status, sizeof(*rx_status)); 1780 memcpy(skb->cb, rx_status, sizeof(*rx_status));
3095 case IEEE80211_STYPE_AUTH: 1781 case IEEE80211_STYPE_AUTH:
3096 case IEEE80211_STYPE_ASSOC_RESP: 1782 case IEEE80211_STYPE_ASSOC_RESP:
@@ -3106,17 +1792,14 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
3106 kfree_skb(skb); 1792 kfree_skb(skb);
3107} 1793}
3108 1794
3109 1795static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
3110static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3111 struct sk_buff *skb) 1796 struct sk_buff *skb)
3112{ 1797{
3113 struct ieee80211_rx_status *rx_status; 1798 struct ieee80211_rx_status *rx_status;
3114 struct ieee80211_sub_if_data *sdata;
3115 struct ieee80211_if_sta *ifsta; 1799 struct ieee80211_if_sta *ifsta;
3116 struct ieee80211_mgmt *mgmt; 1800 struct ieee80211_mgmt *mgmt;
3117 u16 fc; 1801 u16 fc;
3118 1802
3119 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3120 ifsta = &sdata->u.sta; 1803 ifsta = &sdata->u.sta;
3121 1804
3122 rx_status = (struct ieee80211_rx_status *) skb->cb; 1805 rx_status = (struct ieee80211_rx_status *) skb->cb;
@@ -3125,17 +1808,17 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3125 1808
3126 switch (fc & IEEE80211_FCTL_STYPE) { 1809 switch (fc & IEEE80211_FCTL_STYPE) {
3127 case IEEE80211_STYPE_PROBE_REQ: 1810 case IEEE80211_STYPE_PROBE_REQ:
3128 ieee80211_rx_mgmt_probe_req(dev, ifsta, mgmt, skb->len, 1811 ieee80211_rx_mgmt_probe_req(sdata, ifsta, mgmt, skb->len,
3129 rx_status); 1812 rx_status);
3130 break; 1813 break;
3131 case IEEE80211_STYPE_PROBE_RESP: 1814 case IEEE80211_STYPE_PROBE_RESP:
3132 ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status); 1815 ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len, rx_status);
3133 break; 1816 break;
3134 case IEEE80211_STYPE_BEACON: 1817 case IEEE80211_STYPE_BEACON:
3135 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status); 1818 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
3136 break; 1819 break;
3137 case IEEE80211_STYPE_AUTH: 1820 case IEEE80211_STYPE_AUTH:
3138 ieee80211_rx_mgmt_auth(dev, ifsta, mgmt, skb->len); 1821 ieee80211_rx_mgmt_auth(sdata, ifsta, mgmt, skb->len);
3139 break; 1822 break;
3140 case IEEE80211_STYPE_ASSOC_RESP: 1823 case IEEE80211_STYPE_ASSOC_RESP:
3141 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0); 1824 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0);
@@ -3144,13 +1827,10 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3144 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1); 1827 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1);
3145 break; 1828 break;
3146 case IEEE80211_STYPE_DEAUTH: 1829 case IEEE80211_STYPE_DEAUTH:
3147 ieee80211_rx_mgmt_deauth(dev, ifsta, mgmt, skb->len); 1830 ieee80211_rx_mgmt_deauth(sdata, ifsta, mgmt, skb->len);
3148 break; 1831 break;
3149 case IEEE80211_STYPE_DISASSOC: 1832 case IEEE80211_STYPE_DISASSOC:
3150 ieee80211_rx_mgmt_disassoc(dev, ifsta, mgmt, skb->len); 1833 ieee80211_rx_mgmt_disassoc(sdata, ifsta, mgmt, skb->len);
3151 break;
3152 case IEEE80211_STYPE_ACTION:
3153 ieee80211_rx_mgmt_action(dev, ifsta, mgmt, skb->len, rx_status);
3154 break; 1834 break;
3155 } 1835 }
3156 1836
@@ -3158,47 +1838,11 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3158} 1838}
3159 1839
3160 1840
3161ieee80211_rx_result 1841static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
3162ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
3163 struct ieee80211_rx_status *rx_status)
3164{
3165 struct ieee80211_mgmt *mgmt;
3166 __le16 fc;
3167
3168 if (skb->len < 2)
3169 return RX_DROP_UNUSABLE;
3170
3171 mgmt = (struct ieee80211_mgmt *) skb->data;
3172 fc = mgmt->frame_control;
3173
3174 if (ieee80211_is_ctl(fc))
3175 return RX_CONTINUE;
3176
3177 if (skb->len < 24)
3178 return RX_DROP_MONITOR;
3179
3180 if (ieee80211_is_probe_resp(fc)) {
3181 ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status);
3182 dev_kfree_skb(skb);
3183 return RX_QUEUED;
3184 }
3185
3186 if (ieee80211_is_beacon(fc)) {
3187 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status);
3188 dev_kfree_skb(skb);
3189 return RX_QUEUED;
3190 }
3191
3192 return RX_CONTINUE;
3193}
3194
3195
3196static int ieee80211_sta_active_ibss(struct net_device *dev)
3197{ 1842{
3198 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1843 struct ieee80211_local *local = sdata->local;
3199 int active = 0; 1844 int active = 0;
3200 struct sta_info *sta; 1845 struct sta_info *sta;
3201 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3202 1846
3203 rcu_read_lock(); 1847 rcu_read_lock();
3204 1848
@@ -3217,179 +1861,36 @@ static int ieee80211_sta_active_ibss(struct net_device *dev)
3217} 1861}
3218 1862
3219 1863
3220static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time) 1864static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata,
3221{
3222 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3223 struct sta_info *sta, *tmp;
3224 LIST_HEAD(tmp_list);
3225 DECLARE_MAC_BUF(mac);
3226 unsigned long flags;
3227
3228 spin_lock_irqsave(&local->sta_lock, flags);
3229 list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
3230 if (time_after(jiffies, sta->last_rx + exp_time)) {
3231#ifdef CONFIG_MAC80211_IBSS_DEBUG
3232 printk(KERN_DEBUG "%s: expiring inactive STA %s\n",
3233 dev->name, print_mac(mac, sta->addr));
3234#endif
3235 __sta_info_unlink(&sta);
3236 if (sta)
3237 list_add(&sta->list, &tmp_list);
3238 }
3239 spin_unlock_irqrestore(&local->sta_lock, flags);
3240
3241 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
3242 sta_info_destroy(sta);
3243}
3244
3245
3246static void ieee80211_sta_merge_ibss(struct net_device *dev,
3247 struct ieee80211_if_sta *ifsta) 1865 struct ieee80211_if_sta *ifsta)
3248{ 1866{
3249 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 1867 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
3250 1868
3251 ieee80211_sta_expire(dev, IEEE80211_IBSS_INACTIVITY_LIMIT); 1869 ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
3252 if (ieee80211_sta_active_ibss(dev)) 1870 if (ieee80211_sta_active_ibss(sdata))
3253 return; 1871 return;
3254 1872
3255 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 1873 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
3256 "IBSS networks with same SSID (merge)\n", dev->name); 1874 "IBSS networks with same SSID (merge)\n", sdata->dev->name);
3257 ieee80211_sta_req_scan(dev, ifsta->ssid, ifsta->ssid_len); 1875 ieee80211_request_scan(sdata, ifsta->ssid, ifsta->ssid_len);
3258}
3259
3260
3261#ifdef CONFIG_MAC80211_MESH
3262static void ieee80211_mesh_housekeeping(struct net_device *dev,
3263 struct ieee80211_if_sta *ifsta)
3264{
3265 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3266 bool free_plinks;
3267
3268 ieee80211_sta_expire(dev, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
3269 mesh_path_expire(dev);
3270
3271 free_plinks = mesh_plink_availables(sdata);
3272 if (free_plinks != sdata->u.sta.accepting_plinks)
3273 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
3274
3275 mod_timer(&ifsta->timer, jiffies +
3276 IEEE80211_MESH_HOUSEKEEPING_INTERVAL);
3277} 1876}
3278 1877
3279 1878
3280void ieee80211_start_mesh(struct net_device *dev) 1879static void ieee80211_sta_timer(unsigned long data)
3281{
3282 struct ieee80211_if_sta *ifsta;
3283 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3284 ifsta = &sdata->u.sta;
3285 ifsta->state = IEEE80211_MESH_UP;
3286 ieee80211_sta_timer((unsigned long)sdata);
3287 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
3288}
3289#endif
3290
3291
3292void ieee80211_sta_timer(unsigned long data)
3293{ 1880{
3294 struct ieee80211_sub_if_data *sdata = 1881 struct ieee80211_sub_if_data *sdata =
3295 (struct ieee80211_sub_if_data *) data; 1882 (struct ieee80211_sub_if_data *) data;
3296 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 1883 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
3297 struct ieee80211_local *local = wdev_priv(&sdata->wdev); 1884 struct ieee80211_local *local = sdata->local;
3298 1885
3299 set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); 1886 set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
3300 queue_work(local->hw.workqueue, &ifsta->work); 1887 queue_work(local->hw.workqueue, &ifsta->work);
3301} 1888}
3302 1889
3303void ieee80211_sta_work(struct work_struct *work) 1890static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata,
3304{
3305 struct ieee80211_sub_if_data *sdata =
3306 container_of(work, struct ieee80211_sub_if_data, u.sta.work);
3307 struct net_device *dev = sdata->dev;
3308 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3309 struct ieee80211_if_sta *ifsta;
3310 struct sk_buff *skb;
3311
3312 if (!netif_running(dev))
3313 return;
3314
3315 if (local->sta_sw_scanning || local->sta_hw_scanning)
3316 return;
3317
3318 if (WARN_ON(sdata->vif.type != IEEE80211_IF_TYPE_STA &&
3319 sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
3320 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
3321 return;
3322 ifsta = &sdata->u.sta;
3323
3324 while ((skb = skb_dequeue(&ifsta->skb_queue)))
3325 ieee80211_sta_rx_queued_mgmt(dev, skb);
3326
3327#ifdef CONFIG_MAC80211_MESH
3328 if (ifsta->preq_queue_len &&
3329 time_after(jiffies,
3330 ifsta->last_preq + msecs_to_jiffies(ifsta->mshcfg.dot11MeshHWMPpreqMinInterval)))
3331 mesh_path_start_discovery(dev);
3332#endif
3333
3334 if (ifsta->state != IEEE80211_AUTHENTICATE &&
3335 ifsta->state != IEEE80211_ASSOCIATE &&
3336 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
3337 if (ifsta->scan_ssid_len)
3338 ieee80211_sta_start_scan(dev, ifsta->scan_ssid, ifsta->scan_ssid_len);
3339 else
3340 ieee80211_sta_start_scan(dev, NULL, 0);
3341 return;
3342 }
3343
3344 if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) {
3345 if (ieee80211_sta_config_auth(dev, ifsta))
3346 return;
3347 clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
3348 } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request))
3349 return;
3350
3351 switch (ifsta->state) {
3352 case IEEE80211_DISABLED:
3353 break;
3354 case IEEE80211_AUTHENTICATE:
3355 ieee80211_authenticate(dev, ifsta);
3356 break;
3357 case IEEE80211_ASSOCIATE:
3358 ieee80211_associate(dev, ifsta);
3359 break;
3360 case IEEE80211_ASSOCIATED:
3361 ieee80211_associated(dev, ifsta);
3362 break;
3363 case IEEE80211_IBSS_SEARCH:
3364 ieee80211_sta_find_ibss(dev, ifsta);
3365 break;
3366 case IEEE80211_IBSS_JOINED:
3367 ieee80211_sta_merge_ibss(dev, ifsta);
3368 break;
3369#ifdef CONFIG_MAC80211_MESH
3370 case IEEE80211_MESH_UP:
3371 ieee80211_mesh_housekeeping(dev, ifsta);
3372 break;
3373#endif
3374 default:
3375 WARN_ON(1);
3376 break;
3377 }
3378
3379 if (ieee80211_privacy_mismatch(dev, ifsta)) {
3380 printk(KERN_DEBUG "%s: privacy configuration mismatch and "
3381 "mixed-cell disabled - disassociate\n", dev->name);
3382
3383 ieee80211_send_disassoc(dev, ifsta, WLAN_REASON_UNSPECIFIED);
3384 ieee80211_set_disassoc(dev, ifsta, 0);
3385 }
3386}
3387
3388
3389static void ieee80211_sta_reset_auth(struct net_device *dev,
3390 struct ieee80211_if_sta *ifsta) 1891 struct ieee80211_if_sta *ifsta)
3391{ 1892{
3392 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1893 struct ieee80211_local *local = sdata->local;
3393 1894
3394 if (local->ops->reset_tsf) { 1895 if (local->ops->reset_tsf) {
3395 /* Reset own TSF to allow time synchronization work. */ 1896 /* Reset own TSF to allow time synchronization work. */
@@ -3409,29 +1910,15 @@ static void ieee80211_sta_reset_auth(struct net_device *dev,
3409 ifsta->auth_alg = WLAN_AUTH_OPEN; 1910 ifsta->auth_alg = WLAN_AUTH_OPEN;
3410 ifsta->auth_transaction = -1; 1911 ifsta->auth_transaction = -1;
3411 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; 1912 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
3412 ifsta->auth_tries = ifsta->assoc_tries = 0; 1913 ifsta->assoc_scan_tries = 0;
3413 netif_carrier_off(dev); 1914 ifsta->direct_probe_tries = 0;
1915 ifsta->auth_tries = 0;
1916 ifsta->assoc_tries = 0;
1917 netif_tx_stop_all_queues(sdata->dev);
1918 netif_carrier_off(sdata->dev);
3414} 1919}
3415 1920
3416 1921
3417void ieee80211_sta_req_auth(struct net_device *dev,
3418 struct ieee80211_if_sta *ifsta)
3419{
3420 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3421 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3422
3423 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
3424 return;
3425
3426 if ((ifsta->flags & (IEEE80211_STA_BSSID_SET |
3427 IEEE80211_STA_AUTO_BSSID_SEL)) &&
3428 (ifsta->flags & (IEEE80211_STA_SSID_SET |
3429 IEEE80211_STA_AUTO_SSID_SEL))) {
3430 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
3431 queue_work(local->hw.workqueue, &ifsta->work);
3432 }
3433}
3434
3435static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta, 1922static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
3436 const char *ssid, int ssid_len) 1923 const char *ssid, int ssid_len)
3437{ 1924{
@@ -3462,81 +1949,11 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
3462 return 0; 1949 return 0;
3463} 1950}
3464 1951
3465static int ieee80211_sta_config_auth(struct net_device *dev, 1952static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata,
3466 struct ieee80211_if_sta *ifsta) 1953 struct ieee80211_if_sta *ifsta)
3467{ 1954{
3468 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1955 struct ieee80211_local *local = sdata->local;
3469 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1956 struct ieee80211_bss *bss;
3470 struct ieee80211_sta_bss *bss, *selected = NULL;
3471 int top_rssi = 0, freq;
3472
3473 spin_lock_bh(&local->sta_bss_lock);
3474 freq = local->oper_channel->center_freq;
3475 list_for_each_entry(bss, &local->sta_bss_list, list) {
3476 if (!(bss->capability & WLAN_CAPABILITY_ESS))
3477 continue;
3478
3479 if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
3480 IEEE80211_STA_AUTO_BSSID_SEL |
3481 IEEE80211_STA_AUTO_CHANNEL_SEL)) &&
3482 (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^
3483 !!sdata->default_key))
3484 continue;
3485
3486 if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) &&
3487 bss->freq != freq)
3488 continue;
3489
3490 if (!(ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) &&
3491 memcmp(bss->bssid, ifsta->bssid, ETH_ALEN))
3492 continue;
3493
3494 if (!(ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) &&
3495 !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len))
3496 continue;
3497
3498 if (!selected || top_rssi < bss->signal) {
3499 selected = bss;
3500 top_rssi = bss->signal;
3501 }
3502 }
3503 if (selected)
3504 atomic_inc(&selected->users);
3505 spin_unlock_bh(&local->sta_bss_lock);
3506
3507 if (selected) {
3508 ieee80211_set_freq(dev, selected->freq);
3509 if (!(ifsta->flags & IEEE80211_STA_SSID_SET))
3510 ieee80211_sta_set_ssid(dev, selected->ssid,
3511 selected->ssid_len);
3512 ieee80211_sta_set_bssid(dev, selected->bssid);
3513 ieee80211_sta_def_wmm_params(dev, selected, 0);
3514 ieee80211_rx_bss_put(local, selected);
3515 ifsta->state = IEEE80211_AUTHENTICATE;
3516 ieee80211_sta_reset_auth(dev, ifsta);
3517 return 0;
3518 } else {
3519 if (ifsta->state != IEEE80211_AUTHENTICATE) {
3520 if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL)
3521 ieee80211_sta_start_scan(dev, NULL, 0);
3522 else
3523 ieee80211_sta_start_scan(dev, ifsta->ssid,
3524 ifsta->ssid_len);
3525 ifsta->state = IEEE80211_AUTHENTICATE;
3526 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
3527 } else
3528 ifsta->state = IEEE80211_DISABLED;
3529 }
3530 return -1;
3531}
3532
3533
3534static int ieee80211_sta_create_ibss(struct net_device *dev,
3535 struct ieee80211_if_sta *ifsta)
3536{
3537 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3538 struct ieee80211_sta_bss *bss;
3539 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3540 struct ieee80211_supported_band *sband; 1957 struct ieee80211_supported_band *sband;
3541 u8 bssid[ETH_ALEN], *pos; 1958 u8 bssid[ETH_ALEN], *pos;
3542 int i; 1959 int i;
@@ -3552,15 +1969,15 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
3552 * random number generator get different BSSID. */ 1969 * random number generator get different BSSID. */
3553 get_random_bytes(bssid, ETH_ALEN); 1970 get_random_bytes(bssid, ETH_ALEN);
3554 for (i = 0; i < ETH_ALEN; i++) 1971 for (i = 0; i < ETH_ALEN; i++)
3555 bssid[i] ^= dev->dev_addr[i]; 1972 bssid[i] ^= sdata->dev->dev_addr[i];
3556 bssid[0] &= ~0x01; 1973 bssid[0] &= ~0x01;
3557 bssid[0] |= 0x02; 1974 bssid[0] |= 0x02;
3558#endif 1975#endif
3559 1976
3560 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", 1977 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n",
3561 dev->name, print_mac(mac, bssid)); 1978 sdata->dev->name, print_mac(mac, bssid));
3562 1979
3563 bss = ieee80211_rx_bss_add(dev, bssid, 1980 bss = ieee80211_rx_bss_add(local, bssid,
3564 local->hw.conf.channel->center_freq, 1981 local->hw.conf.channel->center_freq,
3565 sdata->u.sta.ssid, sdata->u.sta.ssid_len); 1982 sdata->u.sta.ssid, sdata->u.sta.ssid_len);
3566 if (!bss) 1983 if (!bss)
@@ -3587,17 +2004,17 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
3587 *pos++ = (u8) (rate / 5); 2004 *pos++ = (u8) (rate / 5);
3588 } 2005 }
3589 2006
3590 ret = ieee80211_sta_join_ibss(dev, ifsta, bss); 2007 ret = ieee80211_sta_join_ibss(sdata, ifsta, bss);
3591 ieee80211_rx_bss_put(local, bss); 2008 ieee80211_rx_bss_put(local, bss);
3592 return ret; 2009 return ret;
3593} 2010}
3594 2011
3595 2012
3596static int ieee80211_sta_find_ibss(struct net_device *dev, 2013static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata,
3597 struct ieee80211_if_sta *ifsta) 2014 struct ieee80211_if_sta *ifsta)
3598{ 2015{
3599 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2016 struct ieee80211_local *local = sdata->local;
3600 struct ieee80211_sta_bss *bss; 2017 struct ieee80211_bss *bss;
3601 int found = 0; 2018 int found = 0;
3602 u8 bssid[ETH_ALEN]; 2019 u8 bssid[ETH_ALEN];
3603 int active_ibss; 2020 int active_ibss;
@@ -3607,13 +2024,13 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
3607 if (ifsta->ssid_len == 0) 2024 if (ifsta->ssid_len == 0)
3608 return -EINVAL; 2025 return -EINVAL;
3609 2026
3610 active_ibss = ieee80211_sta_active_ibss(dev); 2027 active_ibss = ieee80211_sta_active_ibss(sdata);
3611#ifdef CONFIG_MAC80211_IBSS_DEBUG 2028#ifdef CONFIG_MAC80211_IBSS_DEBUG
3612 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", 2029 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
3613 dev->name, active_ibss); 2030 sdata->dev->name, active_ibss);
3614#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2031#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3615 spin_lock_bh(&local->sta_bss_lock); 2032 spin_lock_bh(&local->bss_lock);
3616 list_for_each_entry(bss, &local->sta_bss_list, list) { 2033 list_for_each_entry(bss, &local->bss_list, list) {
3617 if (ifsta->ssid_len != bss->ssid_len || 2034 if (ifsta->ssid_len != bss->ssid_len ||
3618 memcmp(ifsta->ssid, bss->ssid, bss->ssid_len) != 0 2035 memcmp(ifsta->ssid, bss->ssid, bss->ssid_len) != 0
3619 || !(bss->capability & WLAN_CAPABILITY_IBSS)) 2036 || !(bss->capability & WLAN_CAPABILITY_IBSS))
@@ -3627,7 +2044,7 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
3627 if (active_ibss || memcmp(bssid, ifsta->bssid, ETH_ALEN) != 0) 2044 if (active_ibss || memcmp(bssid, ifsta->bssid, ETH_ALEN) != 0)
3628 break; 2045 break;
3629 } 2046 }
3630 spin_unlock_bh(&local->sta_bss_lock); 2047 spin_unlock_bh(&local->bss_lock);
3631 2048
3632#ifdef CONFIG_MAC80211_IBSS_DEBUG 2049#ifdef CONFIG_MAC80211_IBSS_DEBUG
3633 if (found) 2050 if (found)
@@ -3645,15 +2062,15 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
3645 else 2062 else
3646 search_freq = local->hw.conf.channel->center_freq; 2063 search_freq = local->hw.conf.channel->center_freq;
3647 2064
3648 bss = ieee80211_rx_bss_get(dev, bssid, search_freq, 2065 bss = ieee80211_rx_bss_get(local, bssid, search_freq,
3649 ifsta->ssid, ifsta->ssid_len); 2066 ifsta->ssid, ifsta->ssid_len);
3650 if (!bss) 2067 if (!bss)
3651 goto dont_join; 2068 goto dont_join;
3652 2069
3653 printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" 2070 printk(KERN_DEBUG "%s: Selected IBSS BSSID %s"
3654 " based on configured SSID\n", 2071 " based on configured SSID\n",
3655 dev->name, print_mac(mac, bssid)); 2072 sdata->dev->name, print_mac(mac, bssid));
3656 ret = ieee80211_sta_join_ibss(dev, ifsta, bss); 2073 ret = ieee80211_sta_join_ibss(sdata, ifsta, bss);
3657 ieee80211_rx_bss_put(local, bss); 2074 ieee80211_rx_bss_put(local, bss);
3658 return ret; 2075 return ret;
3659 } 2076 }
@@ -3664,17 +2081,17 @@ dont_join:
3664#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2081#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3665 2082
3666 /* Selected IBSS not found in current scan results - try to scan */ 2083 /* Selected IBSS not found in current scan results - try to scan */
3667 if (ifsta->state == IEEE80211_IBSS_JOINED && 2084 if (ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED &&
3668 !ieee80211_sta_active_ibss(dev)) { 2085 !ieee80211_sta_active_ibss(sdata)) {
3669 mod_timer(&ifsta->timer, jiffies + 2086 mod_timer(&ifsta->timer, jiffies +
3670 IEEE80211_IBSS_MERGE_INTERVAL); 2087 IEEE80211_IBSS_MERGE_INTERVAL);
3671 } else if (time_after(jiffies, local->last_scan_completed + 2088 } else if (time_after(jiffies, local->last_scan_completed +
3672 IEEE80211_SCAN_INTERVAL)) { 2089 IEEE80211_SCAN_INTERVAL)) {
3673 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 2090 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
3674 "join\n", dev->name); 2091 "join\n", sdata->dev->name);
3675 return ieee80211_sta_req_scan(dev, ifsta->ssid, 2092 return ieee80211_request_scan(sdata, ifsta->ssid,
3676 ifsta->ssid_len); 2093 ifsta->ssid_len);
3677 } else if (ifsta->state != IEEE80211_IBSS_JOINED) { 2094 } else if (ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED) {
3678 int interval = IEEE80211_SCAN_INTERVAL; 2095 int interval = IEEE80211_SCAN_INTERVAL;
3679 2096
3680 if (time_after(jiffies, ifsta->ibss_join_req + 2097 if (time_after(jiffies, ifsta->ibss_join_req +
@@ -3682,10 +2099,10 @@ dont_join:
3682 if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && 2099 if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) &&
3683 (!(local->oper_channel->flags & 2100 (!(local->oper_channel->flags &
3684 IEEE80211_CHAN_NO_IBSS))) 2101 IEEE80211_CHAN_NO_IBSS)))
3685 return ieee80211_sta_create_ibss(dev, ifsta); 2102 return ieee80211_sta_create_ibss(sdata, ifsta);
3686 if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { 2103 if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) {
3687 printk(KERN_DEBUG "%s: IBSS not allowed on" 2104 printk(KERN_DEBUG "%s: IBSS not allowed on"
3688 " %d MHz\n", dev->name, 2105 " %d MHz\n", sdata->dev->name,
3689 local->hw.conf.channel->center_freq); 2106 local->hw.conf.channel->center_freq);
3690 } 2107 }
3691 2108
@@ -3694,7 +2111,7 @@ dont_join:
3694 interval = IEEE80211_SCAN_INTERVAL_SLOW; 2111 interval = IEEE80211_SCAN_INTERVAL_SLOW;
3695 } 2112 }
3696 2113
3697 ifsta->state = IEEE80211_IBSS_SEARCH; 2114 ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH;
3698 mod_timer(&ifsta->timer, jiffies + interval); 2115 mod_timer(&ifsta->timer, jiffies + interval);
3699 return 0; 2116 return 0;
3700 } 2117 }
@@ -3703,620 +2120,344 @@ dont_join:
3703} 2120}
3704 2121
3705 2122
3706int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) 2123static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata,
2124 struct ieee80211_if_sta *ifsta)
3707{ 2125{
3708 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2126 struct ieee80211_local *local = sdata->local;
3709 struct ieee80211_if_sta *ifsta; 2127 struct ieee80211_bss *bss, *selected = NULL;
3710 int res; 2128 int top_rssi = 0, freq;
3711 2129
3712 if (len > IEEE80211_MAX_SSID_LEN) 2130 spin_lock_bh(&local->bss_lock);
3713 return -EINVAL; 2131 freq = local->oper_channel->center_freq;
2132 list_for_each_entry(bss, &local->bss_list, list) {
2133 if (!(bss->capability & WLAN_CAPABILITY_ESS))
2134 continue;
3714 2135
3715 ifsta = &sdata->u.sta; 2136 if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
2137 IEEE80211_STA_AUTO_BSSID_SEL |
2138 IEEE80211_STA_AUTO_CHANNEL_SEL)) &&
2139 (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^
2140 !!sdata->default_key))
2141 continue;
3716 2142
3717 if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) { 2143 if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) &&
3718 memset(ifsta->ssid, 0, sizeof(ifsta->ssid)); 2144 bss->freq != freq)
3719 memcpy(ifsta->ssid, ssid, len); 2145 continue;
3720 ifsta->ssid_len = len;
3721 ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
3722 2146
3723 res = 0; 2147 if (!(ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) &&
3724 /* 2148 memcmp(bss->bssid, ifsta->bssid, ETH_ALEN))
3725 * Hack! MLME code needs to be cleaned up to have different 2149 continue;
3726 * entry points for configuration and internal selection change
3727 */
3728 if (netif_running(sdata->dev))
3729 res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID);
3730 if (res) {
3731 printk(KERN_DEBUG "%s: Failed to config new SSID to "
3732 "the low-level driver\n", dev->name);
3733 return res;
3734 }
3735 }
3736 2150
3737 if (len) 2151 if (!(ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) &&
3738 ifsta->flags |= IEEE80211_STA_SSID_SET; 2152 !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len))
3739 else 2153 continue;
3740 ifsta->flags &= ~IEEE80211_STA_SSID_SET;
3741 2154
3742 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && 2155 if (!selected || top_rssi < bss->signal) {
3743 !(ifsta->flags & IEEE80211_STA_BSSID_SET)) { 2156 selected = bss;
3744 ifsta->ibss_join_req = jiffies; 2157 top_rssi = bss->signal;
3745 ifsta->state = IEEE80211_IBSS_SEARCH; 2158 }
3746 return ieee80211_sta_find_ibss(dev, ifsta);
3747 } 2159 }
2160 if (selected)
2161 atomic_inc(&selected->users);
2162 spin_unlock_bh(&local->bss_lock);
3748 2163
3749 return 0; 2164 if (selected) {
3750} 2165 ieee80211_set_freq(sdata, selected->freq);
2166 if (!(ifsta->flags & IEEE80211_STA_SSID_SET))
2167 ieee80211_sta_set_ssid(sdata, selected->ssid,
2168 selected->ssid_len);
2169 ieee80211_sta_set_bssid(sdata, selected->bssid);
2170 ieee80211_sta_def_wmm_params(sdata, selected);
3751 2171
2172 /* Send out direct probe if no probe resp was received or
2173 * the one we have is outdated
2174 */
2175 if (!selected->last_probe_resp ||
2176 time_after(jiffies, selected->last_probe_resp
2177 + IEEE80211_SCAN_RESULT_EXPIRE))
2178 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
2179 else
2180 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
3752 2181
3753int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len) 2182 ieee80211_rx_bss_put(local, selected);
3754{ 2183 ieee80211_sta_reset_auth(sdata, ifsta);
3755 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2184 return 0;
3756 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 2185 } else {
3757 memcpy(ssid, ifsta->ssid, ifsta->ssid_len); 2186 if (ifsta->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) {
3758 *len = ifsta->ssid_len; 2187 ifsta->assoc_scan_tries++;
3759 return 0; 2188 if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL)
2189 ieee80211_start_scan(sdata, NULL, 0);
2190 else
2191 ieee80211_start_scan(sdata, ifsta->ssid,
2192 ifsta->ssid_len);
2193 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
2194 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
2195 } else
2196 ifsta->state = IEEE80211_STA_MLME_DISABLED;
2197 }
2198 return -1;
3760} 2199}
3761 2200
3762 2201
3763int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid) 2202static void ieee80211_sta_work(struct work_struct *work)
3764{ 2203{
3765 struct ieee80211_sub_if_data *sdata; 2204 struct ieee80211_sub_if_data *sdata =
2205 container_of(work, struct ieee80211_sub_if_data, u.sta.work);
2206 struct ieee80211_local *local = sdata->local;
3766 struct ieee80211_if_sta *ifsta; 2207 struct ieee80211_if_sta *ifsta;
3767 int res; 2208 struct sk_buff *skb;
3768
3769 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3770 ifsta = &sdata->u.sta;
3771 2209
3772 if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { 2210 if (!netif_running(sdata->dev))
3773 memcpy(ifsta->bssid, bssid, ETH_ALEN); 2211 return;
3774 res = 0;
3775 /*
3776 * Hack! See also ieee80211_sta_set_ssid.
3777 */
3778 if (netif_running(sdata->dev))
3779 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
3780 if (res) {
3781 printk(KERN_DEBUG "%s: Failed to config new BSSID to "
3782 "the low-level driver\n", dev->name);
3783 return res;
3784 }
3785 }
3786 2212
3787 if (is_valid_ether_addr(bssid)) 2213 if (local->sw_scanning || local->hw_scanning)
3788 ifsta->flags |= IEEE80211_STA_BSSID_SET; 2214 return;
3789 else
3790 ifsta->flags &= ~IEEE80211_STA_BSSID_SET;
3791 2215
3792 return 0; 2216 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION &&
3793} 2217 sdata->vif.type != NL80211_IFTYPE_ADHOC))
2218 return;
2219 ifsta = &sdata->u.sta;
3794 2220
2221 while ((skb = skb_dequeue(&ifsta->skb_queue)))
2222 ieee80211_sta_rx_queued_mgmt(sdata, skb);
3795 2223
3796static void ieee80211_send_nullfunc(struct ieee80211_local *local, 2224 if (ifsta->state != IEEE80211_STA_MLME_DIRECT_PROBE &&
3797 struct ieee80211_sub_if_data *sdata, 2225 ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE &&
3798 int powersave) 2226 ifsta->state != IEEE80211_STA_MLME_ASSOCIATE &&
3799{ 2227 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
3800 struct sk_buff *skb; 2228 ieee80211_start_scan(sdata, ifsta->scan_ssid,
3801 struct ieee80211_hdr *nullfunc; 2229 ifsta->scan_ssid_len);
3802 __le16 fc; 2230 return;
2231 }
3803 2232
3804 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); 2233 if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) {
3805 if (!skb) { 2234 if (ieee80211_sta_config_auth(sdata, ifsta))
3806 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " 2235 return;
3807 "frame\n", sdata->dev->name); 2236 clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
2237 } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request))
3808 return; 2238 return;
2239
2240 switch (ifsta->state) {
2241 case IEEE80211_STA_MLME_DISABLED:
2242 break;
2243 case IEEE80211_STA_MLME_DIRECT_PROBE:
2244 ieee80211_direct_probe(sdata, ifsta);
2245 break;
2246 case IEEE80211_STA_MLME_AUTHENTICATE:
2247 ieee80211_authenticate(sdata, ifsta);
2248 break;
2249 case IEEE80211_STA_MLME_ASSOCIATE:
2250 ieee80211_associate(sdata, ifsta);
2251 break;
2252 case IEEE80211_STA_MLME_ASSOCIATED:
2253 ieee80211_associated(sdata, ifsta);
2254 break;
2255 case IEEE80211_STA_MLME_IBSS_SEARCH:
2256 ieee80211_sta_find_ibss(sdata, ifsta);
2257 break;
2258 case IEEE80211_STA_MLME_IBSS_JOINED:
2259 ieee80211_sta_merge_ibss(sdata, ifsta);
2260 break;
2261 default:
2262 WARN_ON(1);
2263 break;
3809 } 2264 }
3810 skb_reserve(skb, local->hw.extra_tx_headroom);
3811 2265
3812 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); 2266 if (ieee80211_privacy_mismatch(sdata, ifsta)) {
3813 memset(nullfunc, 0, 24); 2267 printk(KERN_DEBUG "%s: privacy configuration mismatch and "
3814 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | 2268 "mixed-cell disabled - disassociate\n", sdata->dev->name);
3815 IEEE80211_FCTL_TODS);
3816 if (powersave)
3817 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
3818 nullfunc->frame_control = fc;
3819 memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN);
3820 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
3821 memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN);
3822
3823 ieee80211_sta_tx(sdata->dev, skb, 0);
3824}
3825 2269
2270 ieee80211_set_disassoc(sdata, ifsta, false, true,
2271 WLAN_REASON_UNSPECIFIED);
2272 }
2273}
3826 2274
3827static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) 2275static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
3828{ 2276{
3829 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 2277 if (sdata->vif.type == NL80211_IFTYPE_STATION)
3830 ieee80211_vif_is_mesh(&sdata->vif)) 2278 queue_work(sdata->local->hw.workqueue,
3831 ieee80211_sta_timer((unsigned long)sdata); 2279 &sdata->u.sta.work);
3832} 2280}
3833 2281
3834void ieee80211_scan_completed(struct ieee80211_hw *hw) 2282/* interface setup */
2283void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
3835{ 2284{
3836 struct ieee80211_local *local = hw_to_local(hw); 2285 struct ieee80211_if_sta *ifsta;
3837 struct net_device *dev = local->scan_dev;
3838 struct ieee80211_sub_if_data *sdata;
3839 union iwreq_data wrqu;
3840 2286
3841 local->last_scan_completed = jiffies; 2287 ifsta = &sdata->u.sta;
3842 memset(&wrqu, 0, sizeof(wrqu)); 2288 INIT_WORK(&ifsta->work, ieee80211_sta_work);
3843 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); 2289 setup_timer(&ifsta->timer, ieee80211_sta_timer,
3844 2290 (unsigned long) sdata);
3845 if (local->sta_hw_scanning) { 2291 skb_queue_head_init(&ifsta->skb_queue);
3846 local->sta_hw_scanning = 0;
3847 if (ieee80211_hw_config(local))
3848 printk(KERN_DEBUG "%s: failed to restore operational "
3849 "channel after scan\n", dev->name);
3850 /* Restart STA timer for HW scan case */
3851 rcu_read_lock();
3852 list_for_each_entry_rcu(sdata, &local->interfaces, list)
3853 ieee80211_restart_sta_timer(sdata);
3854 rcu_read_unlock();
3855 2292
3856 goto done; 2293 ifsta->capab = WLAN_CAPABILITY_ESS;
2294 ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN |
2295 IEEE80211_AUTH_ALG_SHARED_KEY;
2296 ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
2297 IEEE80211_STA_AUTO_BSSID_SEL |
2298 IEEE80211_STA_AUTO_CHANNEL_SEL;
2299 if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
2300 ifsta->flags |= IEEE80211_STA_WMM_ENABLED;
2301}
2302
2303/*
2304 * Add a new IBSS station, will also be called by the RX code when,
2305 * in IBSS mode, receiving a frame from a yet-unknown station, hence
2306 * must be callable in atomic context.
2307 */
2308struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
2309 struct sk_buff *skb, u8 *bssid,
2310 u8 *addr, u64 supp_rates)
2311{
2312 struct ieee80211_local *local = sdata->local;
2313 struct sta_info *sta;
2314 DECLARE_MAC_BUF(mac);
2315 int band = local->hw.conf.channel->band;
2316
2317 /* TODO: Could consider removing the least recently used entry and
2318 * allow new one to be added. */
2319 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
2320 if (net_ratelimit()) {
2321 printk(KERN_DEBUG "%s: No room for a new IBSS STA "
2322 "entry %s\n", sdata->dev->name, print_mac(mac, addr));
2323 }
2324 return NULL;
3857 } 2325 }
3858 2326
3859 local->sta_sw_scanning = 0; 2327 if (compare_ether_addr(bssid, sdata->u.sta.bssid))
3860 if (ieee80211_hw_config(local)) 2328 return NULL;
3861 printk(KERN_DEBUG "%s: failed to restore operational "
3862 "channel after scan\n", dev->name);
3863 2329
2330#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
2331 printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n",
2332 wiphy_name(local->hw.wiphy), print_mac(mac, addr), sdata->dev->name);
2333#endif
3864 2334
3865 netif_tx_lock_bh(local->mdev); 2335 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
3866 netif_addr_lock(local->mdev); 2336 if (!sta)
3867 local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; 2337 return NULL;
3868 local->ops->configure_filter(local_to_hw(local),
3869 FIF_BCN_PRBRESP_PROMISC,
3870 &local->filter_flags,
3871 local->mdev->mc_count,
3872 local->mdev->mc_list);
3873 2338
3874 netif_addr_unlock(local->mdev); 2339 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
3875 netif_tx_unlock_bh(local->mdev);
3876 2340
3877 rcu_read_lock(); 2341 /* make sure mandatory rates are always added */
3878 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2342 sta->sta.supp_rates[band] = supp_rates |
3879 /* Tell AP we're back */ 2343 ieee80211_mandatory_rates(local, band);
3880 if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
3881 sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)
3882 ieee80211_send_nullfunc(local, sdata, 0);
3883 2344
3884 ieee80211_restart_sta_timer(sdata); 2345 rate_control_rate_init(sta, local);
3885 2346
3886 netif_wake_queue(sdata->dev); 2347 if (sta_info_insert(sta))
3887 } 2348 return NULL;
3888 rcu_read_unlock();
3889 2349
3890done: 2350 return sta;
3891 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3892 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
3893 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
3894 if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) ||
3895 (!(ifsta->state == IEEE80211_IBSS_JOINED) &&
3896 !ieee80211_sta_active_ibss(dev)))
3897 ieee80211_sta_find_ibss(dev, ifsta);
3898 }
3899} 2351}
3900EXPORT_SYMBOL(ieee80211_scan_completed);
3901 2352
3902void ieee80211_sta_scan_work(struct work_struct *work) 2353/* configuration hooks */
2354void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata,
2355 struct ieee80211_if_sta *ifsta)
3903{ 2356{
3904 struct ieee80211_local *local = 2357 struct ieee80211_local *local = sdata->local;
3905 container_of(work, struct ieee80211_local, scan_work.work);
3906 struct net_device *dev = local->scan_dev;
3907 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3908 struct ieee80211_supported_band *sband;
3909 struct ieee80211_channel *chan;
3910 int skip;
3911 unsigned long next_delay = 0;
3912 2358
3913 if (!local->sta_sw_scanning) 2359 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3914 return; 2360 return;
3915 2361
3916 switch (local->scan_state) { 2362 if ((ifsta->flags & (IEEE80211_STA_BSSID_SET |
3917 case SCAN_SET_CHANNEL: 2363 IEEE80211_STA_AUTO_BSSID_SEL)) &&
3918 /* 2364 (ifsta->flags & (IEEE80211_STA_SSID_SET |
3919 * Get current scan band. scan_band may be IEEE80211_NUM_BANDS 2365 IEEE80211_STA_AUTO_SSID_SEL))) {
3920 * after we successfully scanned the last channel of the last
3921 * band (and the last band is supported by the hw)
3922 */
3923 if (local->scan_band < IEEE80211_NUM_BANDS)
3924 sband = local->hw.wiphy->bands[local->scan_band];
3925 else
3926 sband = NULL;
3927
3928 /*
3929 * If we are at an unsupported band and have more bands
3930 * left to scan, advance to the next supported one.
3931 */
3932 while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) {
3933 local->scan_band++;
3934 sband = local->hw.wiphy->bands[local->scan_band];
3935 local->scan_channel_idx = 0;
3936 }
3937
3938 /* if no more bands/channels left, complete scan */
3939 if (!sband || local->scan_channel_idx >= sband->n_channels) {
3940 ieee80211_scan_completed(local_to_hw(local));
3941 return;
3942 }
3943 skip = 0;
3944 chan = &sband->channels[local->scan_channel_idx];
3945
3946 if (chan->flags & IEEE80211_CHAN_DISABLED ||
3947 (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
3948 chan->flags & IEEE80211_CHAN_NO_IBSS))
3949 skip = 1;
3950
3951 if (!skip) {
3952 local->scan_channel = chan;
3953 if (ieee80211_hw_config(local)) {
3954 printk(KERN_DEBUG "%s: failed to set freq to "
3955 "%d MHz for scan\n", dev->name,
3956 chan->center_freq);
3957 skip = 1;
3958 }
3959 }
3960
3961 /* advance state machine to next channel/band */
3962 local->scan_channel_idx++;
3963 if (local->scan_channel_idx >= sband->n_channels) {
3964 /*
3965 * scan_band may end up == IEEE80211_NUM_BANDS, but
3966 * we'll catch that case above and complete the scan
3967 * if that is the case.
3968 */
3969 local->scan_band++;
3970 local->scan_channel_idx = 0;
3971 }
3972
3973 if (skip)
3974 break;
3975 2366
3976 next_delay = IEEE80211_PROBE_DELAY + 2367 if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED)
3977 usecs_to_jiffies(local->hw.channel_change_time); 2368 ieee80211_set_disassoc(sdata, ifsta, true, true,
3978 local->scan_state = SCAN_SEND_PROBE; 2369 WLAN_REASON_DEAUTH_LEAVING);
3979 break;
3980 case SCAN_SEND_PROBE:
3981 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
3982 local->scan_state = SCAN_SET_CHANNEL;
3983 2370
3984 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN) 2371 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
3985 break; 2372 queue_work(local->hw.workqueue, &ifsta->work);
3986 ieee80211_send_probe_req(dev, NULL, local->scan_ssid,
3987 local->scan_ssid_len);
3988 next_delay = IEEE80211_CHANNEL_TIME;
3989 break;
3990 } 2373 }
3991
3992 if (local->sta_sw_scanning)
3993 queue_delayed_work(local->hw.workqueue, &local->scan_work,
3994 next_delay);
3995} 2374}
3996 2375
3997 2376int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len)
3998static int ieee80211_sta_start_scan(struct net_device *dev,
3999 u8 *ssid, size_t ssid_len)
4000{ 2377{
4001 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2378 struct ieee80211_if_sta *ifsta;
4002 struct ieee80211_sub_if_data *sdata; 2379 int res;
4003 2380
4004 if (ssid_len > IEEE80211_MAX_SSID_LEN) 2381 if (len > IEEE80211_MAX_SSID_LEN)
4005 return -EINVAL; 2382 return -EINVAL;
4006 2383
4007 /* MLME-SCAN.request (page 118) page 144 (11.1.3.1) 2384 ifsta = &sdata->u.sta;
4008 * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS
4009 * BSSID: MACAddress
4010 * SSID
4011 * ScanType: ACTIVE, PASSIVE
4012 * ProbeDelay: delay (in microseconds) to be used prior to transmitting
4013 * a Probe frame during active scanning
4014 * ChannelList
4015 * MinChannelTime (>= ProbeDelay), in TU
4016 * MaxChannelTime: (>= MinChannelTime), in TU
4017 */
4018
4019 /* MLME-SCAN.confirm
4020 * BSSDescriptionSet
4021 * ResultCode: SUCCESS, INVALID_PARAMETERS
4022 */
4023 2385
4024 if (local->sta_sw_scanning || local->sta_hw_scanning) { 2386 if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) {
4025 if (local->scan_dev == dev) 2387 memset(ifsta->ssid, 0, sizeof(ifsta->ssid));
4026 return 0; 2388 memcpy(ifsta->ssid, ssid, len);
4027 return -EBUSY; 2389 ifsta->ssid_len = len;
4028 } 2390 ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
4029 2391
4030 if (local->ops->hw_scan) { 2392 res = 0;
4031 int rc = local->ops->hw_scan(local_to_hw(local), 2393 /*
4032 ssid, ssid_len); 2394 * Hack! MLME code needs to be cleaned up to have different
4033 if (!rc) { 2395 * entry points for configuration and internal selection change
4034 local->sta_hw_scanning = 1; 2396 */
4035 local->scan_dev = dev; 2397 if (netif_running(sdata->dev))
2398 res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID);
2399 if (res) {
2400 printk(KERN_DEBUG "%s: Failed to config new SSID to "
2401 "the low-level driver\n", sdata->dev->name);
2402 return res;
4036 } 2403 }
4037 return rc;
4038 } 2404 }
4039 2405
4040 local->sta_sw_scanning = 1; 2406 if (len)
2407 ifsta->flags |= IEEE80211_STA_SSID_SET;
2408 else
2409 ifsta->flags &= ~IEEE80211_STA_SSID_SET;
4041 2410
4042 rcu_read_lock(); 2411 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
4043 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2412 !(ifsta->flags & IEEE80211_STA_BSSID_SET)) {
4044 netif_stop_queue(sdata->dev); 2413 ifsta->ibss_join_req = jiffies;
4045 if (sdata->vif.type == IEEE80211_IF_TYPE_STA && 2414 ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH;
4046 (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)) 2415 return ieee80211_sta_find_ibss(sdata, ifsta);
4047 ieee80211_send_nullfunc(local, sdata, 1);
4048 } 2416 }
4049 rcu_read_unlock();
4050
4051 if (ssid) {
4052 local->scan_ssid_len = ssid_len;
4053 memcpy(local->scan_ssid, ssid, ssid_len);
4054 } else
4055 local->scan_ssid_len = 0;
4056 local->scan_state = SCAN_SET_CHANNEL;
4057 local->scan_channel_idx = 0;
4058 local->scan_band = IEEE80211_BAND_2GHZ;
4059 local->scan_dev = dev;
4060
4061 netif_addr_lock_bh(local->mdev);
4062 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
4063 local->ops->configure_filter(local_to_hw(local),
4064 FIF_BCN_PRBRESP_PROMISC,
4065 &local->filter_flags,
4066 local->mdev->mc_count,
4067 local->mdev->mc_list);
4068 netif_addr_unlock_bh(local->mdev);
4069
4070 /* TODO: start scan as soon as all nullfunc frames are ACKed */
4071 queue_delayed_work(local->hw.workqueue, &local->scan_work,
4072 IEEE80211_CHANNEL_TIME);
4073 2417
4074 return 0; 2418 return 0;
4075} 2419}
4076 2420
4077 2421int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len)
4078int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len)
4079{ 2422{
4080 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4081 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 2423 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4082 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2424 memcpy(ssid, ifsta->ssid, ifsta->ssid_len);
4083 2425 *len = ifsta->ssid_len;
4084 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
4085 return ieee80211_sta_start_scan(dev, ssid, ssid_len);
4086
4087 if (local->sta_sw_scanning || local->sta_hw_scanning) {
4088 if (local->scan_dev == dev)
4089 return 0;
4090 return -EBUSY;
4091 }
4092
4093 ifsta->scan_ssid_len = ssid_len;
4094 if (ssid_len)
4095 memcpy(ifsta->scan_ssid, ssid, ssid_len);
4096 set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request);
4097 queue_work(local->hw.workqueue, &ifsta->work);
4098 return 0; 2426 return 0;
4099} 2427}
4100 2428
4101static char * 2429int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
4102ieee80211_sta_scan_result(struct net_device *dev,
4103 struct iw_request_info *info,
4104 struct ieee80211_sta_bss *bss,
4105 char *current_ev, char *end_buf)
4106{ 2430{
4107 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2431 struct ieee80211_if_sta *ifsta;
4108 struct iw_event iwe; 2432 int res;
4109
4110 if (time_after(jiffies,
4111 bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE))
4112 return current_ev;
4113
4114 memset(&iwe, 0, sizeof(iwe));
4115 iwe.cmd = SIOCGIWAP;
4116 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
4117 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
4118 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4119 IW_EV_ADDR_LEN);
4120
4121 memset(&iwe, 0, sizeof(iwe));
4122 iwe.cmd = SIOCGIWESSID;
4123 if (bss_mesh_cfg(bss)) {
4124 iwe.u.data.length = bss_mesh_id_len(bss);
4125 iwe.u.data.flags = 1;
4126 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4127 &iwe, bss_mesh_id(bss));
4128 } else {
4129 iwe.u.data.length = bss->ssid_len;
4130 iwe.u.data.flags = 1;
4131 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4132 &iwe, bss->ssid);
4133 }
4134
4135 if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
4136 || bss_mesh_cfg(bss)) {
4137 memset(&iwe, 0, sizeof(iwe));
4138 iwe.cmd = SIOCGIWMODE;
4139 if (bss_mesh_cfg(bss))
4140 iwe.u.mode = IW_MODE_MESH;
4141 else if (bss->capability & WLAN_CAPABILITY_ESS)
4142 iwe.u.mode = IW_MODE_MASTER;
4143 else
4144 iwe.u.mode = IW_MODE_ADHOC;
4145 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
4146 &iwe, IW_EV_UINT_LEN);
4147 }
4148
4149 memset(&iwe, 0, sizeof(iwe));
4150 iwe.cmd = SIOCGIWFREQ;
4151 iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
4152 iwe.u.freq.e = 0;
4153 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4154 IW_EV_FREQ_LEN);
4155
4156 memset(&iwe, 0, sizeof(iwe));
4157 iwe.cmd = SIOCGIWFREQ;
4158 iwe.u.freq.m = bss->freq;
4159 iwe.u.freq.e = 6;
4160 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4161 IW_EV_FREQ_LEN);
4162 memset(&iwe, 0, sizeof(iwe));
4163 iwe.cmd = IWEVQUAL;
4164 iwe.u.qual.qual = bss->qual;
4165 iwe.u.qual.level = bss->signal;
4166 iwe.u.qual.noise = bss->noise;
4167 iwe.u.qual.updated = local->wstats_flags;
4168 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4169 IW_EV_QUAL_LEN);
4170
4171 memset(&iwe, 0, sizeof(iwe));
4172 iwe.cmd = SIOCGIWENCODE;
4173 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
4174 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
4175 else
4176 iwe.u.data.flags = IW_ENCODE_DISABLED;
4177 iwe.u.data.length = 0;
4178 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4179 &iwe, "");
4180
4181 if (bss && bss->wpa_ie) {
4182 memset(&iwe, 0, sizeof(iwe));
4183 iwe.cmd = IWEVGENIE;
4184 iwe.u.data.length = bss->wpa_ie_len;
4185 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4186 &iwe, bss->wpa_ie);
4187 }
4188
4189 if (bss && bss->rsn_ie) {
4190 memset(&iwe, 0, sizeof(iwe));
4191 iwe.cmd = IWEVGENIE;
4192 iwe.u.data.length = bss->rsn_ie_len;
4193 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4194 &iwe, bss->rsn_ie);
4195 }
4196
4197 if (bss && bss->ht_ie) {
4198 memset(&iwe, 0, sizeof(iwe));
4199 iwe.cmd = IWEVGENIE;
4200 iwe.u.data.length = bss->ht_ie_len;
4201 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4202 &iwe, bss->ht_ie);
4203 }
4204
4205 if (bss && bss->supp_rates_len > 0) {
4206 /* display all supported rates in readable format */
4207 char *p = current_ev + iwe_stream_lcp_len(info);
4208 int i;
4209
4210 memset(&iwe, 0, sizeof(iwe));
4211 iwe.cmd = SIOCGIWRATE;
4212 /* Those two flags are ignored... */
4213 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
4214
4215 for (i = 0; i < bss->supp_rates_len; i++) {
4216 iwe.u.bitrate.value = ((bss->supp_rates[i] &
4217 0x7f) * 500000);
4218 p = iwe_stream_add_value(info, current_ev, p,
4219 end_buf, &iwe, IW_EV_PARAM_LEN);
4220 }
4221 current_ev = p;
4222 }
4223 2433
4224 if (bss) { 2434 ifsta = &sdata->u.sta;
4225 char *buf;
4226 buf = kmalloc(30, GFP_ATOMIC);
4227 if (buf) {
4228 memset(&iwe, 0, sizeof(iwe));
4229 iwe.cmd = IWEVCUSTOM;
4230 sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp));
4231 iwe.u.data.length = strlen(buf);
4232 current_ev = iwe_stream_add_point(info, current_ev,
4233 end_buf,
4234 &iwe, buf);
4235 memset(&iwe, 0, sizeof(iwe));
4236 iwe.cmd = IWEVCUSTOM;
4237 sprintf(buf, " Last beacon: %dms ago",
4238 jiffies_to_msecs(jiffies - bss->last_update));
4239 iwe.u.data.length = strlen(buf);
4240 current_ev = iwe_stream_add_point(info, current_ev,
4241 end_buf, &iwe, buf);
4242 kfree(buf);
4243 }
4244 }
4245 2435
4246 if (bss_mesh_cfg(bss)) { 2436 if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) {
4247 char *buf; 2437 memcpy(ifsta->bssid, bssid, ETH_ALEN);
4248 u8 *cfg = bss_mesh_cfg(bss); 2438 res = 0;
4249 buf = kmalloc(50, GFP_ATOMIC); 2439 /*
4250 if (buf) { 2440 * Hack! See also ieee80211_sta_set_ssid.
4251 memset(&iwe, 0, sizeof(iwe)); 2441 */
4252 iwe.cmd = IWEVCUSTOM; 2442 if (netif_running(sdata->dev))
4253 sprintf(buf, "Mesh network (version %d)", cfg[0]); 2443 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
4254 iwe.u.data.length = strlen(buf); 2444 if (res) {
4255 current_ev = iwe_stream_add_point(info, current_ev, 2445 printk(KERN_DEBUG "%s: Failed to config new BSSID to "
4256 end_buf, 2446 "the low-level driver\n", sdata->dev->name);
4257 &iwe, buf); 2447 return res;
4258 sprintf(buf, "Path Selection Protocol ID: "
4259 "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
4260 cfg[4]);
4261 iwe.u.data.length = strlen(buf);
4262 current_ev = iwe_stream_add_point(info, current_ev,
4263 end_buf,
4264 &iwe, buf);
4265 sprintf(buf, "Path Selection Metric ID: "
4266 "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
4267 cfg[8]);
4268 iwe.u.data.length = strlen(buf);
4269 current_ev = iwe_stream_add_point(info, current_ev,
4270 end_buf,
4271 &iwe, buf);
4272 sprintf(buf, "Congestion Control Mode ID: "
4273 "0x%02X%02X%02X%02X", cfg[9], cfg[10],
4274 cfg[11], cfg[12]);
4275 iwe.u.data.length = strlen(buf);
4276 current_ev = iwe_stream_add_point(info, current_ev,
4277 end_buf,
4278 &iwe, buf);
4279 sprintf(buf, "Channel Precedence: "
4280 "0x%02X%02X%02X%02X", cfg[13], cfg[14],
4281 cfg[15], cfg[16]);
4282 iwe.u.data.length = strlen(buf);
4283 current_ev = iwe_stream_add_point(info, current_ev,
4284 end_buf,
4285 &iwe, buf);
4286 kfree(buf);
4287 } 2448 }
4288 } 2449 }
4289 2450
4290 return current_ev; 2451 if (is_valid_ether_addr(bssid))
4291} 2452 ifsta->flags |= IEEE80211_STA_BSSID_SET;
4292 2453 else
2454 ifsta->flags &= ~IEEE80211_STA_BSSID_SET;
4293 2455
4294int ieee80211_sta_scan_results(struct net_device *dev, 2456 return 0;
4295 struct iw_request_info *info,
4296 char *buf, size_t len)
4297{
4298 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
4299 char *current_ev = buf;
4300 char *end_buf = buf + len;
4301 struct ieee80211_sta_bss *bss;
4302
4303 spin_lock_bh(&local->sta_bss_lock);
4304 list_for_each_entry(bss, &local->sta_bss_list, list) {
4305 if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
4306 spin_unlock_bh(&local->sta_bss_lock);
4307 return -E2BIG;
4308 }
4309 current_ev = ieee80211_sta_scan_result(dev, info, bss,
4310 current_ev, end_buf);
4311 }
4312 spin_unlock_bh(&local->sta_bss_lock);
4313 return current_ev - buf;
4314} 2457}
4315 2458
4316 2459int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len)
4317int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
4318{ 2460{
4319 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4320 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 2461 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4321 2462
4322 kfree(ifsta->extra_ie); 2463 kfree(ifsta->extra_ie);
@@ -4335,92 +2476,60 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
4335 return 0; 2476 return 0;
4336} 2477}
4337 2478
4338 2479int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason)
4339struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
4340 struct sk_buff *skb, u8 *bssid,
4341 u8 *addr, u64 supp_rates)
4342{
4343 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
4344 struct sta_info *sta;
4345 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4346 DECLARE_MAC_BUF(mac);
4347 int band = local->hw.conf.channel->band;
4348
4349 /* TODO: Could consider removing the least recently used entry and
4350 * allow new one to be added. */
4351 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
4352 if (net_ratelimit()) {
4353 printk(KERN_DEBUG "%s: No room for a new IBSS STA "
4354 "entry %s\n", dev->name, print_mac(mac, addr));
4355 }
4356 return NULL;
4357 }
4358
4359 if (compare_ether_addr(bssid, sdata->u.sta.bssid))
4360 return NULL;
4361
4362#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
4363 printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n",
4364 wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name);
4365#endif
4366
4367 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
4368 if (!sta)
4369 return NULL;
4370
4371 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
4372
4373 if (supp_rates)
4374 sta->supp_rates[band] = supp_rates;
4375 else
4376 sta->supp_rates[band] = sdata->u.sta.supp_rates_bits[band];
4377
4378 rate_control_rate_init(sta, local);
4379
4380 if (sta_info_insert(sta))
4381 return NULL;
4382
4383 return sta;
4384}
4385
4386
4387int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason)
4388{ 2480{
4389 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4390 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 2481 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4391 2482
4392 printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", 2483 printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
4393 dev->name, reason); 2484 sdata->dev->name, reason);
4394 2485
4395 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 2486 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
4396 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) 2487 sdata->vif.type != NL80211_IFTYPE_ADHOC)
4397 return -EINVAL; 2488 return -EINVAL;
4398 2489
4399 ieee80211_send_deauth(dev, ifsta, reason); 2490 ieee80211_set_disassoc(sdata, ifsta, true, true, reason);
4400 ieee80211_set_disassoc(dev, ifsta, 1);
4401 return 0; 2491 return 0;
4402} 2492}
4403 2493
4404 2494int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason)
4405int ieee80211_sta_disassociate(struct net_device *dev, u16 reason)
4406{ 2495{
4407 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4408 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 2496 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4409 2497
4410 printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", 2498 printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
4411 dev->name, reason); 2499 sdata->dev->name, reason);
4412 2500
4413 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 2501 if (sdata->vif.type != NL80211_IFTYPE_STATION)
4414 return -EINVAL; 2502 return -EINVAL;
4415 2503
4416 if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED)) 2504 if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED))
4417 return -1; 2505 return -1;
4418 2506
4419 ieee80211_send_disassoc(dev, ifsta, reason); 2507 ieee80211_set_disassoc(sdata, ifsta, false, true, reason);
4420 ieee80211_set_disassoc(dev, ifsta, 0);
4421 return 0; 2508 return 0;
4422} 2509}
4423 2510
2511/* scan finished notification */
2512void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
2513{
2514 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
2515 struct ieee80211_if_sta *ifsta;
2516
2517 if (sdata && sdata->vif.type == NL80211_IFTYPE_ADHOC) {
2518 ifsta = &sdata->u.sta;
2519 if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) ||
2520 (!(ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED) &&
2521 !ieee80211_sta_active_ibss(sdata)))
2522 ieee80211_sta_find_ibss(sdata, ifsta);
2523 }
2524
2525 /* Restart STA timers */
2526 rcu_read_lock();
2527 list_for_each_entry_rcu(sdata, &local->interfaces, list)
2528 ieee80211_restart_sta_timer(sdata);
2529 rcu_read_unlock();
2530}
2531
2532/* driver notification call */
4424void ieee80211_notify_mac(struct ieee80211_hw *hw, 2533void ieee80211_notify_mac(struct ieee80211_hw *hw,
4425 enum ieee80211_notification_types notif_type) 2534 enum ieee80211_notification_types notif_type)
4426{ 2535{
@@ -4431,10 +2540,10 @@ void ieee80211_notify_mac(struct ieee80211_hw *hw,
4431 case IEEE80211_NOTIFY_RE_ASSOC: 2540 case IEEE80211_NOTIFY_RE_ASSOC:
4432 rcu_read_lock(); 2541 rcu_read_lock();
4433 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2542 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4434 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 2543 if (sdata->vif.type != NL80211_IFTYPE_STATION)
4435 continue; 2544 continue;
4436 2545
4437 ieee80211_sta_req_auth(sdata->dev, &sdata->u.sta); 2546 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
4438 } 2547 }
4439 rcu_read_unlock(); 2548 rcu_read_unlock();
4440 break; 2549 break;
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index ede7ab56f65b..5f18c27eb900 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -134,7 +134,7 @@ static inline int rate_supported(struct sta_info *sta,
134 enum ieee80211_band band, 134 enum ieee80211_band band,
135 int index) 135 int index)
136{ 136{
137 return (sta == NULL || sta->supp_rates[band] & BIT(index)); 137 return (sta == NULL || sta->sta.supp_rates[band] & BIT(index));
138} 138}
139 139
140static inline s8 140static inline s8
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h
index 0a9135b974b5..ffafc5da572e 100644
--- a/net/mac80211/rc80211_pid.h
+++ b/net/mac80211/rc80211_pid.h
@@ -180,6 +180,8 @@ struct rc_pid_sta_info {
180 u32 tx_num_failed; 180 u32 tx_num_failed;
181 u32 tx_num_xmit; 181 u32 tx_num_xmit;
182 182
183 int txrate_idx;
184
183 /* Average failed frames percentage error (i.e. actual vs. target 185 /* Average failed frames percentage error (i.e. actual vs. target
184 * percentage), scaled by RC_PID_SMOOTHING. This value is computed 186 * percentage), scaled by RC_PID_SMOOTHING. This value is computed
185 * using using an exponential weighted average technique: 187 * using using an exponential weighted average technique:
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index a914ba73ccf5..bc1c4569caa1 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -75,7 +75,8 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
75 struct ieee80211_sub_if_data *sdata; 75 struct ieee80211_sub_if_data *sdata;
76 struct ieee80211_supported_band *sband; 76 struct ieee80211_supported_band *sband;
77 int cur_sorted, new_sorted, probe, tmp, n_bitrates, band; 77 int cur_sorted, new_sorted, probe, tmp, n_bitrates, band;
78 int cur = sta->txrate_idx; 78 struct rc_pid_sta_info *spinfo = (void *)sta->rate_ctrl_priv;
79 int cur = spinfo->txrate_idx;
79 80
80 sdata = sta->sdata; 81 sdata = sta->sdata;
81 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 82 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -111,7 +112,7 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
111 /* Fit the rate found to the nearest supported rate. */ 112 /* Fit the rate found to the nearest supported rate. */
112 do { 113 do {
113 if (rate_supported(sta, band, rinfo[tmp].index)) { 114 if (rate_supported(sta, band, rinfo[tmp].index)) {
114 sta->txrate_idx = rinfo[tmp].index; 115 spinfo->txrate_idx = rinfo[tmp].index;
115 break; 116 break;
116 } 117 }
117 if (adj < 0) 118 if (adj < 0)
@@ -121,9 +122,9 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
121 } while (tmp < n_bitrates && tmp >= 0); 122 } while (tmp < n_bitrates && tmp >= 0);
122 123
123#ifdef CONFIG_MAC80211_DEBUGFS 124#ifdef CONFIG_MAC80211_DEBUGFS
124 rate_control_pid_event_rate_change( 125 rate_control_pid_event_rate_change(&spinfo->events,
125 &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events, 126 spinfo->txrate_idx,
126 sta->txrate_idx, sband->bitrates[sta->txrate_idx].bitrate); 127 sband->bitrates[spinfo->txrate_idx].bitrate);
127#endif 128#endif
128} 129}
129 130
@@ -148,9 +149,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
148 struct ieee80211_local *local, 149 struct ieee80211_local *local,
149 struct sta_info *sta) 150 struct sta_info *sta)
150{ 151{
151#ifdef CONFIG_MAC80211_MESH
152 struct ieee80211_sub_if_data *sdata = sta->sdata; 152 struct ieee80211_sub_if_data *sdata = sta->sdata;
153#endif
154 struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv; 153 struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv;
155 struct rc_pid_rateinfo *rinfo = pinfo->rinfo; 154 struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
156 struct ieee80211_supported_band *sband; 155 struct ieee80211_supported_band *sband;
@@ -181,11 +180,8 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
181 pf = spinfo->last_pf; 180 pf = spinfo->last_pf;
182 else { 181 else {
183 pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; 182 pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit;
184#ifdef CONFIG_MAC80211_MESH 183 if (ieee80211_vif_is_mesh(&sdata->vif) && pf == 100)
185 if (pf == 100 &&
186 sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT)
187 mesh_plink_broken(sta); 184 mesh_plink_broken(sta);
188#endif
189 pf <<= RC_PID_ARITH_SHIFT; 185 pf <<= RC_PID_ARITH_SHIFT;
190 sta->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9) 186 sta->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9)
191 >> RC_PID_ARITH_SHIFT; 187 >> RC_PID_ARITH_SHIFT;
@@ -195,16 +191,16 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
195 spinfo->tx_num_failed = 0; 191 spinfo->tx_num_failed = 0;
196 192
197 /* If we just switched rate, update the rate behaviour info. */ 193 /* If we just switched rate, update the rate behaviour info. */
198 if (pinfo->oldrate != sta->txrate_idx) { 194 if (pinfo->oldrate != spinfo->txrate_idx) {
199 195
200 i = rinfo[pinfo->oldrate].rev_index; 196 i = rinfo[pinfo->oldrate].rev_index;
201 j = rinfo[sta->txrate_idx].rev_index; 197 j = rinfo[spinfo->txrate_idx].rev_index;
202 198
203 tmp = (pf - spinfo->last_pf); 199 tmp = (pf - spinfo->last_pf);
204 tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT); 200 tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT);
205 201
206 rinfo[j].diff = rinfo[i].diff + tmp; 202 rinfo[j].diff = rinfo[i].diff + tmp;
207 pinfo->oldrate = sta->txrate_idx; 203 pinfo->oldrate = spinfo->txrate_idx;
208 } 204 }
209 rate_control_pid_normalize(pinfo, sband->n_bitrates); 205 rate_control_pid_normalize(pinfo, sband->n_bitrates);
210 206
@@ -257,19 +253,20 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
257 if (!sta) 253 if (!sta)
258 goto unlock; 254 goto unlock;
259 255
256 spinfo = sta->rate_ctrl_priv;
257
260 /* Don't update the state if we're not controlling the rate. */ 258 /* Don't update the state if we're not controlling the rate. */
261 sdata = sta->sdata; 259 sdata = sta->sdata;
262 if (sdata->force_unicast_rateidx > -1) { 260 if (sdata->force_unicast_rateidx > -1) {
263 sta->txrate_idx = sdata->max_ratectrl_rateidx; 261 spinfo->txrate_idx = sdata->max_ratectrl_rateidx;
264 goto unlock; 262 goto unlock;
265 } 263 }
266 264
267 /* Ignore all frames that were sent with a different rate than the rate 265 /* Ignore all frames that were sent with a different rate than the rate
268 * we currently advise mac80211 to use. */ 266 * we currently advise mac80211 to use. */
269 if (info->tx_rate_idx != sta->txrate_idx) 267 if (info->tx_rate_idx != spinfo->txrate_idx)
270 goto unlock; 268 goto unlock;
271 269
272 spinfo = sta->rate_ctrl_priv;
273 spinfo->tx_num_xmit++; 270 spinfo->tx_num_xmit++;
274 271
275#ifdef CONFIG_MAC80211_DEBUGFS 272#ifdef CONFIG_MAC80211_DEBUGFS
@@ -287,17 +284,6 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
287 spinfo->tx_num_xmit++; 284 spinfo->tx_num_xmit++;
288 } 285 }
289 286
290 if (info->status.excessive_retries) {
291 sta->tx_retry_failed++;
292 sta->tx_num_consecutive_failures++;
293 sta->tx_num_mpdu_fail++;
294 } else {
295 sta->tx_num_consecutive_failures = 0;
296 sta->tx_num_mpdu_ok++;
297 }
298 sta->tx_retry_count += info->status.retry_count;
299 sta->tx_num_mpdu_fail += info->status.retry_count;
300
301 /* Update PID controller state. */ 287 /* Update PID controller state. */
302 period = (HZ * pinfo->sampling_period + 500) / 1000; 288 period = (HZ * pinfo->sampling_period + 500) / 1000;
303 if (!period) 289 if (!period)
@@ -317,6 +303,7 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
317 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 303 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
318 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 304 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
319 struct ieee80211_sub_if_data *sdata; 305 struct ieee80211_sub_if_data *sdata;
306 struct rc_pid_sta_info *spinfo;
320 struct sta_info *sta; 307 struct sta_info *sta;
321 int rateidx; 308 int rateidx;
322 u16 fc; 309 u16 fc;
@@ -337,16 +324,15 @@ static void rate_control_pid_get_rate(void *priv, struct net_device *dev,
337 324
338 /* If a forced rate is in effect, select it. */ 325 /* If a forced rate is in effect, select it. */
339 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 326 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
327 spinfo = (struct rc_pid_sta_info *)sta->rate_ctrl_priv;
340 if (sdata->force_unicast_rateidx > -1) 328 if (sdata->force_unicast_rateidx > -1)
341 sta->txrate_idx = sdata->force_unicast_rateidx; 329 spinfo->txrate_idx = sdata->force_unicast_rateidx;
342 330
343 rateidx = sta->txrate_idx; 331 rateidx = spinfo->txrate_idx;
344 332
345 if (rateidx >= sband->n_bitrates) 333 if (rateidx >= sband->n_bitrates)
346 rateidx = sband->n_bitrates - 1; 334 rateidx = sband->n_bitrates - 1;
347 335
348 sta->last_txrate_idx = rateidx;
349
350 rcu_read_unlock(); 336 rcu_read_unlock();
351 337
352 sel->rate_idx = rateidx; 338 sel->rate_idx = rateidx;
@@ -367,9 +353,10 @@ static void rate_control_pid_rate_init(void *priv, void *priv_sta,
367 * Until that method is implemented, we will use the lowest supported 353 * Until that method is implemented, we will use the lowest supported
368 * rate as a workaround. */ 354 * rate as a workaround. */
369 struct ieee80211_supported_band *sband; 355 struct ieee80211_supported_band *sband;
356 struct rc_pid_sta_info *spinfo = (void *)sta->rate_ctrl_priv;
370 357
371 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 358 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
372 sta->txrate_idx = rate_lowest_index(local, sband, sta); 359 spinfo->txrate_idx = rate_lowest_index(local, sband, sta);
373 sta->fail_avg = 0; 360 sta->fail_avg = 0;
374} 361}
375 362
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6db854505193..92d898b901e9 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -143,6 +143,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
143 /* IEEE80211_RADIOTAP_FLAGS */ 143 /* IEEE80211_RADIOTAP_FLAGS */
144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
145 *pos |= IEEE80211_RADIOTAP_F_FCS; 145 *pos |= IEEE80211_RADIOTAP_F_FCS;
146 if (status->flag & RX_FLAG_SHORTPRE)
147 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
146 pos++; 148 pos++;
147 149
148 /* IEEE80211_RADIOTAP_RATE */ 150 /* IEEE80211_RADIOTAP_RATE */
@@ -155,8 +157,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
155 if (status->band == IEEE80211_BAND_5GHZ) 157 if (status->band == IEEE80211_BAND_5GHZ)
156 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | 158 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
157 IEEE80211_CHAN_5GHZ); 159 IEEE80211_CHAN_5GHZ);
160 else if (rate->flags & IEEE80211_RATE_ERP_G)
161 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
162 IEEE80211_CHAN_2GHZ);
158 else 163 else
159 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_DYN | 164 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
160 IEEE80211_CHAN_2GHZ); 165 IEEE80211_CHAN_2GHZ);
161 pos += 2; 166 pos += 2;
162 167
@@ -290,7 +295,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
290 if (!netif_running(sdata->dev)) 295 if (!netif_running(sdata->dev))
291 continue; 296 continue;
292 297
293 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR) 298 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
294 continue; 299 continue;
295 300
296 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) 301 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
@@ -398,12 +403,12 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
398 struct ieee80211_local *local = rx->local; 403 struct ieee80211_local *local = rx->local;
399 struct sk_buff *skb = rx->skb; 404 struct sk_buff *skb = rx->skb;
400 405
401 if (unlikely(local->sta_hw_scanning)) 406 if (unlikely(local->hw_scanning))
402 return ieee80211_sta_rx_scan(rx->dev, skb, rx->status); 407 return ieee80211_scan_rx(rx->sdata, skb, rx->status);
403 408
404 if (unlikely(local->sta_sw_scanning)) { 409 if (unlikely(local->sw_scanning)) {
405 /* drop all the other packets during a software scan anyway */ 410 /* drop all the other packets during a software scan anyway */
406 if (ieee80211_sta_rx_scan(rx->dev, skb, rx->status) 411 if (ieee80211_scan_rx(rx->sdata, skb, rx->status)
407 != RX_QUEUED) 412 != RX_QUEUED)
408 dev_kfree_skb(skb); 413 dev_kfree_skb(skb);
409 return RX_QUEUED; 414 return RX_QUEUED;
@@ -461,7 +466,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
461 466
462 if (ieee80211_is_data(hdr->frame_control) && 467 if (ieee80211_is_data(hdr->frame_control) &&
463 is_multicast_ether_addr(hdr->addr1) && 468 is_multicast_ether_addr(hdr->addr1) &&
464 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev)) 469 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
465 return RX_DROP_MONITOR; 470 return RX_DROP_MONITOR;
466#undef msh_h_get 471#undef msh_h_get
467 472
@@ -496,8 +501,8 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
496 /* Drop disallowed frame classes based on STA auth/assoc state; 501 /* Drop disallowed frame classes based on STA auth/assoc state;
497 * IEEE 802.11, Chap 5.5. 502 * IEEE 802.11, Chap 5.5.
498 * 503 *
499 * 80211.o does filtering only based on association state, i.e., it 504 * mac80211 filters only based on association state, i.e. it drops
500 * drops Class 3 frames from not associated stations. hostapd sends 505 * Class 3 frames from not associated stations. hostapd sends
501 * deauth/disassoc frames when needed. In addition, hostapd is 506 * deauth/disassoc frames when needed. In addition, hostapd is
502 * responsible for filtering on both auth and assoc states. 507 * responsible for filtering on both auth and assoc states.
503 */ 508 */
@@ -507,7 +512,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
507 512
508 if (unlikely((ieee80211_is_data(hdr->frame_control) || 513 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
509 ieee80211_is_pspoll(hdr->frame_control)) && 514 ieee80211_is_pspoll(hdr->frame_control)) &&
510 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 515 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
511 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) { 516 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
512 if ((!ieee80211_has_fromds(hdr->frame_control) && 517 if ((!ieee80211_has_fromds(hdr->frame_control) &&
513 !ieee80211_has_tods(hdr->frame_control) && 518 !ieee80211_has_tods(hdr->frame_control) &&
@@ -656,7 +661,7 @@ static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
656 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL); 661 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
657#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 662#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
658 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", 663 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
659 dev->name, print_mac(mac, sta->addr), sta->aid); 664 dev->name, print_mac(mac, sta->sta.addr), sta->sta.aid);
660#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 665#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
661} 666}
662 667
@@ -680,7 +685,7 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
680 685
681#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 686#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
682 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n", 687 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n",
683 dev->name, print_mac(mac, sta->addr), sta->aid); 688 dev->name, print_mac(mac, sta->sta.addr), sta->sta.aid);
684#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 689#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
685 690
686 /* Send all buffered frames to the station */ 691 /* Send all buffered frames to the station */
@@ -697,7 +702,7 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
697#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 702#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
698 printk(KERN_DEBUG "%s: STA %s aid %d send PS frame " 703 printk(KERN_DEBUG "%s: STA %s aid %d send PS frame "
699 "since STA not sleeping anymore\n", dev->name, 704 "since STA not sleeping anymore\n", dev->name,
700 print_mac(mac, sta->addr), sta->aid); 705 print_mac(mac, sta->sta.addr), sta->sta.aid);
701#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 706#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
702 info->flags |= IEEE80211_TX_CTL_REQUEUE; 707 info->flags |= IEEE80211_TX_CTL_REQUEUE;
703 dev_queue_xmit(skb); 708 dev_queue_xmit(skb);
@@ -719,14 +724,14 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
719 /* Update last_rx only for IBSS packets which are for the current 724 /* Update last_rx only for IBSS packets which are for the current
720 * BSSID to avoid keeping the current IBSS network alive in cases where 725 * BSSID to avoid keeping the current IBSS network alive in cases where
721 * other STAs are using different BSSID. */ 726 * other STAs are using different BSSID. */
722 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 727 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
723 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, 728 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
724 IEEE80211_IF_TYPE_IBSS); 729 NL80211_IFTYPE_ADHOC);
725 if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0) 730 if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0)
726 sta->last_rx = jiffies; 731 sta->last_rx = jiffies;
727 } else 732 } else
728 if (!is_multicast_ether_addr(hdr->addr1) || 733 if (!is_multicast_ether_addr(hdr->addr1) ||
729 rx->sdata->vif.type == IEEE80211_IF_TYPE_STA) { 734 rx->sdata->vif.type == NL80211_IFTYPE_STATION) {
730 /* Update last_rx only for unicast frames in order to prevent 735 /* Update last_rx only for unicast frames in order to prevent
731 * the Probe Request frames (the only broadcast frames from a 736 * the Probe Request frames (the only broadcast frames from a
732 * STA in infrastructure mode) from keeping a connection alive. 737 * STA in infrastructure mode) from keeping a connection alive.
@@ -746,8 +751,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
746 sta->last_noise = rx->status->noise; 751 sta->last_noise = rx->status->noise;
747 752
748 if (!ieee80211_has_morefrags(hdr->frame_control) && 753 if (!ieee80211_has_morefrags(hdr->frame_control) &&
749 (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP || 754 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
750 rx->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) { 755 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
751 /* Change STA power saving mode only in the end of a frame 756 /* Change STA power saving mode only in the end of a frame
752 * exchange sequence */ 757 * exchange sequence */
753 if (test_sta_flags(sta, WLAN_STA_PS) && 758 if (test_sta_flags(sta, WLAN_STA_PS) &&
@@ -816,7 +821,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
816 821
817static inline struct ieee80211_fragment_entry * 822static inline struct ieee80211_fragment_entry *
818ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 823ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
819 u16 fc, unsigned int frag, unsigned int seq, 824 unsigned int frag, unsigned int seq,
820 int rx_queue, struct ieee80211_hdr *hdr) 825 int rx_queue, struct ieee80211_hdr *hdr)
821{ 826{
822 struct ieee80211_fragment_entry *entry; 827 struct ieee80211_fragment_entry *entry;
@@ -825,7 +830,6 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
825 idx = sdata->fragment_next; 830 idx = sdata->fragment_next;
826 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 831 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
827 struct ieee80211_hdr *f_hdr; 832 struct ieee80211_hdr *f_hdr;
828 u16 f_fc;
829 833
830 idx--; 834 idx--;
831 if (idx < 0) 835 if (idx < 0)
@@ -837,10 +841,13 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
837 entry->last_frag + 1 != frag) 841 entry->last_frag + 1 != frag)
838 continue; 842 continue;
839 843
840 f_hdr = (struct ieee80211_hdr *) entry->skb_list.next->data; 844 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
841 f_fc = le16_to_cpu(f_hdr->frame_control);
842 845
843 if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) || 846 /*
847 * Check ftype and addresses are equal, else check next fragment
848 */
849 if (((hdr->frame_control ^ f_hdr->frame_control) &
850 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
844 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || 851 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
845 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) 852 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
846 continue; 853 continue;
@@ -860,16 +867,18 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
860{ 867{
861 struct ieee80211_hdr *hdr; 868 struct ieee80211_hdr *hdr;
862 u16 sc; 869 u16 sc;
870 __le16 fc;
863 unsigned int frag, seq; 871 unsigned int frag, seq;
864 struct ieee80211_fragment_entry *entry; 872 struct ieee80211_fragment_entry *entry;
865 struct sk_buff *skb; 873 struct sk_buff *skb;
866 DECLARE_MAC_BUF(mac); 874 DECLARE_MAC_BUF(mac);
867 875
868 hdr = (struct ieee80211_hdr *) rx->skb->data; 876 hdr = (struct ieee80211_hdr *)rx->skb->data;
877 fc = hdr->frame_control;
869 sc = le16_to_cpu(hdr->seq_ctrl); 878 sc = le16_to_cpu(hdr->seq_ctrl);
870 frag = sc & IEEE80211_SCTL_FRAG; 879 frag = sc & IEEE80211_SCTL_FRAG;
871 880
872 if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) || 881 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
873 (rx->skb)->len < 24 || 882 (rx->skb)->len < 24 ||
874 is_multicast_ether_addr(hdr->addr1))) { 883 is_multicast_ether_addr(hdr->addr1))) {
875 /* not fragmented */ 884 /* not fragmented */
@@ -884,7 +893,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
884 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 893 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
885 rx->queue, &(rx->skb)); 894 rx->queue, &(rx->skb));
886 if (rx->key && rx->key->conf.alg == ALG_CCMP && 895 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
887 (rx->fc & IEEE80211_FCTL_PROTECTED)) { 896 ieee80211_has_protected(fc)) {
888 /* Store CCMP PN so that we can verify that the next 897 /* Store CCMP PN so that we can verify that the next
889 * fragment has a sequential PN value. */ 898 * fragment has a sequential PN value. */
890 entry->ccmp = 1; 899 entry->ccmp = 1;
@@ -898,8 +907,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
898 /* This is a fragment for a frame that should already be pending in 907 /* This is a fragment for a frame that should already be pending in
899 * fragment cache. Add this fragment to the end of the pending entry. 908 * fragment cache. Add this fragment to the end of the pending entry.
900 */ 909 */
901 entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, 910 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
902 rx->queue, hdr);
903 if (!entry) { 911 if (!entry) {
904 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 912 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
905 return RX_DROP_MONITOR; 913 return RX_DROP_MONITOR;
@@ -924,11 +932,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
924 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 932 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
925 } 933 }
926 934
927 skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc)); 935 skb_pull(rx->skb, ieee80211_hdrlen(fc));
928 __skb_queue_tail(&entry->skb_list, rx->skb); 936 __skb_queue_tail(&entry->skb_list, rx->skb);
929 entry->last_frag = frag; 937 entry->last_frag = frag;
930 entry->extra_len += rx->skb->len; 938 entry->extra_len += rx->skb->len;
931 if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { 939 if (ieee80211_has_morefrags(fc)) {
932 rx->skb = NULL; 940 rx->skb = NULL;
933 return RX_QUEUED; 941 return RX_QUEUED;
934 } 942 }
@@ -968,15 +976,14 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
968 struct sk_buff *skb; 976 struct sk_buff *skb;
969 int no_pending_pkts; 977 int no_pending_pkts;
970 DECLARE_MAC_BUF(mac); 978 DECLARE_MAC_BUF(mac);
979 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
971 980
972 if (likely(!rx->sta || 981 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
973 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL ||
974 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL ||
975 !(rx->flags & IEEE80211_RX_RA_MATCH))) 982 !(rx->flags & IEEE80211_RX_RA_MATCH)))
976 return RX_CONTINUE; 983 return RX_CONTINUE;
977 984
978 if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) && 985 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
979 (sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) 986 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
980 return RX_DROP_UNUSABLE; 987 return RX_DROP_UNUSABLE;
981 988
982 skb = skb_dequeue(&rx->sta->tx_filtered); 989 skb = skb_dequeue(&rx->sta->tx_filtered);
@@ -1000,7 +1007,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1000 1007
1001#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1008#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1002 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", 1009 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
1003 print_mac(mac, rx->sta->addr), rx->sta->aid, 1010 print_mac(mac, rx->sta->sta.addr), rx->sta->sta.aid,
1004 skb_queue_len(&rx->sta->ps_tx_buf)); 1011 skb_queue_len(&rx->sta->ps_tx_buf));
1005#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1012#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1006 1013
@@ -1025,7 +1032,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1025 */ 1032 */
1026 printk(KERN_DEBUG "%s: STA %s sent PS Poll even " 1033 printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
1027 "though there are no buffered frames for it\n", 1034 "though there are no buffered frames for it\n",
1028 rx->dev->name, print_mac(mac, rx->sta->addr)); 1035 rx->dev->name, print_mac(mac, rx->sta->sta.addr));
1029#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1036#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1030 } 1037 }
1031 1038
@@ -1050,7 +1057,6 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1050 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); 1057 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1051 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); 1058 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1052 /* change frame type to non QOS */ 1059 /* change frame type to non QOS */
1053 rx->fc &= ~IEEE80211_STYPE_QOS_DATA;
1054 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1060 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1055 1061
1056 return RX_CONTINUE; 1062 return RX_CONTINUE;
@@ -1067,7 +1073,7 @@ ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1067} 1073}
1068 1074
1069static int 1075static int
1070ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx) 1076ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1071{ 1077{
1072 /* 1078 /*
1073 * Pass through unencrypted frames if the hardware has 1079 * Pass through unencrypted frames if the hardware has
@@ -1077,9 +1083,8 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx)
1077 return 0; 1083 return 0;
1078 1084
1079 /* Drop unencrypted frames if key is set. */ 1085 /* Drop unencrypted frames if key is set. */
1080 if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) && 1086 if (unlikely(!ieee80211_has_protected(fc) &&
1081 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 1087 !ieee80211_is_nullfunc(fc) &&
1082 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC &&
1083 (rx->key || rx->sdata->drop_unencrypted))) 1088 (rx->key || rx->sdata->drop_unencrypted)))
1084 return -EACCES; 1089 return -EACCES;
1085 1090
@@ -1091,7 +1096,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1091{ 1096{
1092 struct net_device *dev = rx->dev; 1097 struct net_device *dev = rx->dev;
1093 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 1098 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1094 u16 fc, hdrlen, ethertype; 1099 u16 hdrlen, ethertype;
1095 u8 *payload; 1100 u8 *payload;
1096 u8 dst[ETH_ALEN]; 1101 u8 dst[ETH_ALEN];
1097 u8 src[ETH_ALEN] __aligned(2); 1102 u8 src[ETH_ALEN] __aligned(2);
@@ -1102,12 +1107,10 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1102 DECLARE_MAC_BUF(mac3); 1107 DECLARE_MAC_BUF(mac3);
1103 DECLARE_MAC_BUF(mac4); 1108 DECLARE_MAC_BUF(mac4);
1104 1109
1105 fc = rx->fc; 1110 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1106
1107 if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
1108 return -1; 1111 return -1;
1109 1112
1110 hdrlen = ieee80211_get_hdrlen(fc); 1113 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1111 1114
1112 if (ieee80211_vif_is_mesh(&sdata->vif)) 1115 if (ieee80211_vif_is_mesh(&sdata->vif))
1113 hdrlen += ieee80211_get_mesh_hdrlen( 1116 hdrlen += ieee80211_get_mesh_hdrlen(
@@ -1122,42 +1125,29 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1122 * 1 0 BSSID SA DA n/a 1125 * 1 0 BSSID SA DA n/a
1123 * 1 1 RA TA DA SA 1126 * 1 1 RA TA DA SA
1124 */ 1127 */
1125 1128 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1126 switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { 1129 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1127 case IEEE80211_FCTL_TODS: 1130
1128 /* BSSID SA DA */ 1131 switch (hdr->frame_control &
1129 memcpy(dst, hdr->addr3, ETH_ALEN); 1132 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1130 memcpy(src, hdr->addr2, ETH_ALEN); 1133 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS):
1131 1134 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
1132 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP && 1135 sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1133 sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
1134 return -1; 1136 return -1;
1135 break; 1137 break;
1136 case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): 1138 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1137 /* RA TA DA SA */ 1139 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
1138 memcpy(dst, hdr->addr3, ETH_ALEN); 1140 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
1139 memcpy(src, hdr->addr4, ETH_ALEN);
1140
1141 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS &&
1142 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
1143 return -1; 1141 return -1;
1144 break; 1142 break;
1145 case IEEE80211_FCTL_FROMDS: 1143 case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS):
1146 /* DA BSSID SA */ 1144 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1147 memcpy(dst, hdr->addr1, ETH_ALEN);
1148 memcpy(src, hdr->addr3, ETH_ALEN);
1149
1150 if (sdata->vif.type != IEEE80211_IF_TYPE_STA ||
1151 (is_multicast_ether_addr(dst) && 1145 (is_multicast_ether_addr(dst) &&
1152 !compare_ether_addr(src, dev->dev_addr))) 1146 !compare_ether_addr(src, dev->dev_addr)))
1153 return -1; 1147 return -1;
1154 break; 1148 break;
1155 case 0: 1149 case __constant_cpu_to_le16(0):
1156 /* DA SA BSSID */ 1150 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1157 memcpy(dst, hdr->addr1, ETH_ALEN);
1158 memcpy(src, hdr->addr2, ETH_ALEN);
1159
1160 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1161 return -1; 1151 return -1;
1162 break; 1152 break;
1163 } 1153 }
@@ -1193,7 +1183,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1193/* 1183/*
1194 * requires that rx->skb is a frame with ethernet header 1184 * requires that rx->skb is a frame with ethernet header
1195 */ 1185 */
1196static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx) 1186static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1197{ 1187{
1198 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 1188 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1199 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 1189 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
@@ -1209,7 +1199,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx)
1209 return true; 1199 return true;
1210 1200
1211 if (ieee80211_802_1x_port_control(rx) || 1201 if (ieee80211_802_1x_port_control(rx) ||
1212 ieee80211_drop_unencrypted(rx)) 1202 ieee80211_drop_unencrypted(rx, fc))
1213 return false; 1203 return false;
1214 1204
1215 return true; 1205 return true;
@@ -1231,8 +1221,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1231 skb = rx->skb; 1221 skb = rx->skb;
1232 xmit_skb = NULL; 1222 xmit_skb = NULL;
1233 1223
1234 if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP || 1224 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1235 sdata->vif.type == IEEE80211_IF_TYPE_VLAN) && 1225 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1226 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1236 (rx->flags & IEEE80211_RX_RA_MATCH)) { 1227 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1237 if (is_multicast_ether_addr(ehdr->h_dest)) { 1228 if (is_multicast_ether_addr(ehdr->h_dest)) {
1238 /* 1229 /*
@@ -1279,20 +1270,21 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1279{ 1270{
1280 struct net_device *dev = rx->dev; 1271 struct net_device *dev = rx->dev;
1281 struct ieee80211_local *local = rx->local; 1272 struct ieee80211_local *local = rx->local;
1282 u16 fc, ethertype; 1273 u16 ethertype;
1283 u8 *payload; 1274 u8 *payload;
1284 struct sk_buff *skb = rx->skb, *frame = NULL; 1275 struct sk_buff *skb = rx->skb, *frame = NULL;
1276 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1277 __le16 fc = hdr->frame_control;
1285 const struct ethhdr *eth; 1278 const struct ethhdr *eth;
1286 int remaining, err; 1279 int remaining, err;
1287 u8 dst[ETH_ALEN]; 1280 u8 dst[ETH_ALEN];
1288 u8 src[ETH_ALEN]; 1281 u8 src[ETH_ALEN];
1289 DECLARE_MAC_BUF(mac); 1282 DECLARE_MAC_BUF(mac);
1290 1283
1291 fc = rx->fc; 1284 if (unlikely(!ieee80211_is_data(fc)))
1292 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1293 return RX_CONTINUE; 1285 return RX_CONTINUE;
1294 1286
1295 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1287 if (unlikely(!ieee80211_is_data_present(fc)))
1296 return RX_DROP_MONITOR; 1288 return RX_DROP_MONITOR;
1297 1289
1298 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1290 if (!(rx->flags & IEEE80211_RX_AMSDU))
@@ -1374,7 +1366,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1374 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); 1366 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1375 } 1367 }
1376 1368
1377 if (!ieee80211_frame_allowed(rx)) { 1369 if (!ieee80211_frame_allowed(rx, fc)) {
1378 if (skb == frame) /* last frame */ 1370 if (skb == frame) /* last frame */
1379 return RX_DROP_UNUSABLE; 1371 return RX_DROP_UNUSABLE;
1380 dev_kfree_skb(frame); 1372 dev_kfree_skb(frame);
@@ -1413,7 +1405,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1413 1405
1414 if (rx->flags & IEEE80211_RX_RA_MATCH) { 1406 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1415 if (!mesh_hdr->ttl) 1407 if (!mesh_hdr->ttl)
1416 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.sta, 1408 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1417 dropped_frames_ttl); 1409 dropped_frames_ttl);
1418 else { 1410 else {
1419 struct ieee80211_hdr *fwd_hdr; 1411 struct ieee80211_hdr *fwd_hdr;
@@ -1448,21 +1440,21 @@ static ieee80211_rx_result debug_noinline
1448ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1440ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1449{ 1441{
1450 struct net_device *dev = rx->dev; 1442 struct net_device *dev = rx->dev;
1451 u16 fc; 1443 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1444 __le16 fc = hdr->frame_control;
1452 int err; 1445 int err;
1453 1446
1454 fc = rx->fc; 1447 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1455 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1456 return RX_CONTINUE; 1448 return RX_CONTINUE;
1457 1449
1458 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1450 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1459 return RX_DROP_MONITOR; 1451 return RX_DROP_MONITOR;
1460 1452
1461 err = ieee80211_data_to_8023(rx); 1453 err = ieee80211_data_to_8023(rx);
1462 if (unlikely(err)) 1454 if (unlikely(err))
1463 return RX_DROP_UNUSABLE; 1455 return RX_DROP_UNUSABLE;
1464 1456
1465 if (!ieee80211_frame_allowed(rx)) 1457 if (!ieee80211_frame_allowed(rx, fc))
1466 return RX_DROP_MONITOR; 1458 return RX_DROP_MONITOR;
1467 1459
1468 rx->skb->dev = dev; 1460 rx->skb->dev = dev;
@@ -1520,22 +1512,97 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1520} 1512}
1521 1513
1522static ieee80211_rx_result debug_noinline 1514static ieee80211_rx_result debug_noinline
1515ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1516{
1517 struct ieee80211_local *local = rx->local;
1518 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1519 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1520 int len = rx->skb->len;
1521
1522 if (!ieee80211_is_action(mgmt->frame_control))
1523 return RX_CONTINUE;
1524
1525 if (!rx->sta)
1526 return RX_DROP_MONITOR;
1527
1528 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1529 return RX_DROP_MONITOR;
1530
1531 /* all categories we currently handle have action_code */
1532 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1533 return RX_DROP_MONITOR;
1534
1535 /*
1536 * FIXME: revisit this, I'm sure we should handle most
1537 * of these frames in other modes as well!
1538 */
1539 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1540 sdata->vif.type != NL80211_IFTYPE_ADHOC)
1541 return RX_DROP_MONITOR;
1542
1543 switch (mgmt->u.action.category) {
1544 case WLAN_CATEGORY_BACK:
1545 switch (mgmt->u.action.u.addba_req.action_code) {
1546 case WLAN_ACTION_ADDBA_REQ:
1547 if (len < (IEEE80211_MIN_ACTION_SIZE +
1548 sizeof(mgmt->u.action.u.addba_req)))
1549 return RX_DROP_MONITOR;
1550 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1551 break;
1552 case WLAN_ACTION_ADDBA_RESP:
1553 if (len < (IEEE80211_MIN_ACTION_SIZE +
1554 sizeof(mgmt->u.action.u.addba_resp)))
1555 return RX_DROP_MONITOR;
1556 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1557 break;
1558 case WLAN_ACTION_DELBA:
1559 if (len < (IEEE80211_MIN_ACTION_SIZE +
1560 sizeof(mgmt->u.action.u.delba)))
1561 return RX_DROP_MONITOR;
1562 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1563 break;
1564 }
1565 break;
1566 case WLAN_CATEGORY_SPECTRUM_MGMT:
1567 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1568 return RX_DROP_MONITOR;
1569 switch (mgmt->u.action.u.measurement.action_code) {
1570 case WLAN_ACTION_SPCT_MSR_REQ:
1571 if (len < (IEEE80211_MIN_ACTION_SIZE +
1572 sizeof(mgmt->u.action.u.measurement)))
1573 return RX_DROP_MONITOR;
1574 ieee80211_process_measurement_req(sdata, mgmt, len);
1575 break;
1576 }
1577 break;
1578 default:
1579 return RX_CONTINUE;
1580 }
1581
1582 rx->sta->rx_packets++;
1583 dev_kfree_skb(rx->skb);
1584 return RX_QUEUED;
1585}
1586
1587static ieee80211_rx_result debug_noinline
1523ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 1588ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1524{ 1589{
1525 struct ieee80211_sub_if_data *sdata; 1590 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1526 1591
1527 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1592 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1528 return RX_DROP_MONITOR; 1593 return RX_DROP_MONITOR;
1529 1594
1530 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 1595 if (ieee80211_vif_is_mesh(&sdata->vif))
1531 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA || 1596 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1532 sdata->vif.type == IEEE80211_IF_TYPE_IBSS || 1597
1533 sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) && 1598 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1534 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) 1599 sdata->vif.type != NL80211_IFTYPE_ADHOC)
1535 ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->status); 1600 return RX_DROP_MONITOR;
1536 else 1601
1602 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
1537 return RX_DROP_MONITOR; 1603 return RX_DROP_MONITOR;
1538 1604
1605 ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1539 return RX_QUEUED; 1606 return RX_QUEUED;
1540} 1607}
1541 1608
@@ -1565,7 +1632,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1565 if (!ieee80211_has_protected(hdr->frame_control)) 1632 if (!ieee80211_has_protected(hdr->frame_control))
1566 goto ignore; 1633 goto ignore;
1567 1634
1568 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) { 1635 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1569 /* 1636 /*
1570 * APs with pairwise keys should never receive Michael MIC 1637 * APs with pairwise keys should never receive Michael MIC
1571 * errors for non-zero keyidx because these are reserved for 1638 * errors for non-zero keyidx because these are reserved for
@@ -1579,7 +1646,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1579 !ieee80211_is_auth(hdr->frame_control)) 1646 !ieee80211_is_auth(hdr->frame_control))
1580 goto ignore; 1647 goto ignore;
1581 1648
1582 mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); 1649 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr);
1583 ignore: 1650 ignore:
1584 dev_kfree_skb(rx->skb); 1651 dev_kfree_skb(rx->skb);
1585 rx->skb = NULL; 1652 rx->skb = NULL;
@@ -1635,7 +1702,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1635 if (!netif_running(sdata->dev)) 1702 if (!netif_running(sdata->dev))
1636 continue; 1703 continue;
1637 1704
1638 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || 1705 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
1639 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 1706 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1640 continue; 1707 continue;
1641 1708
@@ -1698,6 +1765,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1698 CALL_RXH(ieee80211_rx_h_mesh_fwding); 1765 CALL_RXH(ieee80211_rx_h_mesh_fwding);
1699 CALL_RXH(ieee80211_rx_h_data) 1766 CALL_RXH(ieee80211_rx_h_data)
1700 CALL_RXH(ieee80211_rx_h_ctrl) 1767 CALL_RXH(ieee80211_rx_h_ctrl)
1768 CALL_RXH(ieee80211_rx_h_action)
1701 CALL_RXH(ieee80211_rx_h_mgmt) 1769 CALL_RXH(ieee80211_rx_h_mgmt)
1702 1770
1703#undef CALL_RXH 1771#undef CALL_RXH
@@ -1733,7 +1801,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1733 int multicast = is_multicast_ether_addr(hdr->addr1); 1801 int multicast = is_multicast_ether_addr(hdr->addr1);
1734 1802
1735 switch (sdata->vif.type) { 1803 switch (sdata->vif.type) {
1736 case IEEE80211_IF_TYPE_STA: 1804 case NL80211_IFTYPE_STATION:
1737 if (!bssid) 1805 if (!bssid)
1738 return 0; 1806 return 0;
1739 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1807 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
@@ -1748,14 +1816,10 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1748 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1816 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1749 } 1817 }
1750 break; 1818 break;
1751 case IEEE80211_IF_TYPE_IBSS: 1819 case NL80211_IFTYPE_ADHOC:
1752 if (!bssid) 1820 if (!bssid)
1753 return 0; 1821 return 0;
1754 if (ieee80211_is_beacon(hdr->frame_control)) { 1822 if (ieee80211_is_beacon(hdr->frame_control)) {
1755 if (!rx->sta)
1756 rx->sta = ieee80211_ibss_add_sta(sdata->dev,
1757 rx->skb, bssid, hdr->addr2,
1758 BIT(rx->status->rate_idx));
1759 return 1; 1823 return 1;
1760 } 1824 }
1761 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1825 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
@@ -1769,11 +1833,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1769 return 0; 1833 return 0;
1770 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1834 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1771 } else if (!rx->sta) 1835 } else if (!rx->sta)
1772 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, 1836 rx->sta = ieee80211_ibss_add_sta(sdata, rx->skb,
1773 bssid, hdr->addr2, 1837 bssid, hdr->addr2,
1774 BIT(rx->status->rate_idx)); 1838 BIT(rx->status->rate_idx));
1775 break; 1839 break;
1776 case IEEE80211_IF_TYPE_MESH_POINT: 1840 case NL80211_IFTYPE_MESH_POINT:
1777 if (!multicast && 1841 if (!multicast &&
1778 compare_ether_addr(sdata->dev->dev_addr, 1842 compare_ether_addr(sdata->dev->dev_addr,
1779 hdr->addr1) != 0) { 1843 hdr->addr1) != 0) {
@@ -1783,8 +1847,8 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1783 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1847 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1784 } 1848 }
1785 break; 1849 break;
1786 case IEEE80211_IF_TYPE_VLAN: 1850 case NL80211_IFTYPE_AP_VLAN:
1787 case IEEE80211_IF_TYPE_AP: 1851 case NL80211_IFTYPE_AP:
1788 if (!bssid) { 1852 if (!bssid) {
1789 if (compare_ether_addr(sdata->dev->dev_addr, 1853 if (compare_ether_addr(sdata->dev->dev_addr,
1790 hdr->addr1)) 1854 hdr->addr1))
@@ -1796,16 +1860,17 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1796 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1860 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1797 } 1861 }
1798 break; 1862 break;
1799 case IEEE80211_IF_TYPE_WDS: 1863 case NL80211_IFTYPE_WDS:
1800 if (bssid || !ieee80211_is_data(hdr->frame_control)) 1864 if (bssid || !ieee80211_is_data(hdr->frame_control))
1801 return 0; 1865 return 0;
1802 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) 1866 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
1803 return 0; 1867 return 0;
1804 break; 1868 break;
1805 case IEEE80211_IF_TYPE_MNTR: 1869 case NL80211_IFTYPE_MONITOR:
1806 /* take everything */ 1870 /* take everything */
1807 break; 1871 break;
1808 case IEEE80211_IF_TYPE_INVALID: 1872 case NL80211_IFTYPE_UNSPECIFIED:
1873 case __NL80211_IFTYPE_AFTER_LAST:
1809 /* should never get here */ 1874 /* should never get here */
1810 WARN_ON(1); 1875 WARN_ON(1);
1811 break; 1876 break;
@@ -1827,23 +1892,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1827 struct ieee80211_sub_if_data *sdata; 1892 struct ieee80211_sub_if_data *sdata;
1828 struct ieee80211_hdr *hdr; 1893 struct ieee80211_hdr *hdr;
1829 struct ieee80211_rx_data rx; 1894 struct ieee80211_rx_data rx;
1830 u16 type;
1831 int prepares; 1895 int prepares;
1832 struct ieee80211_sub_if_data *prev = NULL; 1896 struct ieee80211_sub_if_data *prev = NULL;
1833 struct sk_buff *skb_new; 1897 struct sk_buff *skb_new;
1834 u8 *bssid; 1898 u8 *bssid;
1835 1899
1836 hdr = (struct ieee80211_hdr *) skb->data; 1900 hdr = (struct ieee80211_hdr *)skb->data;
1837 memset(&rx, 0, sizeof(rx)); 1901 memset(&rx, 0, sizeof(rx));
1838 rx.skb = skb; 1902 rx.skb = skb;
1839 rx.local = local; 1903 rx.local = local;
1840 1904
1841 rx.status = status; 1905 rx.status = status;
1842 rx.rate = rate; 1906 rx.rate = rate;
1843 rx.fc = le16_to_cpu(hdr->frame_control);
1844 type = rx.fc & IEEE80211_FCTL_FTYPE;
1845 1907
1846 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) 1908 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
1847 local->dot11ReceivedFragmentCount++; 1909 local->dot11ReceivedFragmentCount++;
1848 1910
1849 rx.sta = sta_info_get(local, hdr->addr2); 1911 rx.sta = sta_info_get(local, hdr->addr2);
@@ -1857,7 +1919,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1857 return; 1919 return;
1858 } 1920 }
1859 1921
1860 if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning)) 1922 if (unlikely(local->sw_scanning || local->hw_scanning))
1861 rx.flags |= IEEE80211_RX_IN_SCAN; 1923 rx.flags |= IEEE80211_RX_IN_SCAN;
1862 1924
1863 ieee80211_parse_qos(&rx); 1925 ieee80211_parse_qos(&rx);
@@ -1869,7 +1931,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1869 if (!netif_running(sdata->dev)) 1931 if (!netif_running(sdata->dev))
1870 continue; 1932 continue;
1871 1933
1872 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) 1934 if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
1873 continue; 1935 continue;
1874 1936
1875 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 1937 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
@@ -1904,14 +1966,12 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1904 prev->dev->name); 1966 prev->dev->name);
1905 continue; 1967 continue;
1906 } 1968 }
1907 rx.fc = le16_to_cpu(hdr->frame_control);
1908 ieee80211_invoke_rx_handlers(prev, &rx, skb_new); 1969 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
1909 prev = sdata; 1970 prev = sdata;
1910 } 1971 }
1911 if (prev) { 1972 if (prev)
1912 rx.fc = le16_to_cpu(hdr->frame_control);
1913 ieee80211_invoke_rx_handlers(prev, &rx, skb); 1973 ieee80211_invoke_rx_handlers(prev, &rx, skb);
1914 } else 1974 else
1915 dev_kfree_skb(skb); 1975 dev_kfree_skb(skb);
1916} 1976}
1917 1977
@@ -2080,7 +2140,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2080 /* if this mpdu is fragmented - terminate rx aggregation session */ 2140 /* if this mpdu is fragmented - terminate rx aggregation session */
2081 sc = le16_to_cpu(hdr->seq_ctrl); 2141 sc = le16_to_cpu(hdr->seq_ctrl);
2082 if (sc & IEEE80211_SCTL_FRAG) { 2142 if (sc & IEEE80211_SCTL_FRAG) {
2083 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, 2143 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
2084 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); 2144 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2085 ret = 1; 2145 ret = 1;
2086 goto end_reorder; 2146 goto end_reorder;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
new file mode 100644
index 000000000000..8e6685e7ae85
--- /dev/null
+++ b/net/mac80211/scan.c
@@ -0,0 +1,937 @@
1/*
2 * Scanning implementation
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15/* TODO:
16 * order BSS list by RSSI(?) ("quality of AP")
17 * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE,
18 * SSID)
19 */
20
21#include <linux/wireless.h>
22#include <linux/if_arp.h>
23#include <net/mac80211.h>
24#include <net/iw_handler.h>
25
26#include "ieee80211_i.h"
27#include "mesh.h"
28
29#define IEEE80211_PROBE_DELAY (HZ / 33)
30#define IEEE80211_CHANNEL_TIME (HZ / 33)
31#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5)
32
33void ieee80211_rx_bss_list_init(struct ieee80211_local *local)
34{
35 spin_lock_init(&local->bss_lock);
36 INIT_LIST_HEAD(&local->bss_list);
37}
38
39void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local)
40{
41 struct ieee80211_bss *bss, *tmp;
42
43 list_for_each_entry_safe(bss, tmp, &local->bss_list, list)
44 ieee80211_rx_bss_put(local, bss);
45}
46
47struct ieee80211_bss *
48ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
49 u8 *ssid, u8 ssid_len)
50{
51 struct ieee80211_bss *bss;
52
53 spin_lock_bh(&local->bss_lock);
54 bss = local->bss_hash[STA_HASH(bssid)];
55 while (bss) {
56 if (!bss_mesh_cfg(bss) &&
57 !memcmp(bss->bssid, bssid, ETH_ALEN) &&
58 bss->freq == freq &&
59 bss->ssid_len == ssid_len &&
60 (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) {
61 atomic_inc(&bss->users);
62 break;
63 }
64 bss = bss->hnext;
65 }
66 spin_unlock_bh(&local->bss_lock);
67 return bss;
68}
69
70/* Caller must hold local->bss_lock */
71static void __ieee80211_rx_bss_hash_add(struct ieee80211_local *local,
72 struct ieee80211_bss *bss)
73{
74 u8 hash_idx;
75
76 if (bss_mesh_cfg(bss))
77 hash_idx = mesh_id_hash(bss_mesh_id(bss),
78 bss_mesh_id_len(bss));
79 else
80 hash_idx = STA_HASH(bss->bssid);
81
82 bss->hnext = local->bss_hash[hash_idx];
83 local->bss_hash[hash_idx] = bss;
84}
85
86/* Caller must hold local->bss_lock */
87static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local,
88 struct ieee80211_bss *bss)
89{
90 struct ieee80211_bss *b, *prev = NULL;
91 b = local->bss_hash[STA_HASH(bss->bssid)];
92 while (b) {
93 if (b == bss) {
94 if (!prev)
95 local->bss_hash[STA_HASH(bss->bssid)] =
96 bss->hnext;
97 else
98 prev->hnext = bss->hnext;
99 break;
100 }
101 prev = b;
102 b = b->hnext;
103 }
104}
105
106struct ieee80211_bss *
107ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq,
108 u8 *ssid, u8 ssid_len)
109{
110 struct ieee80211_bss *bss;
111
112 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
113 if (!bss)
114 return NULL;
115 atomic_set(&bss->users, 2);
116 memcpy(bss->bssid, bssid, ETH_ALEN);
117 bss->freq = freq;
118 if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) {
119 memcpy(bss->ssid, ssid, ssid_len);
120 bss->ssid_len = ssid_len;
121 }
122
123 spin_lock_bh(&local->bss_lock);
124 /* TODO: order by RSSI? */
125 list_add_tail(&bss->list, &local->bss_list);
126 __ieee80211_rx_bss_hash_add(local, bss);
127 spin_unlock_bh(&local->bss_lock);
128 return bss;
129}
130
131#ifdef CONFIG_MAC80211_MESH
132static struct ieee80211_bss *
133ieee80211_rx_mesh_bss_get(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len,
134 u8 *mesh_cfg, int freq)
135{
136 struct ieee80211_bss *bss;
137
138 spin_lock_bh(&local->bss_lock);
139 bss = local->bss_hash[mesh_id_hash(mesh_id, mesh_id_len)];
140 while (bss) {
141 if (bss_mesh_cfg(bss) &&
142 !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) &&
143 bss->freq == freq &&
144 mesh_id_len == bss->mesh_id_len &&
145 (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id,
146 mesh_id_len))) {
147 atomic_inc(&bss->users);
148 break;
149 }
150 bss = bss->hnext;
151 }
152 spin_unlock_bh(&local->bss_lock);
153 return bss;
154}
155
156static struct ieee80211_bss *
157ieee80211_rx_mesh_bss_add(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len,
158 u8 *mesh_cfg, int mesh_config_len, int freq)
159{
160 struct ieee80211_bss *bss;
161
162 if (mesh_config_len != MESH_CFG_LEN)
163 return NULL;
164
165 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
166 if (!bss)
167 return NULL;
168
169 bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC);
170 if (!bss->mesh_cfg) {
171 kfree(bss);
172 return NULL;
173 }
174
175 if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) {
176 bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC);
177 if (!bss->mesh_id) {
178 kfree(bss->mesh_cfg);
179 kfree(bss);
180 return NULL;
181 }
182 memcpy(bss->mesh_id, mesh_id, mesh_id_len);
183 }
184
185 atomic_set(&bss->users, 2);
186 memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN);
187 bss->mesh_id_len = mesh_id_len;
188 bss->freq = freq;
189 spin_lock_bh(&local->bss_lock);
190 /* TODO: order by RSSI? */
191 list_add_tail(&bss->list, &local->bss_list);
192 __ieee80211_rx_bss_hash_add(local, bss);
193 spin_unlock_bh(&local->bss_lock);
194 return bss;
195}
196#endif
197
198static void ieee80211_rx_bss_free(struct ieee80211_bss *bss)
199{
200 kfree(bss->ies);
201 kfree(bss_mesh_id(bss));
202 kfree(bss_mesh_cfg(bss));
203 kfree(bss);
204}
205
206void ieee80211_rx_bss_put(struct ieee80211_local *local,
207 struct ieee80211_bss *bss)
208{
209 local_bh_disable();
210 if (!atomic_dec_and_lock(&bss->users, &local->bss_lock)) {
211 local_bh_enable();
212 return;
213 }
214
215 __ieee80211_rx_bss_hash_del(local, bss);
216 list_del(&bss->list);
217 spin_unlock_bh(&local->bss_lock);
218 ieee80211_rx_bss_free(bss);
219}
220
221struct ieee80211_bss *
222ieee80211_bss_info_update(struct ieee80211_local *local,
223 struct ieee80211_rx_status *rx_status,
224 struct ieee80211_mgmt *mgmt,
225 size_t len,
226 struct ieee802_11_elems *elems,
227 int freq, bool beacon)
228{
229 struct ieee80211_bss *bss;
230 int clen;
231
232#ifdef CONFIG_MAC80211_MESH
233 if (elems->mesh_config)
234 bss = ieee80211_rx_mesh_bss_get(local, elems->mesh_id,
235 elems->mesh_id_len, elems->mesh_config, freq);
236 else
237#endif
238 bss = ieee80211_rx_bss_get(local, mgmt->bssid, freq,
239 elems->ssid, elems->ssid_len);
240 if (!bss) {
241#ifdef CONFIG_MAC80211_MESH
242 if (elems->mesh_config)
243 bss = ieee80211_rx_mesh_bss_add(local, elems->mesh_id,
244 elems->mesh_id_len, elems->mesh_config,
245 elems->mesh_config_len, freq);
246 else
247#endif
248 bss = ieee80211_rx_bss_add(local, mgmt->bssid, freq,
249 elems->ssid, elems->ssid_len);
250 if (!bss)
251 return NULL;
252 } else {
253#if 0
254 /* TODO: order by RSSI? */
255 spin_lock_bh(&local->bss_lock);
256 list_move_tail(&bss->list, &local->bss_list);
257 spin_unlock_bh(&local->bss_lock);
258#endif
259 }
260
261 /* save the ERP value so that it is available at association time */
262 if (elems->erp_info && elems->erp_info_len >= 1) {
263 bss->erp_value = elems->erp_info[0];
264 bss->has_erp_value = 1;
265 }
266
267 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
268 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);
269
270 if (elems->tim) {
271 struct ieee80211_tim_ie *tim_ie =
272 (struct ieee80211_tim_ie *)elems->tim;
273 bss->dtim_period = tim_ie->dtim_period;
274 }
275
276 /* set default value for buggy APs */
277 if (!elems->tim || bss->dtim_period == 0)
278 bss->dtim_period = 1;
279
280 bss->supp_rates_len = 0;
281 if (elems->supp_rates) {
282 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
283 if (clen > elems->supp_rates_len)
284 clen = elems->supp_rates_len;
285 memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
286 clen);
287 bss->supp_rates_len += clen;
288 }
289 if (elems->ext_supp_rates) {
290 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
291 if (clen > elems->ext_supp_rates_len)
292 clen = elems->ext_supp_rates_len;
293 memcpy(&bss->supp_rates[bss->supp_rates_len],
294 elems->ext_supp_rates, clen);
295 bss->supp_rates_len += clen;
296 }
297
298 bss->band = rx_status->band;
299
300 bss->timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
301 bss->last_update = jiffies;
302 bss->signal = rx_status->signal;
303 bss->noise = rx_status->noise;
304 bss->qual = rx_status->qual;
305 bss->wmm_used = elems->wmm_param || elems->wmm_info;
306
307 if (!beacon)
308 bss->last_probe_resp = jiffies;
309
310 /*
311 * For probe responses, or if we don't have any information yet,
312 * use the IEs from the beacon.
313 */
314 if (!bss->ies || !beacon) {
315 if (bss->ies == NULL || bss->ies_len < elems->total_len) {
316 kfree(bss->ies);
317 bss->ies = kmalloc(elems->total_len, GFP_ATOMIC);
318 }
319 if (bss->ies) {
320 memcpy(bss->ies, elems->ie_start, elems->total_len);
321 bss->ies_len = elems->total_len;
322 } else
323 bss->ies_len = 0;
324 }
325
326 return bss;
327}
328
329ieee80211_rx_result
330ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
331 struct ieee80211_rx_status *rx_status)
332{
333 struct ieee80211_mgmt *mgmt;
334 struct ieee80211_bss *bss;
335 u8 *elements;
336 struct ieee80211_channel *channel;
337 size_t baselen;
338 int freq;
339 __le16 fc;
340 bool presp, beacon = false;
341 struct ieee802_11_elems elems;
342
343 if (skb->len < 2)
344 return RX_DROP_UNUSABLE;
345
346 mgmt = (struct ieee80211_mgmt *) skb->data;
347 fc = mgmt->frame_control;
348
349 if (ieee80211_is_ctl(fc))
350 return RX_CONTINUE;
351
352 if (skb->len < 24)
353 return RX_DROP_MONITOR;
354
355 presp = ieee80211_is_probe_resp(fc);
356 if (presp) {
357 /* ignore ProbeResp to foreign address */
358 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
359 return RX_DROP_MONITOR;
360
361 presp = true;
362 elements = mgmt->u.probe_resp.variable;
363 baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
364 } else {
365 beacon = ieee80211_is_beacon(fc);
366 baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
367 elements = mgmt->u.beacon.variable;
368 }
369
370 if (!presp && !beacon)
371 return RX_CONTINUE;
372
373 if (baselen > skb->len)
374 return RX_DROP_MONITOR;
375
376 ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
377
378 if (elems.ds_params && elems.ds_params_len == 1)
379 freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
380 else
381 freq = rx_status->freq;
382
383 channel = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
384
385 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
386 return RX_DROP_MONITOR;
387
388 bss = ieee80211_bss_info_update(sdata->local, rx_status,
389 mgmt, skb->len, &elems,
390 freq, beacon);
391 ieee80211_rx_bss_put(sdata->local, bss);
392
393 dev_kfree_skb(skb);
394 return RX_QUEUED;
395}
396
397static void ieee80211_send_nullfunc(struct ieee80211_local *local,
398 struct ieee80211_sub_if_data *sdata,
399 int powersave)
400{
401 struct sk_buff *skb;
402 struct ieee80211_hdr *nullfunc;
403 __le16 fc;
404
405 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
406 if (!skb) {
407 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
408 "frame\n", sdata->dev->name);
409 return;
410 }
411 skb_reserve(skb, local->hw.extra_tx_headroom);
412
413 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
414 memset(nullfunc, 0, 24);
415 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
416 IEEE80211_FCTL_TODS);
417 if (powersave)
418 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
419 nullfunc->frame_control = fc;
420 memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN);
421 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
422 memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN);
423
424 ieee80211_tx_skb(sdata, skb, 0);
425}
426
/*
 * Complete an in-progress scan (hardware- or software-driven):
 * restore the operating channel, deliver the SIOCGIWSCAN wireless
 * event to userspace, and — for software scans only — clear the
 * beacon/probe-response promiscuous RX filter and re-wake the TX
 * queues that were stopped while scanning.
 */
427void ieee80211_scan_completed(struct ieee80211_hw *hw)
428{
429 struct ieee80211_local *local = hw_to_local(hw);
430 struct ieee80211_sub_if_data *sdata;
431 union iwreq_data wrqu;
432
    /* calling this with no scan in progress is a driver bug */
433 if (WARN_ON(!local->hw_scanning && !local->sw_scanning))
434 return;
435
436 local->last_scan_completed = jiffies;
437 memset(&wrqu, 0, sizeof(wrqu));
438
439 /*
440 * local->scan_sdata could have been NULLed by the interface
441 * down code in case we were scanning on an interface that is
442 * being taken down.
443 */
444 sdata = local->scan_sdata;
445 if (sdata)
446 wireless_send_event(sdata->dev, SIOCGIWSCAN, &wrqu, NULL);
447
    /*
     * A hardware scan did not touch the RX filter or stop queues,
     * so only the channel needs restoring in that case.
     */
448 if (local->hw_scanning) {
449 local->hw_scanning = false;
450 if (ieee80211_hw_config(local))
451 printk(KERN_DEBUG "%s: failed to restore operational "
452 "channel after scan\n", wiphy_name(local->hw.wiphy));
453
454 goto done;
455 }
456
457 local->sw_scanning = false;
458 if (ieee80211_hw_config(local))
459 printk(KERN_DEBUG "%s: failed to restore operational "
460 "channel after scan\n", wiphy_name(local->hw.wiphy));
461
462
    /* stop accepting all beacons/probe responses again */
463 netif_tx_lock_bh(local->mdev);
464 netif_addr_lock(local->mdev);
465 local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC;
466 local->ops->configure_filter(local_to_hw(local),
467 FIF_BCN_PRBRESP_PROMISC,
468 &local->filter_flags,
469 local->mdev->mc_count,
470 local->mdev->mc_list);
471
472 netif_addr_unlock(local->mdev);
473 netif_tx_unlock_bh(local->mdev);
474
475 rcu_read_lock();
476 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
477 /* Tell AP we're back */
478 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
479 if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) {
480 ieee80211_send_nullfunc(local, sdata, 0);
481 netif_tx_wake_all_queues(sdata->dev);
482 }
483 } else
484 netif_tx_wake_all_queues(sdata->dev);
485 }
486 rcu_read_unlock();
487
488 done:
    /* let the MLME and mesh machinery resume work deferred by the scan */
489 ieee80211_mlme_notify_scan_completed(local);
490 ieee80211_mesh_notify_scan_completed(local);
491}
492EXPORT_SYMBOL(ieee80211_scan_completed);
493
494
/*
 * Software-scan state machine, driven from a delayed work item.
 * Alternates between SCAN_SET_CHANNEL (tune to the next scannable
 * channel, skipping disabled / no-IBSS channels) and SCAN_SEND_PROBE
 * (probe actively unless the channel is passive-scan only).  Re-arms
 * itself with the appropriate dwell delay until all bands/channels
 * are exhausted, then calls ieee80211_scan_completed().
 */
495void ieee80211_scan_work(struct work_struct *work)
496{
497 struct ieee80211_local *local =
498 container_of(work, struct ieee80211_local, scan_work.work);
499 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
500 struct ieee80211_supported_band *sband;
501 struct ieee80211_channel *chan;
502 int skip;
503 unsigned long next_delay = 0;
504
505 /*
506 * Avoid re-scheduling when the sdata is going away.
507 */
508 if (!netif_running(sdata->dev))
509 return;
510
511 switch (local->scan_state) {
512 case SCAN_SET_CHANNEL:
513 /*
514 * Get current scan band. scan_band may be IEEE80211_NUM_BANDS
515 * after we successfully scanned the last channel of the last
516 * band (and the last band is supported by the hw)
517 */
518 if (local->scan_band < IEEE80211_NUM_BANDS)
519 sband = local->hw.wiphy->bands[local->scan_band];
520 else
521 sband = NULL;
522
523 /*
524 * If we are at an unsupported band and have more bands
525 * left to scan, advance to the next supported one.
526 */
527 while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) {
528 local->scan_band++;
529 sband = local->hw.wiphy->bands[local->scan_band];
530 local->scan_channel_idx = 0;
531 }
532
533 /* if no more bands/channels left, complete scan */
534 if (!sband || local->scan_channel_idx >= sband->n_channels) {
535 ieee80211_scan_completed(local_to_hw(local));
536 return;
537 }
538 skip = 0;
539 chan = &sband->channels[local->scan_channel_idx];
540
    /* never tune to disabled channels, nor to no-IBSS ones in ad-hoc mode */
541 if (chan->flags & IEEE80211_CHAN_DISABLED ||
542 (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
543 chan->flags & IEEE80211_CHAN_NO_IBSS))
544 skip = 1;
545
546 if (!skip) {
547 local->scan_channel = chan;
548 if (ieee80211_hw_config(local)) {
549 printk(KERN_DEBUG "%s: failed to set freq to "
550 "%d MHz for scan\n", wiphy_name(local->hw.wiphy),
551 chan->center_freq);
552 skip = 1;
553 }
554 }
555
556 /* advance state machine to next channel/band */
557 local->scan_channel_idx++;
558 if (local->scan_channel_idx >= sband->n_channels) {
559 /*
560 * scan_band may end up == IEEE80211_NUM_BANDS, but
561 * we'll catch that case above and complete the scan
562 * if that is the case.
563 */
564 local->scan_band++;
565 local->scan_channel_idx = 0;
566 }
567
    /* skipped channel: stay in SCAN_SET_CHANNEL and retry at once
     * (next_delay is still 0) */
568 if (skip)
569 break;
570
571 next_delay = IEEE80211_PROBE_DELAY +
572 usecs_to_jiffies(local->hw.channel_change_time);
573 local->scan_state = SCAN_SEND_PROBE;
574 break;
575 case SCAN_SEND_PROBE:
    /* passive channels only get the dwell time; active ones get a probe */
576 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
577 local->scan_state = SCAN_SET_CHANNEL;
578
579 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN)
580 break;
581 ieee80211_send_probe_req(sdata, NULL, local->scan_ssid,
582 local->scan_ssid_len);
583 next_delay = IEEE80211_CHANNEL_TIME;
584 break;
585 }
586
587 queue_delayed_work(local->hw.workqueue, &local->scan_work,
588 next_delay);
589}
590
591
/*
 * Start a scan on behalf of @scan_sdata.  Uses the driver's hw_scan
 * callback when available; otherwise runs the software scan: quiesce
 * TX (associated stations announce absence via a nullfunc first),
 * record the requested SSID, enable beacon/probe-response promiscuous
 * RX and schedule ieee80211_scan_work().
 *
 * Returns 0 on success (including when this interface is already the
 * one scanning), -EBUSY when another interface holds the scan, or
 * -EINVAL for an over-long SSID.
 */
592int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
593 u8 *ssid, size_t ssid_len)
594{
595 struct ieee80211_local *local = scan_sdata->local;
596 struct ieee80211_sub_if_data *sdata;
597
598 if (ssid_len > IEEE80211_MAX_SSID_LEN)
599 return -EINVAL;
600
601 /* MLME-SCAN.request (page 118) page 144 (11.1.3.1)
602 * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS
603 * BSSID: MACAddress
604 * SSID
605 * ScanType: ACTIVE, PASSIVE
606 * ProbeDelay: delay (in microseconds) to be used prior to transmitting
607 * a Probe frame during active scanning
608 * ChannelList
609 * MinChannelTime (>= ProbeDelay), in TU
610 * MaxChannelTime: (>= MinChannelTime), in TU
611 */
612
613 /* MLME-SCAN.confirm
614 * BSSDescriptionSet
615 * ResultCode: SUCCESS, INVALID_PARAMETERS
616 */
617
    /* only one scan at a time across all interfaces */
618 if (local->sw_scanning || local->hw_scanning) {
619 if (local->scan_sdata == scan_sdata)
620 return 0;
621 return -EBUSY;
622 }
623
    /* prefer a driver-provided hardware scan when the op exists */
624 if (local->ops->hw_scan) {
625 int rc;
626
627 local->hw_scanning = true;
628 rc = local->ops->hw_scan(local_to_hw(local), ssid, ssid_len);
629 if (rc) {
630 local->hw_scanning = false;
631 return rc;
632 }
633 local->scan_sdata = scan_sdata;
634 return 0;
635 }
636
637 local->sw_scanning = true;
638
    /*
     * Stop traffic on every interface; associated stations first send
     * a nullfunc (final argument 1 — NOTE(review): presumably the
     * power-save flag, confirm against ieee80211_send_nullfunc()).
     */
639 rcu_read_lock();
640 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
641 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
642 if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) {
643 netif_tx_stop_all_queues(sdata->dev);
644 ieee80211_send_nullfunc(local, sdata, 1);
645 }
646 } else
647 netif_tx_stop_all_queues(sdata->dev);
648 }
649 rcu_read_unlock();
650
651 if (ssid) {
652 local->scan_ssid_len = ssid_len;
653 memcpy(local->scan_ssid, ssid, ssid_len);
654 } else
655 local->scan_ssid_len = 0;
656 local->scan_state = SCAN_SET_CHANNEL;
657 local->scan_channel_idx = 0;
658 local->scan_band = IEEE80211_BAND_2GHZ;
659 local->scan_sdata = scan_sdata;
660
    /* accept all beacons and probe responses while scanning */
661 netif_addr_lock_bh(local->mdev);
662 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
663 local->ops->configure_filter(local_to_hw(local),
664 FIF_BCN_PRBRESP_PROMISC,
665 &local->filter_flags,
666 local->mdev->mc_count,
667 local->mdev->mc_list);
668 netif_addr_unlock_bh(local->mdev);
669
670 /* TODO: start scan as soon as all nullfunc frames are ACKed */
671 queue_delayed_work(local->hw.workqueue, &local->scan_work,
672 IEEE80211_CHANNEL_TIME);
673
674 return 0;
675}
676
677
/*
 * Entry point for scan requests.  Non-station interfaces scan
 * immediately via ieee80211_start_scan(); station interfaces defer
 * the request to the STA MLME work item so it cannot race with
 * authentication/association.  Returns 0 or -EBUSY.
 */
678int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
679 u8 *ssid, size_t ssid_len)
680{
681 struct ieee80211_local *local = sdata->local;
682 struct ieee80211_if_sta *ifsta;
683
684 if (sdata->vif.type != NL80211_IFTYPE_STATION)
685 return ieee80211_start_scan(sdata, ssid, ssid_len);
686
687 /*
688 * STA has a state machine that might need to defer scanning
689 * while it's trying to associate/authenticate, therefore we
690 * queue it up to the state machine in that case.
691 */
692
693 if (local->sw_scanning || local->hw_scanning) {
694 if (local->scan_sdata == sdata)
695 return 0;
696 return -EBUSY;
697 }
698
699 ifsta = &sdata->u.sta;
700
    /*
     * NOTE(review): unlike ieee80211_start_scan(), ssid_len is not
     * checked against IEEE80211_MAX_SSID_LEN before this memcpy —
     * verify all callers guarantee ssid_len fits ifsta->scan_ssid.
     */
701 ifsta->scan_ssid_len = ssid_len;
702 if (ssid_len)
703 memcpy(ifsta->scan_ssid, ssid, ssid_len);
704 set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request);
705 queue_work(local->hw.workqueue, &ifsta->work);
706
707 return 0;
708}
709
710
/*
 * Append the BSS's raw information elements to the wext scan-result
 * stream as one or more IWEVGENIE events, splitting only on IE
 * boundaries so each event payload stays within IW_GENERIC_IE_MAX
 * octets.  Advances *current_ev as events are added.
 */
711static void ieee80211_scan_add_ies(struct iw_request_info *info,
712 struct ieee80211_bss *bss,
713 char **current_ev, char *end_buf)
714{
715 u8 *pos, *end, *next;
716 struct iw_event iwe;
717
718 if (bss == NULL || bss->ies == NULL)
719 return;
720
721 /*
722 * If needed, fragment the IEs buffer (at IE boundaries) into short
723 * enough fragments to fit into IW_GENERIC_IE_MAX octet messages.
724 */
725 pos = bss->ies;
726 end = pos + bss->ies_len;
727
728 while (end - pos > IW_GENERIC_IE_MAX) {
    /* each IE is TLV-encoded: id byte, length byte, then pos[1] octets */
729 next = pos + 2 + pos[1];
730 while (next + 2 + next[1] - pos < IW_GENERIC_IE_MAX)
731 next = next + 2 + next[1];
732
733 memset(&iwe, 0, sizeof(iwe));
734 iwe.cmd = IWEVGENIE;
735 iwe.u.data.length = next - pos;
736 *current_ev = iwe_stream_add_point(info, *current_ev,
737 end_buf, &iwe, pos);
738
739 pos = next;
740 }
741
    /* flush the final (short) fragment, if any */
742 if (end > pos) {
743 memset(&iwe, 0, sizeof(iwe));
744 iwe.cmd = IWEVGENIE;
745 iwe.u.data.length = end - pos;
746 *current_ev = iwe_stream_add_point(info, *current_ev,
747 end_buf, &iwe, pos);
748 }
749}
750
751
/*
 * Translate one cached BSS entry into a sequence of wireless-extensions
 * scan events: BSSID, SSID (or mesh ID), operating mode, channel and
 * frequency, link quality, encryption flag, raw IEs, supported rates,
 * TSF / last-beacon custom strings and, for mesh networks, the mesh
 * configuration fields.  Entries older than IEEE80211_SCAN_RESULT_EXPIRE
 * are skipped.  Returns the updated write position in the event buffer.
 */
752static char *
753ieee80211_scan_result(struct ieee80211_local *local,
754 struct iw_request_info *info,
755 struct ieee80211_bss *bss,
756 char *current_ev, char *end_buf)
757{
758 struct iw_event iwe;
759 char *buf;
760
    /* drop stale entries without emitting anything */
761 if (time_after(jiffies,
762 bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE))
763 return current_ev;
764
765 memset(&iwe, 0, sizeof(iwe));
766 iwe.cmd = SIOCGIWAP;
767 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
768 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
769 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
770 IW_EV_ADDR_LEN);
771
772 memset(&iwe, 0, sizeof(iwe));
773 iwe.cmd = SIOCGIWESSID;
774 if (bss_mesh_cfg(bss)) {
775 iwe.u.data.length = bss_mesh_id_len(bss);
776 iwe.u.data.flags = 1;
777 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
778 &iwe, bss_mesh_id(bss));
779 } else {
780 iwe.u.data.length = bss->ssid_len;
781 iwe.u.data.flags = 1;
782 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
783 &iwe, bss->ssid);
784 }
785
786 if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
787 || bss_mesh_cfg(bss)) {
788 memset(&iwe, 0, sizeof(iwe));
789 iwe.cmd = SIOCGIWMODE;
790 if (bss_mesh_cfg(bss))
791 iwe.u.mode = IW_MODE_MESH;
792 else if (bss->capability & WLAN_CAPABILITY_ESS)
793 iwe.u.mode = IW_MODE_MASTER;
794 else
795 iwe.u.mode = IW_MODE_ADHOC;
796 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
797 &iwe, IW_EV_UINT_LEN);
798 }
799
    /* first SIOCGIWFREQ event: channel number (exponent 0) */
800 memset(&iwe, 0, sizeof(iwe));
801 iwe.cmd = SIOCGIWFREQ;
802 iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
803 iwe.u.freq.e = 0;
804 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
805 IW_EV_FREQ_LEN);
806
    /* second SIOCGIWFREQ event: MHz value with exponent 6, i.e. Hz */
807 memset(&iwe, 0, sizeof(iwe));
808 iwe.cmd = SIOCGIWFREQ;
809 iwe.u.freq.m = bss->freq;
810 iwe.u.freq.e = 6;
811 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
812 IW_EV_FREQ_LEN);
813 memset(&iwe, 0, sizeof(iwe));
814 iwe.cmd = IWEVQUAL;
815 iwe.u.qual.qual = bss->qual;
816 iwe.u.qual.level = bss->signal;
817 iwe.u.qual.noise = bss->noise;
818 iwe.u.qual.updated = local->wstats_flags;
819 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
820 IW_EV_QUAL_LEN);
821
    /* report only whether privacy is on; the key itself is never exposed */
822 memset(&iwe, 0, sizeof(iwe));
823 iwe.cmd = SIOCGIWENCODE;
824 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
825 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
826 else
827 iwe.u.data.flags = IW_ENCODE_DISABLED;
828 iwe.u.data.length = 0;
829 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
830 &iwe, "");
831
832 ieee80211_scan_add_ies(info, bss, &current_ev, end_buf);
833
834 if (bss->supp_rates_len > 0) {
835 /* display all supported rates in readable format */
836 char *p = current_ev + iwe_stream_lcp_len(info);
837 int i;
838
839 memset(&iwe, 0, sizeof(iwe));
840 iwe.cmd = SIOCGIWRATE;
841 /* Those two flags are ignored... */
842 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
843
    /* low 7 bits are in 500 kb/s units; the top (basic-rate) bit is masked */
844 for (i = 0; i < bss->supp_rates_len; i++) {
845 iwe.u.bitrate.value = ((bss->supp_rates[i] &
846 0x7f) * 500000);
847 p = iwe_stream_add_value(info, current_ev, p,
848 end_buf, &iwe, IW_EV_PARAM_LEN);
849 }
850 current_ev = p;
851 }
852
    /* scratch buffer for the two custom (IWEVCUSTOM) strings below */
853 buf = kmalloc(30, GFP_ATOMIC);
854 if (buf) {
855 memset(&iwe, 0, sizeof(iwe));
856 iwe.cmd = IWEVCUSTOM;
857 sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp));
858 iwe.u.data.length = strlen(buf);
859 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
860 &iwe, buf);
861 memset(&iwe, 0, sizeof(iwe));
862 iwe.cmd = IWEVCUSTOM;
863 sprintf(buf, " Last beacon: %dms ago",
864 jiffies_to_msecs(jiffies - bss->last_update));
865 iwe.u.data.length = strlen(buf);
866 current_ev = iwe_stream_add_point(info, current_ev,
867 end_buf, &iwe, buf);
868 kfree(buf);
869 }
870
    /* mesh-only: dump the raw mesh configuration fields as custom events */
871 if (bss_mesh_cfg(bss)) {
872 u8 *cfg = bss_mesh_cfg(bss);
873 buf = kmalloc(50, GFP_ATOMIC);
874 if (buf) {
875 memset(&iwe, 0, sizeof(iwe));
876 iwe.cmd = IWEVCUSTOM;
877 sprintf(buf, "Mesh network (version %d)", cfg[0]);
878 iwe.u.data.length = strlen(buf);
879 current_ev = iwe_stream_add_point(info, current_ev,
880 end_buf,
881 &iwe, buf);
882 sprintf(buf, "Path Selection Protocol ID: "
883 "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
884 cfg[4]);
885 iwe.u.data.length = strlen(buf);
886 current_ev = iwe_stream_add_point(info, current_ev,
887 end_buf,
888 &iwe, buf);
889 sprintf(buf, "Path Selection Metric ID: "
890 "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
891 cfg[8]);
892 iwe.u.data.length = strlen(buf);
893 current_ev = iwe_stream_add_point(info, current_ev,
894 end_buf,
895 &iwe, buf);
896 sprintf(buf, "Congestion Control Mode ID: "
897 "0x%02X%02X%02X%02X", cfg[9], cfg[10],
898 cfg[11], cfg[12]);
899 iwe.u.data.length = strlen(buf);
900 current_ev = iwe_stream_add_point(info, current_ev,
901 end_buf,
902 &iwe, buf);
903 sprintf(buf, "Channel Precedence: "
904 "0x%02X%02X%02X%02X", cfg[13], cfg[14],
905 cfg[15], cfg[16]);
906 iwe.u.data.length = strlen(buf);
907 current_ev = iwe_stream_add_point(info, current_ev,
908 end_buf,
909 &iwe, buf);
910 kfree(buf);
911 }
912 }
913
914 return current_ev;
915}
916
917
/*
 * Fill the caller's buffer with wext scan events for every cached BSS,
 * holding bss_lock across the walk.  Returns the number of bytes
 * written, or -E2BIG when the remaining space cannot hold even one
 * more address event.
 */
918int ieee80211_scan_results(struct ieee80211_local *local,
919 struct iw_request_info *info,
920 char *buf, size_t len)
921{
922 char *current_ev = buf;
923 char *end_buf = buf + len;
924 struct ieee80211_bss *bss;
925
926 spin_lock_bh(&local->bss_lock);
927 list_for_each_entry(bss, &local->bss_list, list) {
    /* bail out early rather than emitting a truncated entry */
928 if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
929 spin_unlock_bh(&local->bss_lock);
930 return -E2BIG;
931 }
932 current_ev = ieee80211_scan_result(local, info, bss,
933 current_ev, end_buf);
934 }
935 spin_unlock_bh(&local->bss_lock);
936 return current_ev - buf;
937}
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
new file mode 100644
index 000000000000..f72bad636d8e
--- /dev/null
+++ b/net/mac80211/spectmgmt.c
@@ -0,0 +1,86 @@
1/*
2 * spectrum management
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2002-2005, Instant802 Networks, Inc.
6 * Copyright 2005-2006, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2008, Intel Corporation
10 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/ieee80211.h>
18#include <net/wireless.h>
19#include <net/mac80211.h>
20#include "ieee80211_i.h"
21#include "sta_info.h"
22#include "wme.h"
23
/*
 * Build and transmit a spectrum-management measurement report action
 * frame that refuses the given measurement request: the report echoes
 * the request's token and type and sets the REFUSED mode bit.
 * Allocation failure is logged and the reply silently dropped.
 */
24static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata,
25 struct ieee80211_msrment_ie *request_ie,
26 const u8 *da, const u8 *bssid,
27 u8 dialog_token)
28{
29 struct ieee80211_local *local = sdata->local;
30 struct sk_buff *skb;
31 struct ieee80211_mgmt *msr_report;
32
33 skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
34 sizeof(struct ieee80211_msrment_ie));
35
36 if (!skb) {
37 printk(KERN_ERR "%s: failed to allocate buffer for "
38 "measurement report frame\n", sdata->dev->name);
39 return;
40 }
41
42 skb_reserve(skb, local->hw.extra_tx_headroom);
    /* 24 bytes: the fixed 802.11 management frame header (da/sa/bssid/fc) */
43 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
44 memset(msr_report, 0, 24);
45 memcpy(msr_report->da, da, ETH_ALEN);
46 memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN);
47 memcpy(msr_report->bssid, bssid, ETH_ALEN);
48 msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
49 IEEE80211_STYPE_ACTION);
50
    /* action body: category byte plus the measurement sub-structure */
51 skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
52 msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
53 msr_report->u.action.u.measurement.action_code =
54 WLAN_ACTION_SPCT_MSR_RPRT;
55 msr_report->u.action.u.measurement.dialog_token = dialog_token;
56
57 msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
58 msr_report->u.action.u.measurement.length =
59 sizeof(struct ieee80211_msrment_ie);
60
    /* mirror the request's token/type; report mode = REFUSED */
61 memset(&msr_report->u.action.u.measurement.msr_elem, 0,
62 sizeof(struct ieee80211_msrment_ie));
63 msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
64 msr_report->u.action.u.measurement.msr_elem.mode |=
65 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
66 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
67
68 ieee80211_tx_skb(sdata, skb, 0);
69}
70
/*
 * Handle an incoming spectrum-management measurement request action
 * frame by replying with a refusal report addressed to the sender.
 * @len is currently unused here.
 */
71void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
72 struct ieee80211_mgmt *mgmt,
73 size_t len)
74{
75 /*
76 * Ignoring measurement request is spec violation.
77 * Mandatory measurements must be reported optional
78 * measurements might be refused or reported incapable
79 * For now just refuse
80 * TODO: Answer basic measurement as unmeasured
81 */
82 ieee80211_send_refuse_measurement_request(sdata,
83 &mgmt->u.action.u.measurement.msr_elem,
84 mgmt->sa, mgmt->bssid,
85 mgmt->u.action.u.measurement.dialog_token);
86}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f2ba653b9d69..d9774ac2e0f7 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -73,11 +73,11 @@ static int sta_info_hash_del(struct ieee80211_local *local,
73{ 73{
74 struct sta_info *s; 74 struct sta_info *s;
75 75
76 s = local->sta_hash[STA_HASH(sta->addr)]; 76 s = local->sta_hash[STA_HASH(sta->sta.addr)];
77 if (!s) 77 if (!s)
78 return -ENOENT; 78 return -ENOENT;
79 if (s == sta) { 79 if (s == sta) {
80 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)], 80 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
81 s->hnext); 81 s->hnext);
82 return 0; 82 return 0;
83 } 83 }
@@ -94,13 +94,13 @@ static int sta_info_hash_del(struct ieee80211_local *local,
94 94
95/* protected by RCU */ 95/* protected by RCU */
96static struct sta_info *__sta_info_find(struct ieee80211_local *local, 96static struct sta_info *__sta_info_find(struct ieee80211_local *local,
97 u8 *addr) 97 const u8 *addr)
98{ 98{
99 struct sta_info *sta; 99 struct sta_info *sta;
100 100
101 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 101 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
102 while (sta) { 102 while (sta) {
103 if (compare_ether_addr(sta->addr, addr) == 0) 103 if (compare_ether_addr(sta->sta.addr, addr) == 0)
104 break; 104 break;
105 sta = rcu_dereference(sta->hnext); 105 sta = rcu_dereference(sta->hnext);
106 } 106 }
@@ -151,7 +151,7 @@ static void __sta_info_free(struct ieee80211_local *local,
151 151
152#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 152#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
153 printk(KERN_DEBUG "%s: Destroyed STA %s\n", 153 printk(KERN_DEBUG "%s: Destroyed STA %s\n",
154 wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr)); 154 wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->sta.addr));
155#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 155#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
156 156
157 kfree(sta); 157 kfree(sta);
@@ -219,8 +219,8 @@ void sta_info_destroy(struct sta_info *sta)
219static void sta_info_hash_add(struct ieee80211_local *local, 219static void sta_info_hash_add(struct ieee80211_local *local,
220 struct sta_info *sta) 220 struct sta_info *sta)
221{ 221{
222 sta->hnext = local->sta_hash[STA_HASH(sta->addr)]; 222 sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
223 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)], sta); 223 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
224} 224}
225 225
226struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, 226struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
@@ -231,14 +231,14 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
231 int i; 231 int i;
232 DECLARE_MAC_BUF(mbuf); 232 DECLARE_MAC_BUF(mbuf);
233 233
234 sta = kzalloc(sizeof(*sta), gfp); 234 sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp);
235 if (!sta) 235 if (!sta)
236 return NULL; 236 return NULL;
237 237
238 spin_lock_init(&sta->lock); 238 spin_lock_init(&sta->lock);
239 spin_lock_init(&sta->flaglock); 239 spin_lock_init(&sta->flaglock);
240 240
241 memcpy(sta->addr, addr, ETH_ALEN); 241 memcpy(sta->sta.addr, addr, ETH_ALEN);
242 sta->local = local; 242 sta->local = local;
243 sta->sdata = sdata; 243 sta->sdata = sdata;
244 244
@@ -271,7 +271,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
271 271
272#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 272#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
273 printk(KERN_DEBUG "%s: Allocated STA %s\n", 273 printk(KERN_DEBUG "%s: Allocated STA %s\n",
274 wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr)); 274 wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->sta.addr));
275#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 275#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
276 276
277#ifdef CONFIG_MAC80211_MESH 277#ifdef CONFIG_MAC80211_MESH
@@ -300,15 +300,15 @@ int sta_info_insert(struct sta_info *sta)
300 goto out_free; 300 goto out_free;
301 } 301 }
302 302
303 if (WARN_ON(compare_ether_addr(sta->addr, sdata->dev->dev_addr) == 0 || 303 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->dev->dev_addr) == 0 ||
304 is_multicast_ether_addr(sta->addr))) { 304 is_multicast_ether_addr(sta->sta.addr))) {
305 err = -EINVAL; 305 err = -EINVAL;
306 goto out_free; 306 goto out_free;
307 } 307 }
308 308
309 spin_lock_irqsave(&local->sta_lock, flags); 309 spin_lock_irqsave(&local->sta_lock, flags);
310 /* check if STA exists already */ 310 /* check if STA exists already */
311 if (__sta_info_find(local, sta->addr)) { 311 if (__sta_info_find(local, sta->sta.addr)) {
312 spin_unlock_irqrestore(&local->sta_lock, flags); 312 spin_unlock_irqrestore(&local->sta_lock, flags);
313 err = -EEXIST; 313 err = -EEXIST;
314 goto out_free; 314 goto out_free;
@@ -319,18 +319,18 @@ int sta_info_insert(struct sta_info *sta)
319 319
320 /* notify driver */ 320 /* notify driver */
321 if (local->ops->sta_notify) { 321 if (local->ops->sta_notify) {
322 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) 322 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
323 sdata = container_of(sdata->bss, 323 sdata = container_of(sdata->bss,
324 struct ieee80211_sub_if_data, 324 struct ieee80211_sub_if_data,
325 u.ap); 325 u.ap);
326 326
327 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 327 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
328 STA_NOTIFY_ADD, sta->addr); 328 STA_NOTIFY_ADD, &sta->sta);
329 } 329 }
330 330
331#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 331#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
332 printk(KERN_DEBUG "%s: Inserted STA %s\n", 332 printk(KERN_DEBUG "%s: Inserted STA %s\n",
333 wiphy_name(local->hw.wiphy), print_mac(mac, sta->addr)); 333 wiphy_name(local->hw.wiphy), print_mac(mac, sta->sta.addr));
334#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 334#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
335 335
336 spin_unlock_irqrestore(&local->sta_lock, flags); 336 spin_unlock_irqrestore(&local->sta_lock, flags);
@@ -379,11 +379,12 @@ static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
379{ 379{
380 BUG_ON(!bss); 380 BUG_ON(!bss);
381 381
382 __bss_tim_set(bss, sta->aid); 382 __bss_tim_set(bss, sta->sta.aid);
383 383
384 if (sta->local->ops->set_tim) { 384 if (sta->local->ops->set_tim) {
385 sta->local->tim_in_locked_section = true; 385 sta->local->tim_in_locked_section = true;
386 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1); 386 sta->local->ops->set_tim(local_to_hw(sta->local),
387 &sta->sta, true);
387 sta->local->tim_in_locked_section = false; 388 sta->local->tim_in_locked_section = false;
388 } 389 }
389} 390}
@@ -404,11 +405,12 @@ static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
404{ 405{
405 BUG_ON(!bss); 406 BUG_ON(!bss);
406 407
407 __bss_tim_clear(bss, sta->aid); 408 __bss_tim_clear(bss, sta->sta.aid);
408 409
409 if (sta->local->ops->set_tim) { 410 if (sta->local->ops->set_tim) {
410 sta->local->tim_in_locked_section = true; 411 sta->local->tim_in_locked_section = true;
411 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0); 412 sta->local->ops->set_tim(local_to_hw(sta->local),
413 &sta->sta, false);
412 sta->local->tim_in_locked_section = false; 414 sta->local->tim_in_locked_section = false;
413 } 415 }
414} 416}
@@ -424,7 +426,7 @@ void sta_info_clear_tim_bit(struct sta_info *sta)
424 spin_unlock_irqrestore(&sta->local->sta_lock, flags); 426 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
425} 427}
426 428
427void __sta_info_unlink(struct sta_info **sta) 429static void __sta_info_unlink(struct sta_info **sta)
428{ 430{
429 struct ieee80211_local *local = (*sta)->local; 431 struct ieee80211_local *local = (*sta)->local;
430 struct ieee80211_sub_if_data *sdata = (*sta)->sdata; 432 struct ieee80211_sub_if_data *sdata = (*sta)->sdata;
@@ -456,13 +458,13 @@ void __sta_info_unlink(struct sta_info **sta)
456 local->num_sta--; 458 local->num_sta--;
457 459
458 if (local->ops->sta_notify) { 460 if (local->ops->sta_notify) {
459 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) 461 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
460 sdata = container_of(sdata->bss, 462 sdata = container_of(sdata->bss,
461 struct ieee80211_sub_if_data, 463 struct ieee80211_sub_if_data,
462 u.ap); 464 u.ap);
463 465
464 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 466 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
465 STA_NOTIFY_REMOVE, (*sta)->addr); 467 STA_NOTIFY_REMOVE, &(*sta)->sta);
466 } 468 }
467 469
468 if (ieee80211_vif_is_mesh(&sdata->vif)) { 470 if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -474,7 +476,7 @@ void __sta_info_unlink(struct sta_info **sta)
474 476
475#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 477#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
476 printk(KERN_DEBUG "%s: Removed STA %s\n", 478 printk(KERN_DEBUG "%s: Removed STA %s\n",
477 wiphy_name(local->hw.wiphy), print_mac(mbuf, (*sta)->addr)); 479 wiphy_name(local->hw.wiphy), print_mac(mbuf, (*sta)->sta.addr));
478#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 480#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
479 481
480 /* 482 /*
@@ -570,7 +572,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
570 local->total_ps_buffered--; 572 local->total_ps_buffered--;
571#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 573#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
572 printk(KERN_DEBUG "Buffered frame expired (STA " 574 printk(KERN_DEBUG "Buffered frame expired (STA "
573 "%s)\n", print_mac(mac, sta->addr)); 575 "%s)\n", print_mac(mac, sta->sta.addr));
574#endif 576#endif
575 dev_kfree_skb(skb); 577 dev_kfree_skb(skb);
576 578
@@ -802,3 +804,40 @@ void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata)
802 schedule_work(&local->sta_flush_work); 804 schedule_work(&local->sta_flush_work);
803 spin_unlock_irqrestore(&local->sta_lock, flags); 805 spin_unlock_irqrestore(&local->sta_lock, flags);
804} 806}
807
/*
 * Unlink and destroy stations that have been idle (no RX) for longer
 * than @exp_time jiffies.  Unlinking happens under sta_lock; the
 * destruction runs afterwards, outside the lock, via a private list.
 * NOTE(review): this walks local->sta_list, i.e. stations of ALL
 * interfaces, not only @sdata's — confirm that is intended.
 */
808void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
809 unsigned long exp_time)
810{
811 struct ieee80211_local *local = sdata->local;
812 struct sta_info *sta, *tmp;
813 LIST_HEAD(tmp_list);
814 DECLARE_MAC_BUF(mac);
815 unsigned long flags;
816
817 spin_lock_irqsave(&local->sta_lock, flags);
818 list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
819 if (time_after(jiffies, sta->last_rx + exp_time)) {
820#ifdef CONFIG_MAC80211_IBSS_DEBUG
821 printk(KERN_DEBUG "%s: expiring inactive STA %s\n",
822 sdata->dev->name, print_mac(mac, sta->sta.addr));
823#endif
824 __sta_info_unlink(&sta);
    /* __sta_info_unlink() may NULL *sta; only queue it if still valid */
825 if (sta)
826 list_add(&sta->list, &tmp_list);
827 }
828 spin_unlock_irqrestore(&local->sta_lock, flags);
829
830 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
831 sta_info_destroy(sta);
832}
833
/*
 * Driver-visible lookup: map a MAC address to the shared
 * struct ieee80211_sta, or NULL when the station is unknown.
 * Uses __sta_info_find(), which is RCU-protected — callers must
 * hold the RCU read lock for the returned pointer to stay valid.
 */
834struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
835 const u8 *addr)
836{
837 struct sta_info *sta = __sta_info_find(hw_to_local(hw), addr);
838
839 if (!sta)
840 return NULL;
    /* hand out only the driver-shared trailing member, not sta_info */
841 return &sta->sta;
842}
843EXPORT_SYMBOL(ieee80211_find_sta);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 109db787ccb7..daedfa9e1c63 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -167,8 +167,6 @@ struct sta_ampdu_mlme {
167 * @lock: used for locking all fields that require locking, see comments 167 * @lock: used for locking all fields that require locking, see comments
168 * in the header file. 168 * in the header file.
169 * @flaglock: spinlock for flags accesses 169 * @flaglock: spinlock for flags accesses
170 * @ht_info: HT capabilities of this STA
171 * @supp_rates: Bitmap of supported rates (per band)
172 * @addr: MAC address of this STA 170 * @addr: MAC address of this STA
173 * @aid: STA's unique AID (1..2007, 0 = not assigned yet), 171 * @aid: STA's unique AID (1..2007, 0 = not assigned yet),
174 * only used in AP (and IBSS?) mode 172 * only used in AP (and IBSS?) mode
@@ -195,15 +193,12 @@ struct sta_ampdu_mlme {
195 * @tx_filtered_count: TBD 193 * @tx_filtered_count: TBD
196 * @tx_retry_failed: TBD 194 * @tx_retry_failed: TBD
197 * @tx_retry_count: TBD 195 * @tx_retry_count: TBD
198 * @tx_num_consecutive_failures: TBD
199 * @tx_num_mpdu_ok: TBD
200 * @tx_num_mpdu_fail: TBD
201 * @fail_avg: moving percentage of failed MSDUs 196 * @fail_avg: moving percentage of failed MSDUs
202 * @tx_packets: number of RX/TX MSDUs 197 * @tx_packets: number of RX/TX MSDUs
203 * @tx_bytes: TBD 198 * @tx_bytes: TBD
204 * @tx_fragments: number of transmitted MPDUs 199 * @tx_fragments: number of transmitted MPDUs
205 * @txrate_idx: TBD 200 * @last_txrate_idx: Index of the last used transmit rate
206 * @last_txrate_idx: TBD 201 * @tid_seq: TBD
207 * @wme_tx_queue: TBD 202 * @wme_tx_queue: TBD
208 * @ampdu_mlme: TBD 203 * @ampdu_mlme: TBD
209 * @timer_to_tid: identity mapping to ID timers 204 * @timer_to_tid: identity mapping to ID timers
@@ -217,6 +212,7 @@ struct sta_ampdu_mlme {
217 * @plink_timeout: TBD 212 * @plink_timeout: TBD
218 * @plink_timer: TBD 213 * @plink_timer: TBD
219 * @debugfs: debug filesystem info 214 * @debugfs: debug filesystem info
215 * @sta: station information we share with the driver
220 */ 216 */
221struct sta_info { 217struct sta_info {
222 /* General information, mostly static */ 218 /* General information, mostly static */
@@ -229,10 +225,7 @@ struct sta_info {
229 void *rate_ctrl_priv; 225 void *rate_ctrl_priv;
230 spinlock_t lock; 226 spinlock_t lock;
231 spinlock_t flaglock; 227 spinlock_t flaglock;
232 struct ieee80211_ht_info ht_info; 228
233 u64 supp_rates[IEEE80211_NUM_BANDS];
234 u8 addr[ETH_ALEN];
235 u16 aid;
236 u16 listen_interval; 229 u16 listen_interval;
237 230
238 /* 231 /*
@@ -272,10 +265,6 @@ struct sta_info {
272 /* Updated from TX status path only, no locking requirements */ 265 /* Updated from TX status path only, no locking requirements */
273 unsigned long tx_filtered_count; 266 unsigned long tx_filtered_count;
274 unsigned long tx_retry_failed, tx_retry_count; 267 unsigned long tx_retry_failed, tx_retry_count;
275 /* TODO: update in generic code not rate control? */
276 u32 tx_num_consecutive_failures;
277 u32 tx_num_mpdu_ok;
278 u32 tx_num_mpdu_fail;
279 /* moving percentage of failed MSDUs */ 268 /* moving percentage of failed MSDUs */
280 unsigned int fail_avg; 269 unsigned int fail_avg;
281 270
@@ -283,8 +272,7 @@ struct sta_info {
283 unsigned long tx_packets; 272 unsigned long tx_packets;
284 unsigned long tx_bytes; 273 unsigned long tx_bytes;
285 unsigned long tx_fragments; 274 unsigned long tx_fragments;
286 int txrate_idx; 275 unsigned int last_txrate_idx;
287 int last_txrate_idx;
288 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; 276 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
289#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 277#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
290 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; 278 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES];
@@ -326,6 +314,9 @@ struct sta_info {
326 struct dentry *agg_status; 314 struct dentry *agg_status;
327 } debugfs; 315 } debugfs;
328#endif 316#endif
317
318 /* keep last! */
319 struct ieee80211_sta sta;
329}; 320};
330 321
331static inline enum plink_state sta_plink_state(struct sta_info *sta) 322static inline enum plink_state sta_plink_state(struct sta_info *sta)
@@ -451,7 +442,6 @@ int sta_info_insert(struct sta_info *sta);
451 * has already unlinked it. 442 * has already unlinked it.
452 */ 443 */
453void sta_info_unlink(struct sta_info **sta); 444void sta_info_unlink(struct sta_info **sta);
454void __sta_info_unlink(struct sta_info **sta);
455 445
456void sta_info_destroy(struct sta_info *sta); 446void sta_info_destroy(struct sta_info *sta);
457void sta_info_set_tim_bit(struct sta_info *sta); 447void sta_info_set_tim_bit(struct sta_info *sta);
@@ -463,5 +453,7 @@ void sta_info_stop(struct ieee80211_local *local);
463int sta_info_flush(struct ieee80211_local *local, 453int sta_info_flush(struct ieee80211_local *local,
464 struct ieee80211_sub_if_data *sdata); 454 struct ieee80211_sub_if_data *sdata);
465void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata); 455void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata);
456void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
457 unsigned long exp_time);
466 458
467#endif /* STA_INFO_H */ 459#endif /* STA_INFO_H */
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 995f7af3d25e..34b32bc8f609 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -304,7 +304,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
304 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 304 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
305 u8 bcast[ETH_ALEN] = 305 u8 bcast[ETH_ALEN] =
306 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 306 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
307 u8 *sta_addr = key->sta->addr; 307 u8 *sta_addr = key->sta->sta.addr;
308 308
309 if (is_multicast_ether_addr(ra)) 309 if (is_multicast_ether_addr(ra))
310 sta_addr = bcast; 310 sta_addr = bcast;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4788f7b91f49..20d683641b42 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -38,43 +38,6 @@
38 38
39/* misc utils */ 39/* misc utils */
40 40
41#ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP
42static void ieee80211_dump_frame(const char *ifname, const char *title,
43 const struct sk_buff *skb)
44{
45 const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
46 unsigned int hdrlen;
47 DECLARE_MAC_BUF(mac);
48
49 printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len);
50 if (skb->len < 4) {
51 printk("\n");
52 return;
53 }
54
55 hdrlen = ieee80211_hdrlen(hdr->frame_control);
56 if (hdrlen > skb->len)
57 hdrlen = skb->len;
58 if (hdrlen >= 4)
59 printk(" FC=0x%04x DUR=0x%04x",
60 le16_to_cpu(hdr->frame_control), le16_to_cpu(hdr->duration_id));
61 if (hdrlen >= 10)
62 printk(" A1=%s", print_mac(mac, hdr->addr1));
63 if (hdrlen >= 16)
64 printk(" A2=%s", print_mac(mac, hdr->addr2));
65 if (hdrlen >= 24)
66 printk(" A3=%s", print_mac(mac, hdr->addr3));
67 if (hdrlen >= 30)
68 printk(" A4=%s", print_mac(mac, hdr->addr4));
69 printk("\n");
70}
71#else /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
72static inline void ieee80211_dump_frame(const char *ifname, const char *title,
73 struct sk_buff *skb)
74{
75}
76#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
77
78static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, 41static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
79 int next_frag_len) 42 int next_frag_len)
80{ 43{
@@ -82,6 +45,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
82 struct ieee80211_rate *txrate; 45 struct ieee80211_rate *txrate;
83 struct ieee80211_local *local = tx->local; 46 struct ieee80211_local *local = tx->local;
84 struct ieee80211_supported_band *sband; 47 struct ieee80211_supported_band *sband;
48 struct ieee80211_hdr *hdr;
85 49
86 sband = local->hw.wiphy->bands[tx->channel->band]; 50 sband = local->hw.wiphy->bands[tx->channel->band];
87 txrate = &sband->bitrates[tx->rate_idx]; 51 txrate = &sband->bitrates[tx->rate_idx];
@@ -107,10 +71,10 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
107 * at the highest possible rate belonging to the PHY rates in the 71 * at the highest possible rate belonging to the PHY rates in the
108 * BSSBasicRateSet 72 * BSSBasicRateSet
109 */ 73 */
110 74 hdr = (struct ieee80211_hdr *)tx->skb->data;
111 if ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) { 75 if (ieee80211_is_ctl(hdr->frame_control)) {
112 /* TODO: These control frames are not currently sent by 76 /* TODO: These control frames are not currently sent by
113 * 80211.o, but should they be implemented, this function 77 * mac80211, but should they be implemented, this function
114 * needs to be updated to support duration field calculation. 78 * needs to be updated to support duration field calculation.
115 * 79 *
116 * RTS: time needed to transmit pending data/mgmt frame plus 80 * RTS: time needed to transmit pending data/mgmt frame plus
@@ -152,7 +116,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
152 if (r->bitrate > txrate->bitrate) 116 if (r->bitrate > txrate->bitrate)
153 break; 117 break;
154 118
155 if (tx->sdata->basic_rates & BIT(i)) 119 if (tx->sdata->bss_conf.basic_rates & BIT(i))
156 rate = r->bitrate; 120 rate = r->bitrate;
157 121
158 switch (sband->band) { 122 switch (sband->band) {
@@ -213,21 +177,19 @@ static int inline is_ieee80211_device(struct net_device *dev,
213static ieee80211_tx_result debug_noinline 177static ieee80211_tx_result debug_noinline
214ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) 178ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
215{ 179{
216#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 180
217 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 181 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
218#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
219 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 182 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
220 u32 sta_flags; 183 u32 sta_flags;
221 184
222 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) 185 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
223 return TX_CONTINUE; 186 return TX_CONTINUE;
224 187
225 if (unlikely(tx->local->sta_sw_scanning) && 188 if (unlikely(tx->local->sw_scanning) &&
226 ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 189 !ieee80211_is_probe_req(hdr->frame_control))
227 (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ))
228 return TX_DROP; 190 return TX_DROP;
229 191
230 if (tx->sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) 192 if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
231 return TX_CONTINUE; 193 return TX_CONTINUE;
232 194
233 if (tx->flags & IEEE80211_TX_PS_BUFFERED) 195 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
@@ -237,8 +199,8 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
237 199
238 if (likely(tx->flags & IEEE80211_TX_UNICAST)) { 200 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
239 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && 201 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
240 tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 202 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
241 (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { 203 ieee80211_is_data(hdr->frame_control))) {
242#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 204#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
243 DECLARE_MAC_BUF(mac); 205 DECLARE_MAC_BUF(mac);
244 printk(KERN_DEBUG "%s: dropped data frame to not " 206 printk(KERN_DEBUG "%s: dropped data frame to not "
@@ -249,9 +211,9 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
249 return TX_DROP; 211 return TX_DROP;
250 } 212 }
251 } else { 213 } else {
252 if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 214 if (unlikely(ieee80211_is_data(hdr->frame_control) &&
253 tx->local->num_sta == 0 && 215 tx->local->num_sta == 0 &&
254 tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS)) { 216 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) {
255 /* 217 /*
256 * No associated STAs - no need to send multicast 218 * No associated STAs - no need to send multicast
257 * frames. 219 * frames.
@@ -282,7 +244,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
282 244
283 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 245 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
284 struct ieee80211_if_ap *ap; 246 struct ieee80211_if_ap *ap;
285 if (sdata->vif.type != IEEE80211_IF_TYPE_AP) 247 if (sdata->vif.type != NL80211_IFTYPE_AP)
286 continue; 248 continue;
287 ap = &sdata->u.ap; 249 ap = &sdata->u.ap;
288 skb = skb_dequeue(&ap->ps_bc_buf); 250 skb = skb_dequeue(&ap->ps_bc_buf);
@@ -315,6 +277,7 @@ static ieee80211_tx_result
315ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) 277ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
316{ 278{
317 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 279 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
280 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
318 281
319 /* 282 /*
320 * broadcast/multicast frame 283 * broadcast/multicast frame
@@ -329,7 +292,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
329 return TX_CONTINUE; 292 return TX_CONTINUE;
330 293
331 /* no buffering for ordered frames */ 294 /* no buffering for ordered frames */
332 if (tx->fc & IEEE80211_FCTL_ORDER) 295 if (ieee80211_has_order(hdr->frame_control))
333 return TX_CONTINUE; 296 return TX_CONTINUE;
334 297
335 /* no stations in PS mode */ 298 /* no stations in PS mode */
@@ -367,12 +330,11 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
367{ 330{
368 struct sta_info *sta = tx->sta; 331 struct sta_info *sta = tx->sta;
369 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 332 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
333 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
370 u32 staflags; 334 u32 staflags;
371 DECLARE_MAC_BUF(mac); 335 DECLARE_MAC_BUF(mac);
372 336
373 if (unlikely(!sta || 337 if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)))
374 ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
375 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)))
376 return TX_CONTINUE; 338 return TX_CONTINUE;
377 339
378 staflags = get_sta_flags(sta); 340 staflags = get_sta_flags(sta);
@@ -382,7 +344,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
382#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 344#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
383 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " 345 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries "
384 "before %d)\n", 346 "before %d)\n",
385 print_mac(mac, sta->addr), sta->aid, 347 print_mac(mac, sta->sta.addr), sta->sta.aid,
386 skb_queue_len(&sta->ps_tx_buf)); 348 skb_queue_len(&sta->ps_tx_buf));
387#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 349#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
388 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 350 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -393,7 +355,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
393 if (net_ratelimit()) { 355 if (net_ratelimit()) {
394 printk(KERN_DEBUG "%s: STA %s TX " 356 printk(KERN_DEBUG "%s: STA %s TX "
395 "buffer full - dropping oldest frame\n", 357 "buffer full - dropping oldest frame\n",
396 tx->dev->name, print_mac(mac, sta->addr)); 358 tx->dev->name, print_mac(mac, sta->sta.addr));
397 } 359 }
398#endif 360#endif
399 dev_kfree_skb(old); 361 dev_kfree_skb(old);
@@ -412,7 +374,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
412 else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) { 374 else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) {
413 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " 375 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll "
414 "set -> send frame\n", tx->dev->name, 376 "set -> send frame\n", tx->dev->name,
415 print_mac(mac, sta->addr)); 377 print_mac(mac, sta->sta.addr));
416 } 378 }
417#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 379#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
418 clear_sta_flags(sta, WLAN_STA_PSPOLL); 380 clear_sta_flags(sta, WLAN_STA_PSPOLL);
@@ -437,7 +399,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
437{ 399{
438 struct ieee80211_key *key; 400 struct ieee80211_key *key;
439 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 401 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
440 u16 fc = tx->fc; 402 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
441 403
442 if (unlikely(tx->skb->do_not_encrypt)) 404 if (unlikely(tx->skb->do_not_encrypt))
443 tx->key = NULL; 405 tx->key = NULL;
@@ -454,22 +416,16 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
454 tx->key = NULL; 416 tx->key = NULL;
455 417
456 if (tx->key) { 418 if (tx->key) {
457 u16 ftype, stype;
458
459 tx->key->tx_rx_count++; 419 tx->key->tx_rx_count++;
460 /* TODO: add threshold stuff again */ 420 /* TODO: add threshold stuff again */
461 421
462 switch (tx->key->conf.alg) { 422 switch (tx->key->conf.alg) {
463 case ALG_WEP: 423 case ALG_WEP:
464 ftype = fc & IEEE80211_FCTL_FTYPE; 424 if (ieee80211_is_auth(hdr->frame_control))
465 stype = fc & IEEE80211_FCTL_STYPE;
466
467 if (ftype == IEEE80211_FTYPE_MGMT &&
468 stype == IEEE80211_STYPE_AUTH)
469 break; 425 break;
470 case ALG_TKIP: 426 case ALG_TKIP:
471 case ALG_CCMP: 427 case ALG_CCMP:
472 if (!WLAN_FC_DATA_PRESENT(fc)) 428 if (!ieee80211_is_data_present(hdr->frame_control))
473 tx->key = NULL; 429 tx->key = NULL;
474 break; 430 break;
475 } 431 }
@@ -492,6 +448,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
492 448
493 if (likely(tx->rate_idx < 0)) { 449 if (likely(tx->rate_idx < 0)) {
494 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel); 450 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel);
451 if (tx->sta)
452 tx->sta->last_txrate_idx = rsel.rate_idx;
495 tx->rate_idx = rsel.rate_idx; 453 tx->rate_idx = rsel.rate_idx;
496 if (unlikely(rsel.probe_idx >= 0)) { 454 if (unlikely(rsel.probe_idx >= 0)) {
497 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; 455 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
@@ -535,7 +493,7 @@ ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
535 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 493 sband = tx->local->hw.wiphy->bands[tx->channel->band];
536 494
537 if (tx->sta) 495 if (tx->sta)
538 info->control.aid = tx->sta->aid; 496 info->control.sta = &tx->sta->sta;
539 497
540 if (!info->control.retry_limit) { 498 if (!info->control.retry_limit) {
541 if (!is_multicast_ether_addr(hdr->addr1)) { 499 if (!is_multicast_ether_addr(hdr->addr1)) {
@@ -601,7 +559,7 @@ ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
601 for (idx = 0; idx < sband->n_bitrates; idx++) { 559 for (idx = 0; idx < sband->n_bitrates; idx++) {
602 if (sband->bitrates[idx].bitrate > rate->bitrate) 560 if (sband->bitrates[idx].bitrate > rate->bitrate)
603 continue; 561 continue;
604 if (tx->sdata->basic_rates & BIT(idx) && 562 if (tx->sdata->bss_conf.basic_rates & BIT(idx) &&
605 (baserate < 0 || 563 (baserate < 0 ||
606 (sband->bitrates[baserate].bitrate 564 (sband->bitrates[baserate].bitrate
607 < sband->bitrates[idx].bitrate))) 565 < sband->bitrates[idx].bitrate)))
@@ -615,7 +573,7 @@ ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
615 } 573 }
616 574
617 if (tx->sta) 575 if (tx->sta)
618 info->control.aid = tx->sta->aid; 576 info->control.sta = &tx->sta->sta;
619 577
620 return TX_CONTINUE; 578 return TX_CONTINUE;
621} 579}
@@ -629,7 +587,14 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
629 u8 *qc; 587 u8 *qc;
630 int tid; 588 int tid;
631 589
632 /* only for injected frames */ 590 /*
591 * Packet injection may want to control the sequence
592 * number, if we have no matching interface then we
593 * neither assign one ourselves nor ask the driver to.
594 */
595 if (unlikely(!info->control.vif))
596 return TX_CONTINUE;
597
633 if (unlikely(ieee80211_is_ctl(hdr->frame_control))) 598 if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
634 return TX_CONTINUE; 599 return TX_CONTINUE;
635 600
@@ -854,7 +819,6 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
854 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 819 sband = tx->local->hw.wiphy->bands[tx->channel->band];
855 820
856 skb->do_not_encrypt = 1; 821 skb->do_not_encrypt = 1;
857 info->flags |= IEEE80211_TX_CTL_INJECTED;
858 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 822 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
859 823
860 /* 824 /*
@@ -986,7 +950,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
986 950
987 /* process and remove the injection radiotap header */ 951 /* process and remove the injection radiotap header */
988 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 952 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
989 if (unlikely(sdata->vif.type == IEEE80211_IF_TYPE_MNTR)) { 953 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) {
990 if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP) 954 if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP)
991 return TX_DROP; 955 return TX_DROP;
992 956
@@ -1000,7 +964,6 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1000 hdr = (struct ieee80211_hdr *) skb->data; 964 hdr = (struct ieee80211_hdr *) skb->data;
1001 965
1002 tx->sta = sta_info_get(local, hdr->addr1); 966 tx->sta = sta_info_get(local, hdr->addr1);
1003 tx->fc = le16_to_cpu(hdr->frame_control);
1004 967
1005 if (is_multicast_ether_addr(hdr->addr1)) { 968 if (is_multicast_ether_addr(hdr->addr1)) {
1006 tx->flags &= ~IEEE80211_TX_UNICAST; 969 tx->flags &= ~IEEE80211_TX_UNICAST;
@@ -1025,7 +988,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1025 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) 988 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1026 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 989 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1027 990
1028 hdrlen = ieee80211_get_hdrlen(tx->fc); 991 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1029 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { 992 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1030 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; 993 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
1031 tx->ethertype = (pos[0] << 8) | pos[1]; 994 tx->ethertype = (pos[0] << 8) | pos[1];
@@ -1068,8 +1031,6 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1068 return IEEE80211_TX_AGAIN; 1031 return IEEE80211_TX_AGAIN;
1069 info = IEEE80211_SKB_CB(skb); 1032 info = IEEE80211_SKB_CB(skb);
1070 1033
1071 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1072 "TX to low-level driver", skb);
1073 ret = local->ops->tx(local_to_hw(local), skb); 1034 ret = local->ops->tx(local_to_hw(local), skb);
1074 if (ret) 1035 if (ret)
1075 return IEEE80211_TX_AGAIN; 1036 return IEEE80211_TX_AGAIN;
@@ -1099,9 +1060,6 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1099 ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; 1060 ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
1100 } 1061 }
1101 1062
1102 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1103 "TX to low-level driver",
1104 tx->extra_frag[i]);
1105 ret = local->ops->tx(local_to_hw(local), 1063 ret = local->ops->tx(local_to_hw(local),
1106 tx->extra_frag[i]); 1064 tx->extra_frag[i]);
1107 if (ret) 1065 if (ret)
@@ -1306,6 +1264,11 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1306 struct ieee80211_sub_if_data *osdata; 1264 struct ieee80211_sub_if_data *osdata;
1307 int headroom; 1265 int headroom;
1308 bool may_encrypt; 1266 bool may_encrypt;
1267 enum {
1268 NOT_MONITOR,
1269 FOUND_SDATA,
1270 UNKNOWN_ADDRESS,
1271 } monitor_iface = NOT_MONITOR;
1309 int ret; 1272 int ret;
1310 1273
1311 if (skb->iif) 1274 if (skb->iif)
@@ -1335,12 +1298,56 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1335 if (is_multicast_ether_addr(hdr->addr3)) 1298 if (is_multicast_ether_addr(hdr->addr3))
1336 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); 1299 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
1337 else 1300 else
1338 if (mesh_nexthop_lookup(skb, odev)) 1301 if (mesh_nexthop_lookup(skb, osdata))
1339 return 0; 1302 return 0;
1340 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) 1303 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
1341 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.sta, 1304 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
1342 fwded_frames); 1305 fwded_frames);
1343 } 1306 }
1307 } else if (unlikely(osdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1308 struct ieee80211_sub_if_data *sdata;
1309 struct ieee80211_local *local = osdata->local;
1310 struct ieee80211_hdr *hdr;
1311 int hdrlen;
1312 u16 len_rthdr;
1313
1314 info->flags |= IEEE80211_TX_CTL_INJECTED;
1315 monitor_iface = UNKNOWN_ADDRESS;
1316
1317 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1318 hdr = (struct ieee80211_hdr *)skb->data + len_rthdr;
1319 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1320
1321 /* check the header is complete in the frame */
1322 if (likely(skb->len >= len_rthdr + hdrlen)) {
1323 /*
1324 * We process outgoing injected frames that have a
1325 * local address we handle as though they are our
1326 * own frames.
1327 * This code here isn't entirely correct, the local
1328 * MAC address is not necessarily enough to find
1329 * the interface to use; for that proper VLAN/WDS
1330 * support we will need a different mechanism.
1331 */
1332
1333 rcu_read_lock();
1334 list_for_each_entry_rcu(sdata, &local->interfaces,
1335 list) {
1336 if (!netif_running(sdata->dev))
1337 continue;
1338 if (compare_ether_addr(sdata->dev->dev_addr,
1339 hdr->addr2)) {
1340 dev_hold(sdata->dev);
1341 dev_put(odev);
1342 osdata = sdata;
1343 odev = osdata->dev;
1344 skb->iif = sdata->dev->ifindex;
1345 monitor_iface = FOUND_SDATA;
1346 break;
1347 }
1348 }
1349 rcu_read_unlock();
1350 }
1344 } 1351 }
1345 1352
1346 may_encrypt = !skb->do_not_encrypt; 1353 may_encrypt = !skb->do_not_encrypt;
@@ -1357,7 +1364,12 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1357 return 0; 1364 return 0;
1358 } 1365 }
1359 1366
1360 info->control.vif = &osdata->vif; 1367 if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1368 osdata = container_of(osdata->bss,
1369 struct ieee80211_sub_if_data,
1370 u.ap);
1371 if (likely(monitor_iface != UNKNOWN_ADDRESS))
1372 info->control.vif = &osdata->vif;
1361 ret = ieee80211_tx(odev, skb); 1373 ret = ieee80211_tx(odev, skb);
1362 dev_put(odev); 1374 dev_put(odev);
1363 1375
@@ -1465,8 +1477,8 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1465 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); 1477 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
1466 1478
1467 switch (sdata->vif.type) { 1479 switch (sdata->vif.type) {
1468 case IEEE80211_IF_TYPE_AP: 1480 case NL80211_IFTYPE_AP:
1469 case IEEE80211_IF_TYPE_VLAN: 1481 case NL80211_IFTYPE_AP_VLAN:
1470 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); 1482 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1471 /* DA BSSID SA */ 1483 /* DA BSSID SA */
1472 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1484 memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1474,7 +1486,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1474 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); 1486 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1475 hdrlen = 24; 1487 hdrlen = 24;
1476 break; 1488 break;
1477 case IEEE80211_IF_TYPE_WDS: 1489 case NL80211_IFTYPE_WDS:
1478 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1490 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1479 /* RA TA DA SA */ 1491 /* RA TA DA SA */
1480 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); 1492 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
@@ -1484,16 +1496,16 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1484 hdrlen = 30; 1496 hdrlen = 30;
1485 break; 1497 break;
1486#ifdef CONFIG_MAC80211_MESH 1498#ifdef CONFIG_MAC80211_MESH
1487 case IEEE80211_IF_TYPE_MESH_POINT: 1499 case NL80211_IFTYPE_MESH_POINT:
1488 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1500 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1489 /* RA TA DA SA */ 1501 /* RA TA DA SA */
1490 memset(hdr.addr1, 0, ETH_ALEN); 1502 memset(hdr.addr1, 0, ETH_ALEN);
1491 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); 1503 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
1492 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1504 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1493 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); 1505 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1494 if (!sdata->u.sta.mshcfg.dot11MeshTTL) { 1506 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
1495 /* Do not send frames with mesh_ttl == 0 */ 1507 /* Do not send frames with mesh_ttl == 0 */
1496 sdata->u.sta.mshstats.dropped_frames_ttl++; 1508 sdata->u.mesh.mshstats.dropped_frames_ttl++;
1497 ret = 0; 1509 ret = 0;
1498 goto fail; 1510 goto fail;
1499 } 1511 }
@@ -1501,7 +1513,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1501 hdrlen = 30; 1513 hdrlen = 30;
1502 break; 1514 break;
1503#endif 1515#endif
1504 case IEEE80211_IF_TYPE_STA: 1516 case NL80211_IFTYPE_STATION:
1505 fc |= cpu_to_le16(IEEE80211_FCTL_TODS); 1517 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1506 /* BSSID SA DA */ 1518 /* BSSID SA DA */
1507 memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); 1519 memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN);
@@ -1509,7 +1521,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1509 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1521 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1510 hdrlen = 24; 1522 hdrlen = 24;
1511 break; 1523 break;
1512 case IEEE80211_IF_TYPE_IBSS: 1524 case NL80211_IFTYPE_ADHOC:
1513 /* DA SA BSSID */ 1525 /* DA SA BSSID */
1514 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1526 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1515 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); 1527 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
@@ -1588,19 +1600,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1588 nh_pos -= skip_header_bytes; 1600 nh_pos -= skip_header_bytes;
1589 h_pos -= skip_header_bytes; 1601 h_pos -= skip_header_bytes;
1590 1602
1591 /* TODO: implement support for fragments so that there is no need to
1592 * reallocate and copy payload; it might be enough to support one
1593 * extra fragment that would be copied in the beginning of the frame
1594 * data.. anyway, it would be nice to include this into skb structure
1595 * somehow
1596 *
1597 * There are few options for this:
1598 * use skb->cb as an extra space for 802.11 header
1599 * allocate new buffer if not enough headroom
1600 * make sure that there is enough headroom in every skb by increasing
1601 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and
1602 * alloc_skb() (net/core/skbuff.c)
1603 */
1604 head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); 1603 head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
1605 1604
1606 /* 1605 /*
@@ -1823,10 +1822,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1823 struct rate_selection rsel; 1822 struct rate_selection rsel;
1824 struct beacon_data *beacon; 1823 struct beacon_data *beacon;
1825 struct ieee80211_supported_band *sband; 1824 struct ieee80211_supported_band *sband;
1826 struct ieee80211_mgmt *mgmt;
1827 int *num_beacons;
1828 enum ieee80211_band band = local->hw.conf.channel->band; 1825 enum ieee80211_band band = local->hw.conf.channel->band;
1829 u8 *pos;
1830 1826
1831 sband = local->hw.wiphy->bands[band]; 1827 sband = local->hw.wiphy->bands[band];
1832 1828
@@ -1835,7 +1831,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1835 sdata = vif_to_sdata(vif); 1831 sdata = vif_to_sdata(vif);
1836 bdev = sdata->dev; 1832 bdev = sdata->dev;
1837 1833
1838 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 1834 if (sdata->vif.type == NL80211_IFTYPE_AP) {
1839 ap = &sdata->u.ap; 1835 ap = &sdata->u.ap;
1840 beacon = rcu_dereference(ap->beacon); 1836 beacon = rcu_dereference(ap->beacon);
1841 if (ap && beacon) { 1837 if (ap && beacon) {
@@ -1873,11 +1869,9 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1873 if (beacon->tail) 1869 if (beacon->tail)
1874 memcpy(skb_put(skb, beacon->tail_len), 1870 memcpy(skb_put(skb, beacon->tail_len),
1875 beacon->tail, beacon->tail_len); 1871 beacon->tail, beacon->tail_len);
1876
1877 num_beacons = &ap->num_beacons;
1878 } else 1872 } else
1879 goto out; 1873 goto out;
1880 } else if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 1874 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1881 struct ieee80211_hdr *hdr; 1875 struct ieee80211_hdr *hdr;
1882 ifsta = &sdata->u.sta; 1876 ifsta = &sdata->u.sta;
1883 1877
@@ -1889,11 +1883,13 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1889 goto out; 1883 goto out;
1890 1884
1891 hdr = (struct ieee80211_hdr *) skb->data; 1885 hdr = (struct ieee80211_hdr *) skb->data;
1892 hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1886 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1893 IEEE80211_STYPE_BEACON); 1887 IEEE80211_STYPE_BEACON);
1894 1888
1895 num_beacons = &ifsta->num_beacons;
1896 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 1889 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
1890 struct ieee80211_mgmt *mgmt;
1891 u8 *pos;
1892
1897 /* headroom, head length, tail length and maximum TIM length */ 1893 /* headroom, head length, tail length and maximum TIM length */
1898 skb = dev_alloc_skb(local->tx_headroom + 400); 1894 skb = dev_alloc_skb(local->tx_headroom + 400);
1899 if (!skb) 1895 if (!skb)
@@ -1916,9 +1912,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1916 *pos++ = WLAN_EID_SSID; 1912 *pos++ = WLAN_EID_SSID;
1917 *pos++ = 0x0; 1913 *pos++ = 0x0;
1918 1914
1919 mesh_mgmt_ies_add(skb, sdata->dev); 1915 mesh_mgmt_ies_add(skb, sdata);
1920
1921 num_beacons = &sdata->u.sta.num_beacons;
1922 } else { 1916 } else {
1923 WARN_ON(1); 1917 WARN_ON(1);
1924 goto out; 1918 goto out;
@@ -1955,7 +1949,6 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1955 info->antenna_sel_tx = local->hw.conf.antenna_sel_tx; 1949 info->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1956 info->control.retry_limit = 1; 1950 info->control.retry_limit = 1;
1957 1951
1958 (*num_beacons)++;
1959out: 1952out:
1960 rcu_read_unlock(); 1953 rcu_read_unlock();
1961 return skb; 1954 return skb;
@@ -2017,7 +2010,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2017 rcu_read_lock(); 2010 rcu_read_lock();
2018 beacon = rcu_dereference(bss->beacon); 2011 beacon = rcu_dereference(bss->beacon);
2019 2012
2020 if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || !beacon->head) 2013 if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
2021 goto out; 2014 goto out;
2022 2015
2023 if (bss->dtim_count != 0) 2016 if (bss->dtim_count != 0)
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0d463c80c404..f32561ec224c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -43,7 +43,7 @@ const unsigned char bridge_tunnel_header[] __aligned(2) =
43 43
44 44
45u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 45u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
46 enum ieee80211_if_types type) 46 enum nl80211_iftype type)
47{ 47{
48 __le16 fc = hdr->frame_control; 48 __le16 fc = hdr->frame_control;
49 49
@@ -77,10 +77,10 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
77 77
78 if (ieee80211_is_back_req(fc)) { 78 if (ieee80211_is_back_req(fc)) {
79 switch (type) { 79 switch (type) {
80 case IEEE80211_IF_TYPE_STA: 80 case NL80211_IFTYPE_STATION:
81 return hdr->addr2; 81 return hdr->addr2;
82 case IEEE80211_IF_TYPE_AP: 82 case NL80211_IFTYPE_AP:
83 case IEEE80211_IF_TYPE_VLAN: 83 case NL80211_IFTYPE_AP_VLAN:
84 return hdr->addr1; 84 return hdr->addr1;
85 default: 85 default:
86 break; /* fall through to the return */ 86 break; /* fall through to the return */
@@ -91,45 +91,6 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
91 return NULL; 91 return NULL;
92} 92}
93 93
94int ieee80211_get_hdrlen(u16 fc)
95{
96 int hdrlen = 24;
97
98 switch (fc & IEEE80211_FCTL_FTYPE) {
99 case IEEE80211_FTYPE_DATA:
100 if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
101 hdrlen = 30; /* Addr4 */
102 /*
103 * The QoS Control field is two bytes and its presence is
104 * indicated by the IEEE80211_STYPE_QOS_DATA bit. Add 2 to
105 * hdrlen if that bit is set.
106 * This works by masking out the bit and shifting it to
107 * bit position 1 so the result has the value 0 or 2.
108 */
109 hdrlen += (fc & IEEE80211_STYPE_QOS_DATA)
110 >> (ilog2(IEEE80211_STYPE_QOS_DATA)-1);
111 break;
112 case IEEE80211_FTYPE_CTL:
113 /*
114 * ACK and CTS are 10 bytes, all others 16. To see how
115 * to get this condition consider
116 * subtype mask: 0b0000000011110000 (0x00F0)
117 * ACK subtype: 0b0000000011010000 (0x00D0)
118 * CTS subtype: 0b0000000011000000 (0x00C0)
119 * bits that matter: ^^^ (0x00E0)
120 * value of those: 0b0000000011000000 (0x00C0)
121 */
122 if ((fc & 0xE0) == 0xC0)
123 hdrlen = 10;
124 else
125 hdrlen = 16;
126 break;
127 }
128
129 return hdrlen;
130}
131EXPORT_SYMBOL(ieee80211_get_hdrlen);
132
133unsigned int ieee80211_hdrlen(__le16 fc) 94unsigned int ieee80211_hdrlen(__le16 fc)
134{ 95{
135 unsigned int hdrlen = 24; 96 unsigned int hdrlen = 24;
@@ -270,16 +231,21 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
270 struct ieee80211_rate *rate) 231 struct ieee80211_rate *rate)
271{ 232{
272 struct ieee80211_local *local = hw_to_local(hw); 233 struct ieee80211_local *local = hw_to_local(hw);
273 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 234 struct ieee80211_sub_if_data *sdata;
274 u16 dur; 235 u16 dur;
275 int erp; 236 int erp;
237 bool short_preamble = false;
276 238
277 erp = 0; 239 erp = 0;
278 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 240 if (vif) {
279 erp = rate->flags & IEEE80211_RATE_ERP_G; 241 sdata = vif_to_sdata(vif);
242 short_preamble = sdata->bss_conf.use_short_preamble;
243 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
244 erp = rate->flags & IEEE80211_RATE_ERP_G;
245 }
280 246
281 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp, 247 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp,
282 sdata->bss_conf.use_short_preamble); 248 short_preamble);
283 249
284 return cpu_to_le16(dur); 250 return cpu_to_le16(dur);
285} 251}
@@ -291,7 +257,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
291{ 257{
292 struct ieee80211_local *local = hw_to_local(hw); 258 struct ieee80211_local *local = hw_to_local(hw);
293 struct ieee80211_rate *rate; 259 struct ieee80211_rate *rate;
294 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 260 struct ieee80211_sub_if_data *sdata;
295 bool short_preamble; 261 bool short_preamble;
296 int erp; 262 int erp;
297 u16 dur; 263 u16 dur;
@@ -299,13 +265,17 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
299 265
300 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 266 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
301 267
302 short_preamble = sdata->bss_conf.use_short_preamble; 268 short_preamble = false;
303 269
304 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; 270 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
305 271
306 erp = 0; 272 erp = 0;
307 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 273 if (vif) {
308 erp = rate->flags & IEEE80211_RATE_ERP_G; 274 sdata = vif_to_sdata(vif);
275 short_preamble = sdata->bss_conf.use_short_preamble;
276 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
277 erp = rate->flags & IEEE80211_RATE_ERP_G;
278 }
309 279
310 /* CTS duration */ 280 /* CTS duration */
311 dur = ieee80211_frame_duration(local, 10, rate->bitrate, 281 dur = ieee80211_frame_duration(local, 10, rate->bitrate,
@@ -328,7 +298,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
328{ 298{
329 struct ieee80211_local *local = hw_to_local(hw); 299 struct ieee80211_local *local = hw_to_local(hw);
330 struct ieee80211_rate *rate; 300 struct ieee80211_rate *rate;
331 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 301 struct ieee80211_sub_if_data *sdata;
332 bool short_preamble; 302 bool short_preamble;
333 int erp; 303 int erp;
334 u16 dur; 304 u16 dur;
@@ -336,12 +306,16 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
336 306
337 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 307 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
338 308
339 short_preamble = sdata->bss_conf.use_short_preamble; 309 short_preamble = false;
340 310
341 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; 311 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
342 erp = 0; 312 erp = 0;
343 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 313 if (vif) {
344 erp = rate->flags & IEEE80211_RATE_ERP_G; 314 sdata = vif_to_sdata(vif);
315 short_preamble = sdata->bss_conf.use_short_preamble;
316 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
317 erp = rate->flags & IEEE80211_RATE_ERP_G;
318 }
345 319
346 /* Data frame duration */ 320 /* Data frame duration */
347 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, 321 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
@@ -386,6 +360,13 @@ void ieee80211_stop_queues(struct ieee80211_hw *hw)
386} 360}
387EXPORT_SYMBOL(ieee80211_stop_queues); 361EXPORT_SYMBOL(ieee80211_stop_queues);
388 362
363int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
364{
365 struct ieee80211_local *local = hw_to_local(hw);
366 return __netif_subqueue_stopped(local->mdev, queue);
367}
368EXPORT_SYMBOL(ieee80211_queue_stopped);
369
389void ieee80211_wake_queues(struct ieee80211_hw *hw) 370void ieee80211_wake_queues(struct ieee80211_hw *hw)
390{ 371{
391 int i; 372 int i;
@@ -408,15 +389,16 @@ void ieee80211_iterate_active_interfaces(
408 389
409 list_for_each_entry(sdata, &local->interfaces, list) { 390 list_for_each_entry(sdata, &local->interfaces, list) {
410 switch (sdata->vif.type) { 391 switch (sdata->vif.type) {
411 case IEEE80211_IF_TYPE_INVALID: 392 case __NL80211_IFTYPE_AFTER_LAST:
412 case IEEE80211_IF_TYPE_MNTR: 393 case NL80211_IFTYPE_UNSPECIFIED:
413 case IEEE80211_IF_TYPE_VLAN: 394 case NL80211_IFTYPE_MONITOR:
395 case NL80211_IFTYPE_AP_VLAN:
414 continue; 396 continue;
415 case IEEE80211_IF_TYPE_AP: 397 case NL80211_IFTYPE_AP:
416 case IEEE80211_IF_TYPE_STA: 398 case NL80211_IFTYPE_STATION:
417 case IEEE80211_IF_TYPE_IBSS: 399 case NL80211_IFTYPE_ADHOC:
418 case IEEE80211_IF_TYPE_WDS: 400 case NL80211_IFTYPE_WDS:
419 case IEEE80211_IF_TYPE_MESH_POINT: 401 case NL80211_IFTYPE_MESH_POINT:
420 break; 402 break;
421 } 403 }
422 if (netif_running(sdata->dev)) 404 if (netif_running(sdata->dev))
@@ -441,15 +423,16 @@ void ieee80211_iterate_active_interfaces_atomic(
441 423
442 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 424 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
443 switch (sdata->vif.type) { 425 switch (sdata->vif.type) {
444 case IEEE80211_IF_TYPE_INVALID: 426 case __NL80211_IFTYPE_AFTER_LAST:
445 case IEEE80211_IF_TYPE_MNTR: 427 case NL80211_IFTYPE_UNSPECIFIED:
446 case IEEE80211_IF_TYPE_VLAN: 428 case NL80211_IFTYPE_MONITOR:
429 case NL80211_IFTYPE_AP_VLAN:
447 continue; 430 continue;
448 case IEEE80211_IF_TYPE_AP: 431 case NL80211_IFTYPE_AP:
449 case IEEE80211_IF_TYPE_STA: 432 case NL80211_IFTYPE_STATION:
450 case IEEE80211_IF_TYPE_IBSS: 433 case NL80211_IFTYPE_ADHOC:
451 case IEEE80211_IF_TYPE_WDS: 434 case NL80211_IFTYPE_WDS:
452 case IEEE80211_IF_TYPE_MESH_POINT: 435 case NL80211_IFTYPE_MESH_POINT:
453 break; 436 break;
454 } 437 }
455 if (netif_running(sdata->dev)) 438 if (netif_running(sdata->dev))
@@ -460,3 +443,243 @@ void ieee80211_iterate_active_interfaces_atomic(
460 rcu_read_unlock(); 443 rcu_read_unlock();
461} 444}
462EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); 445EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
446
447void ieee802_11_parse_elems(u8 *start, size_t len,
448 struct ieee802_11_elems *elems)
449{
450 size_t left = len;
451 u8 *pos = start;
452
453 memset(elems, 0, sizeof(*elems));
454 elems->ie_start = start;
455 elems->total_len = len;
456
457 while (left >= 2) {
458 u8 id, elen;
459
460 id = *pos++;
461 elen = *pos++;
462 left -= 2;
463
464 if (elen > left)
465 return;
466
467 switch (id) {
468 case WLAN_EID_SSID:
469 elems->ssid = pos;
470 elems->ssid_len = elen;
471 break;
472 case WLAN_EID_SUPP_RATES:
473 elems->supp_rates = pos;
474 elems->supp_rates_len = elen;
475 break;
476 case WLAN_EID_FH_PARAMS:
477 elems->fh_params = pos;
478 elems->fh_params_len = elen;
479 break;
480 case WLAN_EID_DS_PARAMS:
481 elems->ds_params = pos;
482 elems->ds_params_len = elen;
483 break;
484 case WLAN_EID_CF_PARAMS:
485 elems->cf_params = pos;
486 elems->cf_params_len = elen;
487 break;
488 case WLAN_EID_TIM:
489 elems->tim = pos;
490 elems->tim_len = elen;
491 break;
492 case WLAN_EID_IBSS_PARAMS:
493 elems->ibss_params = pos;
494 elems->ibss_params_len = elen;
495 break;
496 case WLAN_EID_CHALLENGE:
497 elems->challenge = pos;
498 elems->challenge_len = elen;
499 break;
500 case WLAN_EID_WPA:
501 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
502 pos[2] == 0xf2) {
503 /* Microsoft OUI (00:50:F2) */
504 if (pos[3] == 1) {
505 /* OUI Type 1 - WPA IE */
506 elems->wpa = pos;
507 elems->wpa_len = elen;
508 } else if (elen >= 5 && pos[3] == 2) {
509 if (pos[4] == 0) {
510 elems->wmm_info = pos;
511 elems->wmm_info_len = elen;
512 } else if (pos[4] == 1) {
513 elems->wmm_param = pos;
514 elems->wmm_param_len = elen;
515 }
516 }
517 }
518 break;
519 case WLAN_EID_RSN:
520 elems->rsn = pos;
521 elems->rsn_len = elen;
522 break;
523 case WLAN_EID_ERP_INFO:
524 elems->erp_info = pos;
525 elems->erp_info_len = elen;
526 break;
527 case WLAN_EID_EXT_SUPP_RATES:
528 elems->ext_supp_rates = pos;
529 elems->ext_supp_rates_len = elen;
530 break;
531 case WLAN_EID_HT_CAPABILITY:
532 elems->ht_cap_elem = pos;
533 elems->ht_cap_elem_len = elen;
534 break;
535 case WLAN_EID_HT_EXTRA_INFO:
536 elems->ht_info_elem = pos;
537 elems->ht_info_elem_len = elen;
538 break;
539 case WLAN_EID_MESH_ID:
540 elems->mesh_id = pos;
541 elems->mesh_id_len = elen;
542 break;
543 case WLAN_EID_MESH_CONFIG:
544 elems->mesh_config = pos;
545 elems->mesh_config_len = elen;
546 break;
547 case WLAN_EID_PEER_LINK:
548 elems->peer_link = pos;
549 elems->peer_link_len = elen;
550 break;
551 case WLAN_EID_PREQ:
552 elems->preq = pos;
553 elems->preq_len = elen;
554 break;
555 case WLAN_EID_PREP:
556 elems->prep = pos;
557 elems->prep_len = elen;
558 break;
559 case WLAN_EID_PERR:
560 elems->perr = pos;
561 elems->perr_len = elen;
562 break;
563 case WLAN_EID_CHANNEL_SWITCH:
564 elems->ch_switch_elem = pos;
565 elems->ch_switch_elem_len = elen;
566 break;
567 case WLAN_EID_QUIET:
568 if (!elems->quiet_elem) {
569 elems->quiet_elem = pos;
570 elems->quiet_elem_len = elen;
571 }
572 elems->num_of_quiet_elem++;
573 break;
574 case WLAN_EID_COUNTRY:
575 elems->country_elem = pos;
576 elems->country_elem_len = elen;
577 break;
578 case WLAN_EID_PWR_CONSTRAINT:
579 elems->pwr_constr_elem = pos;
580 elems->pwr_constr_elem_len = elen;
581 break;
582 default:
583 break;
584 }
585
586 left -= elen;
587 pos += elen;
588 }
589}
590
591void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
592{
593 struct ieee80211_local *local = sdata->local;
594 struct ieee80211_tx_queue_params qparam;
595 int i;
596
597 if (!local->ops->conf_tx)
598 return;
599
600 memset(&qparam, 0, sizeof(qparam));
601
602 qparam.aifs = 2;
603
604 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
605 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE))
606 qparam.cw_min = 31;
607 else
608 qparam.cw_min = 15;
609
610 qparam.cw_max = 1023;
611 qparam.txop = 0;
612
613 for (i = 0; i < local_to_hw(local)->queues; i++)
614 local->ops->conf_tx(local_to_hw(local), i, &qparam);
615}
616
617void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
618 int encrypt)
619{
620 skb->dev = sdata->local->mdev;
621 skb_set_mac_header(skb, 0);
622 skb_set_network_header(skb, 0);
623 skb_set_transport_header(skb, 0);
624
625 skb->iif = sdata->dev->ifindex;
626 skb->do_not_encrypt = !encrypt;
627
628 dev_queue_xmit(skb);
629}
630
631int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freqMHz)
632{
633 int ret = -EINVAL;
634 struct ieee80211_channel *chan;
635 struct ieee80211_local *local = sdata->local;
636
637 chan = ieee80211_get_channel(local->hw.wiphy, freqMHz);
638
639 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
640 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
641 chan->flags & IEEE80211_CHAN_NO_IBSS) {
642 printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
643 "%d MHz\n", sdata->dev->name, chan->center_freq);
644 return ret;
645 }
646 local->oper_channel = chan;
647
648 if (local->sw_scanning || local->hw_scanning)
649 ret = 0;
650 else
651 ret = ieee80211_hw_config(local);
652
653 rate_control_clear(local);
654 }
655
656 return ret;
657}
658
659u64 ieee80211_mandatory_rates(struct ieee80211_local *local,
660 enum ieee80211_band band)
661{
662 struct ieee80211_supported_band *sband;
663 struct ieee80211_rate *bitrates;
664 u64 mandatory_rates;
665 enum ieee80211_rate_flags mandatory_flag;
666 int i;
667
668 sband = local->hw.wiphy->bands[band];
669 if (!sband) {
670 WARN_ON(1);
671 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
672 }
673
674 if (band == IEEE80211_BAND_2GHZ)
675 mandatory_flag = IEEE80211_RATE_MANDATORY_B;
676 else
677 mandatory_flag = IEEE80211_RATE_MANDATORY_A;
678
679 bitrates = sband->bitrates;
680 mandatory_rates = 0;
681 for (i = 0; i < sband->n_bitrates; i++)
682 if (bitrates[i].flags & mandatory_flag)
683 mandatory_rates |= BIT(i);
684 return mandatory_rates;
685}
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 5c2bf0a3d4db..376c84987e4f 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -228,11 +228,10 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
228 return -1; 228 return -1;
229 229
230 hdrlen = ieee80211_hdrlen(hdr->frame_control); 230 hdrlen = ieee80211_hdrlen(hdr->frame_control);
231 231 if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN)
232 if (skb->len < 8 + hdrlen)
233 return -1; 232 return -1;
234 233
235 len = skb->len - hdrlen - 8; 234 len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN;
236 235
237 keyidx = skb->data[hdrlen + 3] >> 6; 236 keyidx = skb->data[hdrlen + 3] >> 6;
238 237
@@ -292,9 +291,10 @@ u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
292ieee80211_rx_result 291ieee80211_rx_result
293ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) 292ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
294{ 293{
295 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && 294 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
296 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 295
297 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) 296 if (!ieee80211_is_data(hdr->frame_control) &&
297 !ieee80211_is_auth(hdr->frame_control))
298 return RX_CONTINUE; 298 return RX_CONTINUE;
299 299
300 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { 300 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
@@ -303,7 +303,7 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
303 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { 303 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) {
304 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 304 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
305 /* remove ICV */ 305 /* remove ICV */
306 skb_trim(rx->skb, rx->skb->len - 4); 306 skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN);
307 } 307 }
308 308
309 return RX_CONTINUE; 309 return RX_CONTINUE;
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 34fa8ed1e784..7e0d53abde24 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -27,22 +27,19 @@
27#include "aes_ccm.h" 27#include "aes_ccm.h"
28 28
29 29
30static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr, 30static int ieee80211_set_encryption(struct ieee80211_sub_if_data *sdata, u8 *sta_addr,
31 int idx, int alg, int remove, 31 int idx, int alg, int remove,
32 int set_tx_key, const u8 *_key, 32 int set_tx_key, const u8 *_key,
33 size_t key_len) 33 size_t key_len)
34{ 34{
35 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 35 struct ieee80211_local *local = sdata->local;
36 struct sta_info *sta; 36 struct sta_info *sta;
37 struct ieee80211_key *key; 37 struct ieee80211_key *key;
38 struct ieee80211_sub_if_data *sdata;
39 int err; 38 int err;
40 39
41 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
42
43 if (idx < 0 || idx >= NUM_DEFAULT_KEYS) { 40 if (idx < 0 || idx >= NUM_DEFAULT_KEYS) {
44 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n", 41 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n",
45 dev->name, idx); 42 sdata->dev->name, idx);
46 return -EINVAL; 43 return -EINVAL;
47 } 44 }
48 45
@@ -125,13 +122,13 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev,
125 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) 122 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
126 return -EOPNOTSUPP; 123 return -EOPNOTSUPP;
127 124
128 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 125 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
129 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 126 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
130 int ret = ieee80211_sta_set_extra_ie(dev, extra, data->length); 127 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
131 if (ret) 128 if (ret)
132 return ret; 129 return ret;
133 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 130 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
134 ieee80211_sta_req_auth(dev, &sdata->u.sta); 131 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
135 return 0; 132 return 0;
136 } 133 }
137 134
@@ -276,21 +273,21 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev,
276 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 273 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
277 int type; 274 int type;
278 275
279 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) 276 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
280 return -EOPNOTSUPP; 277 return -EOPNOTSUPP;
281 278
282 switch (*mode) { 279 switch (*mode) {
283 case IW_MODE_INFRA: 280 case IW_MODE_INFRA:
284 type = IEEE80211_IF_TYPE_STA; 281 type = NL80211_IFTYPE_STATION;
285 break; 282 break;
286 case IW_MODE_ADHOC: 283 case IW_MODE_ADHOC:
287 type = IEEE80211_IF_TYPE_IBSS; 284 type = NL80211_IFTYPE_ADHOC;
288 break; 285 break;
289 case IW_MODE_REPEAT: 286 case IW_MODE_REPEAT:
290 type = IEEE80211_IF_TYPE_WDS; 287 type = NL80211_IFTYPE_WDS;
291 break; 288 break;
292 case IW_MODE_MONITOR: 289 case IW_MODE_MONITOR:
293 type = IEEE80211_IF_TYPE_MNTR; 290 type = NL80211_IFTYPE_MONITOR;
294 break; 291 break;
295 default: 292 default:
296 return -EINVAL; 293 return -EINVAL;
@@ -308,22 +305,22 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev,
308 305
309 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 306 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
310 switch (sdata->vif.type) { 307 switch (sdata->vif.type) {
311 case IEEE80211_IF_TYPE_AP: 308 case NL80211_IFTYPE_AP:
312 *mode = IW_MODE_MASTER; 309 *mode = IW_MODE_MASTER;
313 break; 310 break;
314 case IEEE80211_IF_TYPE_STA: 311 case NL80211_IFTYPE_STATION:
315 *mode = IW_MODE_INFRA; 312 *mode = IW_MODE_INFRA;
316 break; 313 break;
317 case IEEE80211_IF_TYPE_IBSS: 314 case NL80211_IFTYPE_ADHOC:
318 *mode = IW_MODE_ADHOC; 315 *mode = IW_MODE_ADHOC;
319 break; 316 break;
320 case IEEE80211_IF_TYPE_MNTR: 317 case NL80211_IFTYPE_MONITOR:
321 *mode = IW_MODE_MONITOR; 318 *mode = IW_MODE_MONITOR;
322 break; 319 break;
323 case IEEE80211_IF_TYPE_WDS: 320 case NL80211_IFTYPE_WDS:
324 *mode = IW_MODE_REPEAT; 321 *mode = IW_MODE_REPEAT;
325 break; 322 break;
326 case IEEE80211_IF_TYPE_VLAN: 323 case NL80211_IFTYPE_AP_VLAN:
327 *mode = IW_MODE_SECOND; /* FIXME */ 324 *mode = IW_MODE_SECOND; /* FIXME */
328 break; 325 break;
329 default: 326 default:
@@ -333,60 +330,31 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev,
333 return 0; 330 return 0;
334} 331}
335 332
336int ieee80211_set_freq(struct net_device *dev, int freqMHz)
337{
338 int ret = -EINVAL;
339 struct ieee80211_channel *chan;
340 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
341 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
342
343 chan = ieee80211_get_channel(local->hw.wiphy, freqMHz);
344
345 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
346 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
347 chan->flags & IEEE80211_CHAN_NO_IBSS) {
348 printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
349 "%d MHz\n", dev->name, chan->center_freq);
350 return ret;
351 }
352 local->oper_channel = chan;
353
354 if (local->sta_sw_scanning || local->sta_hw_scanning)
355 ret = 0;
356 else
357 ret = ieee80211_hw_config(local);
358
359 rate_control_clear(local);
360 }
361
362 return ret;
363}
364
365static int ieee80211_ioctl_siwfreq(struct net_device *dev, 333static int ieee80211_ioctl_siwfreq(struct net_device *dev,
366 struct iw_request_info *info, 334 struct iw_request_info *info,
367 struct iw_freq *freq, char *extra) 335 struct iw_freq *freq, char *extra)
368{ 336{
369 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 337 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
370 338
371 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) 339 if (sdata->vif.type == NL80211_IFTYPE_STATION)
372 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL; 340 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL;
373 341
374 /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */ 342 /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */
375 if (freq->e == 0) { 343 if (freq->e == 0) {
376 if (freq->m < 0) { 344 if (freq->m < 0) {
377 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) 345 if (sdata->vif.type == NL80211_IFTYPE_STATION)
378 sdata->u.sta.flags |= 346 sdata->u.sta.flags |=
379 IEEE80211_STA_AUTO_CHANNEL_SEL; 347 IEEE80211_STA_AUTO_CHANNEL_SEL;
380 return 0; 348 return 0;
381 } else 349 } else
382 return ieee80211_set_freq(dev, 350 return ieee80211_set_freq(sdata,
383 ieee80211_channel_to_frequency(freq->m)); 351 ieee80211_channel_to_frequency(freq->m));
384 } else { 352 } else {
385 int i, div = 1000000; 353 int i, div = 1000000;
386 for (i = 0; i < freq->e; i++) 354 for (i = 0; i < freq->e; i++)
387 div /= 10; 355 div /= 10;
388 if (div > 0) 356 if (div > 0)
389 return ieee80211_set_freq(dev, freq->m / div); 357 return ieee80211_set_freq(sdata, freq->m / div);
390 else 358 else
391 return -EINVAL; 359 return -EINVAL;
392 } 360 }
@@ -418,8 +386,8 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
418 len--; 386 len--;
419 387
420 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 388 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
421 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 389 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
422 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 390 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
423 int ret; 391 int ret;
424 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { 392 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) {
425 if (len > IEEE80211_MAX_SSID_LEN) 393 if (len > IEEE80211_MAX_SSID_LEN)
@@ -432,14 +400,14 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
432 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; 400 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
433 else 401 else
434 sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL; 402 sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL;
435 ret = ieee80211_sta_set_ssid(dev, ssid, len); 403 ret = ieee80211_sta_set_ssid(sdata, ssid, len);
436 if (ret) 404 if (ret)
437 return ret; 405 return ret;
438 ieee80211_sta_req_auth(dev, &sdata->u.sta); 406 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
439 return 0; 407 return 0;
440 } 408 }
441 409
442 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 410 if (sdata->vif.type == NL80211_IFTYPE_AP) {
443 memcpy(sdata->u.ap.ssid, ssid, len); 411 memcpy(sdata->u.ap.ssid, ssid, len);
444 memset(sdata->u.ap.ssid + len, 0, 412 memset(sdata->u.ap.ssid + len, 0,
445 IEEE80211_MAX_SSID_LEN - len); 413 IEEE80211_MAX_SSID_LEN - len);
@@ -458,9 +426,9 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev,
458 426
459 struct ieee80211_sub_if_data *sdata; 427 struct ieee80211_sub_if_data *sdata;
460 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 428 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
461 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 429 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
462 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 430 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
463 int res = ieee80211_sta_get_ssid(dev, ssid, &len); 431 int res = ieee80211_sta_get_ssid(sdata, ssid, &len);
464 if (res == 0) { 432 if (res == 0) {
465 data->length = len; 433 data->length = len;
466 data->flags = 1; 434 data->flags = 1;
@@ -469,7 +437,7 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev,
469 return res; 437 return res;
470 } 438 }
471 439
472 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 440 if (sdata->vif.type == NL80211_IFTYPE_AP) {
473 len = sdata->u.ap.ssid_len; 441 len = sdata->u.ap.ssid_len;
474 if (len > IW_ESSID_MAX_SIZE) 442 if (len > IW_ESSID_MAX_SIZE)
475 len = IW_ESSID_MAX_SIZE; 443 len = IW_ESSID_MAX_SIZE;
@@ -489,8 +457,8 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
489 struct ieee80211_sub_if_data *sdata; 457 struct ieee80211_sub_if_data *sdata;
490 458
491 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 459 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
492 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 460 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
493 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 461 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
494 int ret; 462 int ret;
495 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { 463 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) {
496 memcpy(sdata->u.sta.bssid, (u8 *) &ap_addr->sa_data, 464 memcpy(sdata->u.sta.bssid, (u8 *) &ap_addr->sa_data,
@@ -504,12 +472,12 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
504 sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL; 472 sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL;
505 else 473 else
506 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 474 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
507 ret = ieee80211_sta_set_bssid(dev, (u8 *) &ap_addr->sa_data); 475 ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data);
508 if (ret) 476 if (ret)
509 return ret; 477 return ret;
510 ieee80211_sta_req_auth(dev, &sdata->u.sta); 478 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
511 return 0; 479 return 0;
512 } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { 480 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
513 /* 481 /*
514 * If it is necessary to update the WDS peer address 482 * If it is necessary to update the WDS peer address
515 * while the interface is running, then we need to do 483 * while the interface is running, then we need to do
@@ -537,10 +505,10 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
537 struct ieee80211_sub_if_data *sdata; 505 struct ieee80211_sub_if_data *sdata;
538 506
539 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 507 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
540 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 508 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
541 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 509 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
542 if (sdata->u.sta.state == IEEE80211_ASSOCIATED || 510 if (sdata->u.sta.state == IEEE80211_STA_MLME_ASSOCIATED ||
543 sdata->u.sta.state == IEEE80211_IBSS_JOINED) { 511 sdata->u.sta.state == IEEE80211_STA_MLME_IBSS_JOINED) {
544 ap_addr->sa_family = ARPHRD_ETHER; 512 ap_addr->sa_family = ARPHRD_ETHER;
545 memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN); 513 memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN);
546 return 0; 514 return 0;
@@ -548,7 +516,7 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
548 memset(&ap_addr->sa_data, 0, ETH_ALEN); 516 memset(&ap_addr->sa_data, 0, ETH_ALEN);
549 return 0; 517 return 0;
550 } 518 }
551 } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { 519 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
552 ap_addr->sa_family = ARPHRD_ETHER; 520 ap_addr->sa_family = ARPHRD_ETHER;
553 memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN); 521 memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN);
554 return 0; 522 return 0;
@@ -570,10 +538,10 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev,
570 if (!netif_running(dev)) 538 if (!netif_running(dev))
571 return -ENETDOWN; 539 return -ENETDOWN;
572 540
573 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 541 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
574 sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 542 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
575 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT && 543 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
576 sdata->vif.type != IEEE80211_IF_TYPE_AP) 544 sdata->vif.type != NL80211_IFTYPE_AP)
577 return -EOPNOTSUPP; 545 return -EOPNOTSUPP;
578 546
579 /* if SSID was specified explicitly then use that */ 547 /* if SSID was specified explicitly then use that */
@@ -584,7 +552,7 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev,
584 ssid_len = req->essid_len; 552 ssid_len = req->essid_len;
585 } 553 }
586 554
587 return ieee80211_sta_req_scan(dev, ssid, ssid_len); 555 return ieee80211_request_scan(sdata, ssid, ssid_len);
588} 556}
589 557
590 558
@@ -594,11 +562,14 @@ static int ieee80211_ioctl_giwscan(struct net_device *dev,
594{ 562{
595 int res; 563 int res;
596 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 564 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
565 struct ieee80211_sub_if_data *sdata;
566
567 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
597 568
598 if (local->sta_sw_scanning || local->sta_hw_scanning) 569 if (local->sw_scanning || local->hw_scanning)
599 return -EAGAIN; 570 return -EAGAIN;
600 571
601 res = ieee80211_sta_scan_results(dev, info, extra, data->length); 572 res = ieee80211_scan_results(local, info, extra, data->length);
602 if (res >= 0) { 573 if (res >= 0) {
603 data->length = res; 574 data->length = res;
604 return 0; 575 return 0;
@@ -656,7 +627,7 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev,
656 627
657 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 628 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
658 629
659 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 630 if (sdata->vif.type != NL80211_IFTYPE_STATION)
660 return -EOPNOTSUPP; 631 return -EOPNOTSUPP;
661 632
662 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 633 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -665,8 +636,8 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev,
665 636
666 sta = sta_info_get(local, sdata->u.sta.bssid); 637 sta = sta_info_get(local, sdata->u.sta.bssid);
667 638
668 if (sta && sta->txrate_idx < sband->n_bitrates) 639 if (sta && sta->last_txrate_idx < sband->n_bitrates)
669 rate->value = sband->bitrates[sta->txrate_idx].bitrate; 640 rate->value = sband->bitrates[sta->last_txrate_idx].bitrate;
670 else 641 else
671 rate->value = 0; 642 rate->value = 0;
672 643
@@ -887,17 +858,17 @@ static int ieee80211_ioctl_siwmlme(struct net_device *dev,
887 struct iw_mlme *mlme = (struct iw_mlme *) extra; 858 struct iw_mlme *mlme = (struct iw_mlme *) extra;
888 859
889 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 860 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
890 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 861 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
891 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) 862 sdata->vif.type != NL80211_IFTYPE_ADHOC)
892 return -EINVAL; 863 return -EINVAL;
893 864
894 switch (mlme->cmd) { 865 switch (mlme->cmd) {
895 case IW_MLME_DEAUTH: 866 case IW_MLME_DEAUTH:
896 /* TODO: mlme->addr.sa_data */ 867 /* TODO: mlme->addr.sa_data */
897 return ieee80211_sta_deauthenticate(dev, mlme->reason_code); 868 return ieee80211_sta_deauthenticate(sdata, mlme->reason_code);
898 case IW_MLME_DISASSOC: 869 case IW_MLME_DISASSOC:
899 /* TODO: mlme->addr.sa_data */ 870 /* TODO: mlme->addr.sa_data */
900 return ieee80211_sta_disassociate(dev, mlme->reason_code); 871 return ieee80211_sta_disassociate(sdata, mlme->reason_code);
901 default: 872 default:
902 return -EOPNOTSUPP; 873 return -EOPNOTSUPP;
903 } 874 }
@@ -938,7 +909,7 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev,
938 } 909 }
939 910
940 return ieee80211_set_encryption( 911 return ieee80211_set_encryption(
941 dev, bcaddr, 912 sdata, bcaddr,
942 idx, alg, remove, 913 idx, alg, remove,
943 !sdata->default_key, 914 !sdata->default_key,
944 keybuf, erq->length); 915 keybuf, erq->length);
@@ -983,7 +954,7 @@ static int ieee80211_ioctl_giwencode(struct net_device *dev,
983 erq->length = sdata->keys[idx]->conf.keylen; 954 erq->length = sdata->keys[idx]->conf.keylen;
984 erq->flags |= IW_ENCODE_ENABLED; 955 erq->flags |= IW_ENCODE_ENABLED;
985 956
986 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { 957 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
987 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 958 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
988 switch (ifsta->auth_alg) { 959 switch (ifsta->auth_alg) {
989 case WLAN_AUTH_OPEN: 960 case WLAN_AUTH_OPEN:
@@ -1057,7 +1028,7 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
1057 sdata->drop_unencrypted = !!data->value; 1028 sdata->drop_unencrypted = !!data->value;
1058 break; 1029 break;
1059 case IW_AUTH_PRIVACY_INVOKED: 1030 case IW_AUTH_PRIVACY_INVOKED:
1060 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 1031 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1061 ret = -EINVAL; 1032 ret = -EINVAL;
1062 else { 1033 else {
1063 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED; 1034 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
@@ -1072,8 +1043,8 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
1072 } 1043 }
1073 break; 1044 break;
1074 case IW_AUTH_80211_AUTH_ALG: 1045 case IW_AUTH_80211_AUTH_ALG:
1075 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 1046 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
1076 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) 1047 sdata->vif.type == NL80211_IFTYPE_ADHOC)
1077 sdata->u.sta.auth_algs = data->value; 1048 sdata->u.sta.auth_algs = data->value;
1078 else 1049 else
1079 ret = -EOPNOTSUPP; 1050 ret = -EOPNOTSUPP;
@@ -1095,8 +1066,8 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev
1095 1066
1096 rcu_read_lock(); 1067 rcu_read_lock();
1097 1068
1098 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 1069 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
1099 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) 1070 sdata->vif.type == NL80211_IFTYPE_ADHOC)
1100 sta = sta_info_get(local, sdata->u.sta.bssid); 1071 sta = sta_info_get(local, sdata->u.sta.bssid);
1101 if (!sta) { 1072 if (!sta) {
1102 wstats->discard.fragment = 0; 1073 wstats->discard.fragment = 0;
@@ -1126,8 +1097,8 @@ static int ieee80211_ioctl_giwauth(struct net_device *dev,
1126 1097
1127 switch (data->flags & IW_AUTH_INDEX) { 1098 switch (data->flags & IW_AUTH_INDEX) {
1128 case IW_AUTH_80211_AUTH_ALG: 1099 case IW_AUTH_80211_AUTH_ALG:
1129 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 1100 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
1130 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) 1101 sdata->vif.type == NL80211_IFTYPE_ADHOC)
1131 data->value = sdata->u.sta.auth_algs; 1102 data->value = sdata->u.sta.auth_algs;
1132 else 1103 else
1133 ret = -EOPNOTSUPP; 1104 ret = -EOPNOTSUPP;
@@ -1184,7 +1155,7 @@ static int ieee80211_ioctl_siwencodeext(struct net_device *dev,
1184 } else 1155 } else
1185 idx--; 1156 idx--;
1186 1157
1187 return ieee80211_set_encryption(dev, ext->addr.sa_data, idx, alg, 1158 return ieee80211_set_encryption(sdata, ext->addr.sa_data, idx, alg,
1188 remove, 1159 remove,
1189 ext->ext_flags & 1160 ext->ext_flags &
1190 IW_ENCODE_EXT_SET_TX_KEY, 1161 IW_ENCODE_EXT_SET_TX_KEY,
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 4310e2f65661..c703f8b44e92 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -39,7 +39,7 @@ static unsigned int classify_1d(struct sk_buff *skb)
39 return skb->priority - 256; 39 return skb->priority - 256;
40 40
41 switch (skb->protocol) { 41 switch (skb->protocol) {
42 case __constant_htons(ETH_P_IP): 42 case htons(ETH_P_IP):
43 dscp = ip_hdr(skb)->tos & 0xfc; 43 dscp = ip_hdr(skb)->tos & 0xfc;
44 break; 44 break;
45 45
@@ -47,8 +47,6 @@ static unsigned int classify_1d(struct sk_buff *skb)
47 return 0; 47 return 0;
48 } 48 }
49 49
50 if (dscp & 0x1c)
51 return 0;
52 return dscp >> 5; 50 return dscp >> 5;
53} 51}
54 52
@@ -212,7 +210,7 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
212 DECLARE_MAC_BUF(mac); 210 DECLARE_MAC_BUF(mac);
213 printk(KERN_DEBUG "allocated aggregation queue" 211 printk(KERN_DEBUG "allocated aggregation queue"
214 " %d tid %d addr %s pool=0x%lX\n", 212 " %d tid %d addr %s pool=0x%lX\n",
215 i, tid, print_mac(mac, sta->addr), 213 i, tid, print_mac(mac, sta->sta.addr),
216 local->queue_pool[0]); 214 local->queue_pool[0]);
217 } 215 }
218#endif /* CONFIG_MAC80211_HT_DEBUG */ 216#endif /* CONFIG_MAC80211_HT_DEBUG */
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 04de28c071a6..bc62f28a4d3d 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * IEEE 802.11 driver (80211.o) - QoS datatypes
3 * Copyright 2004, Instant802 Networks, Inc. 2 * Copyright 2004, Instant802 Networks, Inc.
4 * Copyright 2005, Devicescape Software, Inc. 3 * Copyright 2005, Devicescape Software, Inc.
5 * 4 *
@@ -14,8 +13,6 @@
14#include <linux/netdevice.h> 13#include <linux/netdevice.h>
15#include "ieee80211_i.h" 14#include "ieee80211_i.h"
16 15
17#define QOS_CONTROL_LEN 2
18
19#define QOS_CONTROL_ACK_POLICY_NORMAL 0 16#define QOS_CONTROL_ACK_POLICY_NORMAL 0
20#define QOS_CONTROL_ACK_POLICY_NOACK 1 17#define QOS_CONTROL_ACK_POLICY_NOACK 1
21 18
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 2f33df0dcccf..37ae9a959f63 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -127,7 +127,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
127 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 127 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
128 return RX_DROP_UNUSABLE; 128 return RX_DROP_UNUSABLE;
129 129
130 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, 130 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
131 (void *) skb->data); 131 (void *) skb->data);
132 return RX_DROP_UNUSABLE; 132 return RX_DROP_UNUSABLE;
133 } 133 }
@@ -256,7 +256,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
256 256
257 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, 257 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
258 key, skb->data + hdrlen, 258 key, skb->data + hdrlen,
259 skb->len - hdrlen, rx->sta->addr, 259 skb->len - hdrlen, rx->sta->sta.addr,
260 hdr->addr1, hwaccel, rx->queue, 260 hdr->addr1, hwaccel, rx->queue,
261 &rx->tkip_iv32, 261 &rx->tkip_iv32,
262 &rx->tkip_iv16); 262 &rx->tkip_iv16);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 1b1226d6653f..20633fdf7e6b 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -68,11 +68,21 @@ static const char *const dccprotos[] = {
68static int parse_dcc(char *data, const char *data_end, u_int32_t *ip, 68static int parse_dcc(char *data, const char *data_end, u_int32_t *ip,
69 u_int16_t *port, char **ad_beg_p, char **ad_end_p) 69 u_int16_t *port, char **ad_beg_p, char **ad_end_p)
70{ 70{
71 char *tmp;
72
71 /* at least 12: "AAAAAAAA P\1\n" */ 73 /* at least 12: "AAAAAAAA P\1\n" */
72 while (*data++ != ' ') 74 while (*data++ != ' ')
73 if (data > data_end - 12) 75 if (data > data_end - 12)
74 return -1; 76 return -1;
75 77
78 /* Make sure we have a newline character within the packet boundaries
79 * because simple_strtoul parses until the first invalid character. */
80 for (tmp = data; tmp <= data_end; tmp++)
81 if (*tmp == '\n')
82 break;
83 if (tmp > data_end || *tmp != '\n')
84 return -1;
85
76 *ad_beg_p = data; 86 *ad_beg_p = data;
77 *ip = simple_strtoul(data, &data, 10); 87 *ip = simple_strtoul(data, &data, 10);
78 88
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 654a4f7f12c6..9bd03967fea4 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -45,12 +45,12 @@ static LIST_HEAD(gre_keymap_list);
45 45
46void nf_ct_gre_keymap_flush(void) 46void nf_ct_gre_keymap_flush(void)
47{ 47{
48 struct list_head *pos, *n; 48 struct nf_ct_gre_keymap *km, *tmp;
49 49
50 write_lock_bh(&nf_ct_gre_lock); 50 write_lock_bh(&nf_ct_gre_lock);
51 list_for_each_safe(pos, n, &gre_keymap_list) { 51 list_for_each_entry_safe(km, tmp, &gre_keymap_list, list) {
52 list_del(pos); 52 list_del(&km->list);
53 kfree(pos); 53 kfree(km);
54 } 54 }
55 write_unlock_bh(&nf_ct_gre_lock); 55 write_unlock_bh(&nf_ct_gre_lock);
56} 56}
@@ -97,10 +97,14 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
97 kmp = &help->help.ct_pptp_info.keymap[dir]; 97 kmp = &help->help.ct_pptp_info.keymap[dir];
98 if (*kmp) { 98 if (*kmp) {
99 /* check whether it's a retransmission */ 99 /* check whether it's a retransmission */
100 read_lock_bh(&nf_ct_gre_lock);
100 list_for_each_entry(km, &gre_keymap_list, list) { 101 list_for_each_entry(km, &gre_keymap_list, list) {
101 if (gre_key_cmpfn(km, t) && km == *kmp) 102 if (gre_key_cmpfn(km, t) && km == *kmp) {
103 read_unlock_bh(&nf_ct_gre_lock);
102 return 0; 104 return 0;
105 }
103 } 106 }
107 read_unlock_bh(&nf_ct_gre_lock);
104 pr_debug("trying to override keymap_%s for ct %p\n", 108 pr_debug("trying to override keymap_%s for ct %p\n",
105 dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct); 109 dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct);
106 return -EEXIST; 110 return -EEXIST;
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 2f9bbc058b48..1fa306be60fb 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1193,7 +1193,6 @@ static const struct sip_handler sip_handlers[] = {
1193static int process_sip_response(struct sk_buff *skb, 1193static int process_sip_response(struct sk_buff *skb,
1194 const char **dptr, unsigned int *datalen) 1194 const char **dptr, unsigned int *datalen)
1195{ 1195{
1196 static const struct sip_handler *handler;
1197 enum ip_conntrack_info ctinfo; 1196 enum ip_conntrack_info ctinfo;
1198 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1197 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1199 unsigned int matchoff, matchlen; 1198 unsigned int matchoff, matchlen;
@@ -1214,6 +1213,8 @@ static int process_sip_response(struct sk_buff *skb,
1214 dataoff = matchoff + matchlen + 1; 1213 dataoff = matchoff + matchlen + 1;
1215 1214
1216 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { 1215 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
1216 const struct sip_handler *handler;
1217
1217 handler = &sip_handlers[i]; 1218 handler = &sip_handlers[i];
1218 if (handler->response == NULL) 1219 if (handler->response == NULL)
1219 continue; 1220 continue;
@@ -1228,13 +1229,14 @@ static int process_sip_response(struct sk_buff *skb,
1228static int process_sip_request(struct sk_buff *skb, 1229static int process_sip_request(struct sk_buff *skb,
1229 const char **dptr, unsigned int *datalen) 1230 const char **dptr, unsigned int *datalen)
1230{ 1231{
1231 static const struct sip_handler *handler;
1232 enum ip_conntrack_info ctinfo; 1232 enum ip_conntrack_info ctinfo;
1233 struct nf_conn *ct = nf_ct_get(skb, &ctinfo); 1233 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1234 unsigned int matchoff, matchlen; 1234 unsigned int matchoff, matchlen;
1235 unsigned int cseq, i; 1235 unsigned int cseq, i;
1236 1236
1237 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) { 1237 for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
1238 const struct sip_handler *handler;
1239
1238 handler = &sip_handlers[i]; 1240 handler = &sip_handlers[i];
1239 if (handler->request == NULL) 1241 if (handler->request == NULL)
1240 continue; 1242 continue;
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 9f328593287e..307a2c3c2df4 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -136,17 +136,19 @@ static void localtime_3(struct xtm *r, time_t time)
136 * from w repeatedly while counting.) 136 * from w repeatedly while counting.)
137 */ 137 */
138 if (is_leap(year)) { 138 if (is_leap(year)) {
139 /* use days_since_leapyear[] in a leap year */
139 for (i = ARRAY_SIZE(days_since_leapyear) - 1; 140 for (i = ARRAY_SIZE(days_since_leapyear) - 1;
140 i > 0 && days_since_year[i] > w; --i) 141 i > 0 && days_since_leapyear[i] > w; --i)
141 /* just loop */; 142 /* just loop */;
143 r->monthday = w - days_since_leapyear[i] + 1;
142 } else { 144 } else {
143 for (i = ARRAY_SIZE(days_since_year) - 1; 145 for (i = ARRAY_SIZE(days_since_year) - 1;
144 i > 0 && days_since_year[i] > w; --i) 146 i > 0 && days_since_year[i] > w; --i)
145 /* just loop */; 147 /* just loop */;
148 r->monthday = w - days_since_year[i] + 1;
146 } 149 }
147 150
148 r->month = i + 1; 151 r->month = i + 1;
149 r->monthday = w - days_since_year[i] + 1;
150 return; 152 return;
151} 153}
152 154
diff --git a/net/phonet/Kconfig b/net/phonet/Kconfig
new file mode 100644
index 000000000000..51a5669573f2
--- /dev/null
+++ b/net/phonet/Kconfig
@@ -0,0 +1,16 @@
1#
2# Phonet protocol
3#
4
5config PHONET
6 tristate "Phonet protocols family"
7 help
8 The Phone Network protocol (PhoNet) is a packet-oriented
9 communication protocol developped by Nokia for use with its modems.
10
11 This is required for Maemo to use cellular data connectivity (if
12 supported). It can also be used to control Nokia phones
13 from a Linux computer, although AT commands may be easier to use.
14
15 To compile this driver as a module, choose M here: the module
16 will be called phonet. If unsure, say N.
diff --git a/net/phonet/Makefile b/net/phonet/Makefile
new file mode 100644
index 000000000000..ae9c3ed5be83
--- /dev/null
+++ b/net/phonet/Makefile
@@ -0,0 +1,9 @@
1obj-$(CONFIG_PHONET) += phonet.o
2
3phonet-objs := \
4 pn_dev.o \
5 pn_netlink.o \
6 socket.o \
7 datagram.o \
8 sysctl.o \
9 af_phonet.o
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
new file mode 100644
index 000000000000..1d8df6b7e3df
--- /dev/null
+++ b/net/phonet/af_phonet.c
@@ -0,0 +1,468 @@
1/*
2 * File: af_phonet.c
3 *
4 * Phonet protocols family
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <asm/unaligned.h>
29#include <net/sock.h>
30
31#include <linux/if_phonet.h>
32#include <linux/phonet.h>
33#include <net/phonet/phonet.h>
34#include <net/phonet/pn_dev.h>
35
36static struct net_proto_family phonet_proto_family;
37static struct phonet_protocol *phonet_proto_get(int protocol);
38static inline void phonet_proto_put(struct phonet_protocol *pp);
39
40/* protocol family functions */
41
42static int pn_socket_create(struct net *net, struct socket *sock, int protocol)
43{
44 struct sock *sk;
45 struct pn_sock *pn;
46 struct phonet_protocol *pnp;
47 int err;
48
49 if (net != &init_net)
50 return -EAFNOSUPPORT;
51
52 if (!capable(CAP_SYS_ADMIN))
53 return -EPERM;
54
55 if (protocol == 0) {
56 /* Default protocol selection */
57 switch (sock->type) {
58 case SOCK_DGRAM:
59 protocol = PN_PROTO_PHONET;
60 break;
61 default:
62 return -EPROTONOSUPPORT;
63 }
64 }
65
66 pnp = phonet_proto_get(protocol);
67 if (pnp == NULL)
68 return -EPROTONOSUPPORT;
69 if (sock->type != pnp->sock_type) {
70 err = -EPROTONOSUPPORT;
71 goto out;
72 }
73
74 sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot);
75 if (sk == NULL) {
76 err = -ENOMEM;
77 goto out;
78 }
79
80 sock_init_data(sock, sk);
81 sock->state = SS_UNCONNECTED;
82 sock->ops = pnp->ops;
83 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
84 sk->sk_protocol = protocol;
85 pn = pn_sk(sk);
86 pn->sobject = 0;
87 pn->resource = 0;
88 sk->sk_prot->init(sk);
89 err = 0;
90
91out:
92 phonet_proto_put(pnp);
93 return err;
94}
95
96static struct net_proto_family phonet_proto_family = {
97 .family = AF_PHONET,
98 .create = pn_socket_create,
99 .owner = THIS_MODULE,
100};
101
102/* Phonet device header operations */
103static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
104 unsigned short type, const void *daddr,
105 const void *saddr, unsigned len)
106{
107 u8 *media = skb_push(skb, 1);
108
109 if (type != ETH_P_PHONET)
110 return -1;
111
112 if (!saddr)
113 saddr = dev->dev_addr;
114 *media = *(const u8 *)saddr;
115 return 1;
116}
117
118static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr)
119{
120 const u8 *media = skb_mac_header(skb);
121 *haddr = *media;
122 return 1;
123}
124
125struct header_ops phonet_header_ops = {
126 .create = pn_header_create,
127 .parse = pn_header_parse,
128};
129EXPORT_SYMBOL(phonet_header_ops);
130
131/*
132 * Prepends an ISI header and sends a datagram.
133 */
134static int pn_send(struct sk_buff *skb, struct net_device *dev,
135 u16 dst, u16 src, u8 res, u8 irq)
136{
137 struct phonethdr *ph;
138 int err;
139
140 if (skb->len + 2 > 0xffff) {
141 /* Phonet length field would overflow */
142 err = -EMSGSIZE;
143 goto drop;
144 }
145
146 skb_reset_transport_header(skb);
147 WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */
148 skb_push(skb, sizeof(struct phonethdr));
149 skb_reset_network_header(skb);
150 ph = pn_hdr(skb);
151 ph->pn_rdev = pn_dev(dst);
152 ph->pn_sdev = pn_dev(src);
153 ph->pn_res = res;
154 ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph));
155 ph->pn_robj = pn_obj(dst);
156 ph->pn_sobj = pn_obj(src);
157
158 skb->protocol = htons(ETH_P_PHONET);
159 skb->priority = 0;
160 skb->dev = dev;
161
162 if (pn_addr(src) == pn_addr(dst)) {
163 skb_reset_mac_header(skb);
164 skb->pkt_type = PACKET_LOOPBACK;
165 skb_orphan(skb);
166 if (irq)
167 netif_rx(skb);
168 else
169 netif_rx_ni(skb);
170 err = 0;
171 } else {
172 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
173 NULL, NULL, skb->len);
174 if (err < 0) {
175 err = -EHOSTUNREACH;
176 goto drop;
177 }
178 err = dev_queue_xmit(skb);
179 }
180
181 return err;
182drop:
183 kfree_skb(skb);
184 return err;
185}
186
187static int pn_raw_send(const void *data, int len, struct net_device *dev,
188 u16 dst, u16 src, u8 res)
189{
190 struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC);
191 if (skb == NULL)
192 return -ENOMEM;
193
194 skb_reserve(skb, MAX_PHONET_HEADER);
195 __skb_put(skb, len);
196 skb_copy_to_linear_data(skb, data, len);
197 return pn_send(skb, dev, dst, src, res, 1);
198}
199
200/*
201 * Create a Phonet header for the skb and send it out. Returns
202 * non-zero error code if failed. The skb is freed then.
203 */
204int pn_skb_send(struct sock *sk, struct sk_buff *skb,
205 const struct sockaddr_pn *target)
206{
207 struct net_device *dev;
208 struct pn_sock *pn = pn_sk(sk);
209 int err;
210 u16 src;
211 u8 daddr = pn_sockaddr_get_addr(target), saddr = PN_NO_ADDR;
212
213 err = -EHOSTUNREACH;
214 if (sk->sk_bound_dev_if)
215 dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
216 else
217 dev = phonet_device_get(sock_net(sk));
218 if (!dev || !(dev->flags & IFF_UP))
219 goto drop;
220
221 saddr = phonet_address_get(dev, daddr);
222 if (saddr == PN_NO_ADDR)
223 goto drop;
224
225 src = pn->sobject;
226 if (!pn_addr(src))
227 src = pn_object(saddr, pn_obj(src));
228
229 err = pn_send(skb, dev, pn_sockaddr_get_object(target),
230 src, pn_sockaddr_get_resource(target), 0);
231 dev_put(dev);
232 return err;
233
234drop:
235 kfree_skb(skb);
236 if (dev)
237 dev_put(dev);
238 return err;
239}
240EXPORT_SYMBOL(pn_skb_send);
241
242/* Do not send an error message in response to an error message */
243static inline int can_respond(struct sk_buff *skb)
244{
245 const struct phonethdr *ph;
246 const struct phonetmsg *pm;
247 u8 submsg_id;
248
249 if (!pskb_may_pull(skb, 3))
250 return 0;
251
252 ph = pn_hdr(skb);
253 if (phonet_address_get(skb->dev, ph->pn_rdev) != ph->pn_rdev)
254 return 0; /* we are not the destination */
255 if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5))
256 return 0;
257
258 ph = pn_hdr(skb); /* re-acquires the pointer */
259 pm = pn_msg(skb);
260 if (pm->pn_msg_id != PN_COMMON_MESSAGE)
261 return 1;
262 submsg_id = (ph->pn_res == PN_PREFIX)
263 ? pm->pn_e_submsg_id : pm->pn_submsg_id;
264 if (submsg_id != PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP &&
265 pm->pn_e_submsg_id != PN_COMM_SERVICE_NOT_IDENTIFIED_RESP)
266 return 1;
267 return 0;
268}
269
270static int send_obj_unreachable(struct sk_buff *rskb)
271{
272 const struct phonethdr *oph = pn_hdr(rskb);
273 const struct phonetmsg *opm = pn_msg(rskb);
274 struct phonetmsg resp;
275
276 memset(&resp, 0, sizeof(resp));
277 resp.pn_trans_id = opm->pn_trans_id;
278 resp.pn_msg_id = PN_COMMON_MESSAGE;
279 if (oph->pn_res == PN_PREFIX) {
280 resp.pn_e_res_id = opm->pn_e_res_id;
281 resp.pn_e_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP;
282 resp.pn_e_orig_msg_id = opm->pn_msg_id;
283 resp.pn_e_status = 0;
284 } else {
285 resp.pn_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP;
286 resp.pn_orig_msg_id = opm->pn_msg_id;
287 resp.pn_status = 0;
288 }
289 return pn_raw_send(&resp, sizeof(resp), rskb->dev,
290 pn_object(oph->pn_sdev, oph->pn_sobj),
291 pn_object(oph->pn_rdev, oph->pn_robj),
292 oph->pn_res);
293}
294
295static int send_reset_indications(struct sk_buff *rskb)
296{
297 struct phonethdr *oph = pn_hdr(rskb);
298 static const u8 data[4] = {
299 0x00 /* trans ID */, 0x10 /* subscribe msg */,
300 0x00 /* subscription count */, 0x00 /* dummy */
301 };
302
303 return pn_raw_send(data, sizeof(data), rskb->dev,
304 pn_object(oph->pn_sdev, 0x00),
305 pn_object(oph->pn_rdev, oph->pn_robj), 0x10);
306}
307
308
309/* packet type functions */
310
311/*
312 * Stuff received packets to associated sockets.
313 * On error, returns non-zero and releases the skb.
314 */
315static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
316 struct packet_type *pkttype,
317 struct net_device *orig_dev)
318{
319 struct phonethdr *ph;
320 struct sock *sk;
321 struct sockaddr_pn sa;
322 u16 len;
323
324 if (dev_net(dev) != &init_net)
325 goto out;
326
327 /* check we have at least a full Phonet header */
328 if (!pskb_pull(skb, sizeof(struct phonethdr)))
329 goto out;
330
331 /* check that the advertised length is correct */
332 ph = pn_hdr(skb);
333 len = get_unaligned_be16(&ph->pn_length);
334 if (len < 2)
335 goto out;
336 len -= 2;
337 if ((len > skb->len) || pskb_trim(skb, len))
338 goto out;
339 skb_reset_transport_header(skb);
340
341 pn_skb_get_dst_sockaddr(skb, &sa);
342 if (pn_sockaddr_get_addr(&sa) == 0)
343 goto out; /* currently, we cannot be device 0 */
344
345 sk = pn_find_sock_by_sa(&sa);
346 if (sk == NULL) {
347 if (can_respond(skb)) {
348 send_obj_unreachable(skb);
349 send_reset_indications(skb);
350 }
351 goto out;
352 }
353
354 /* Push data to the socket (or other sockets connected to it). */
355 return sk_receive_skb(sk, skb, 0);
356
357out:
358 kfree_skb(skb);
359 return NET_RX_DROP;
360}
361
362static struct packet_type phonet_packet_type = {
363 .type = __constant_htons(ETH_P_PHONET),
364 .dev = NULL,
365 .func = phonet_rcv,
366};
367
368/* Transport protocol registration */
369static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
370static DEFINE_SPINLOCK(proto_tab_lock);
371
372int __init_or_module phonet_proto_register(int protocol,
373 struct phonet_protocol *pp)
374{
375 int err = 0;
376
377 if (protocol >= PHONET_NPROTO)
378 return -EINVAL;
379
380 err = proto_register(pp->prot, 1);
381 if (err)
382 return err;
383
384 spin_lock(&proto_tab_lock);
385 if (proto_tab[protocol])
386 err = -EBUSY;
387 else
388 proto_tab[protocol] = pp;
389 spin_unlock(&proto_tab_lock);
390
391 return err;
392}
393EXPORT_SYMBOL(phonet_proto_register);
394
395void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
396{
397 spin_lock(&proto_tab_lock);
398 BUG_ON(proto_tab[protocol] != pp);
399 proto_tab[protocol] = NULL;
400 spin_unlock(&proto_tab_lock);
401 proto_unregister(pp->prot);
402}
403EXPORT_SYMBOL(phonet_proto_unregister);
404
405static struct phonet_protocol *phonet_proto_get(int protocol)
406{
407 struct phonet_protocol *pp;
408
409 if (protocol >= PHONET_NPROTO)
410 return NULL;
411
412 spin_lock(&proto_tab_lock);
413 pp = proto_tab[protocol];
414 if (pp && !try_module_get(pp->prot->owner))
415 pp = NULL;
416 spin_unlock(&proto_tab_lock);
417
418 return pp;
419}
420
421static inline void phonet_proto_put(struct phonet_protocol *pp)
422{
423 module_put(pp->prot->owner);
424}
425
426/* Module registration */
427static int __init phonet_init(void)
428{
429 int err;
430
431 err = sock_register(&phonet_proto_family);
432 if (err) {
433 printk(KERN_ALERT
434 "phonet protocol family initialization failed\n");
435 return err;
436 }
437
438 phonet_device_init();
439 dev_add_pack(&phonet_packet_type);
440 phonet_netlink_register();
441 phonet_sysctl_init();
442
443 err = isi_register();
444 if (err)
445 goto err;
446 return 0;
447
448err:
449 phonet_sysctl_exit();
450 sock_unregister(AF_PHONET);
451 dev_remove_pack(&phonet_packet_type);
452 phonet_device_exit();
453 return err;
454}
455
456static void __exit phonet_exit(void)
457{
458 isi_unregister();
459 phonet_sysctl_exit();
460 sock_unregister(AF_PHONET);
461 dev_remove_pack(&phonet_packet_type);
462 phonet_device_exit();
463}
464
465module_init(phonet_init);
466module_exit(phonet_exit);
467MODULE_DESCRIPTION("Phonet protocol stack for Linux");
468MODULE_LICENSE("GPL");
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
new file mode 100644
index 000000000000..e087862ed7e4
--- /dev/null
+++ b/net/phonet/datagram.c
@@ -0,0 +1,197 @@
1/*
2 * File: datagram.c
3 *
4 * Datagram (ISI) Phonet sockets
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/socket.h>
28#include <asm/ioctls.h>
29#include <net/sock.h>
30
31#include <linux/phonet.h>
32#include <net/phonet/phonet.h>
33
34static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);
35
36/* associated socket ceases to exist */
37static void pn_sock_close(struct sock *sk, long timeout)
38{
39 sk_common_release(sk);
40}
41
42static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
43{
44 struct sk_buff *skb;
45 int answ;
46
47 switch (cmd) {
48 case SIOCINQ:
49 lock_sock(sk);
50 skb = skb_peek(&sk->sk_receive_queue);
51 answ = skb ? skb->len : 0;
52 release_sock(sk);
53 return put_user(answ, (int __user *)arg);
54 }
55
56 return -ENOIOCTLCMD;
57}
58
59/* Destroy socket. All references are gone. */
60static void pn_destruct(struct sock *sk)
61{
62 skb_queue_purge(&sk->sk_receive_queue);
63}
64
65static int pn_init(struct sock *sk)
66{
67 sk->sk_destruct = pn_destruct;
68 return 0;
69}
70
71static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
72 struct msghdr *msg, size_t len)
73{
74 struct sockaddr_pn *target;
75 struct sk_buff *skb;
76 int err;
77
78 if (msg->msg_flags & MSG_OOB)
79 return -EOPNOTSUPP;
80
81 if (msg->msg_name == NULL)
82 return -EDESTADDRREQ;
83
84 if (msg->msg_namelen < sizeof(struct sockaddr_pn))
85 return -EINVAL;
86
87 target = (struct sockaddr_pn *)msg->msg_name;
88 if (target->spn_family != AF_PHONET)
89 return -EAFNOSUPPORT;
90
91 skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len,
92 msg->msg_flags & MSG_DONTWAIT, &err);
93 if (skb == NULL)
94 return err;
95 skb_reserve(skb, MAX_PHONET_HEADER);
96
97 err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len);
98 if (err < 0) {
99 kfree_skb(skb);
100 return err;
101 }
102
103 /*
104 * Fill in the Phonet header and
105 * finally pass the packet forwards.
106 */
107 err = pn_skb_send(sk, skb, target);
108
109 /* If ok, return len. */
110 return (err >= 0) ? len : err;
111}
112
113static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
114 struct msghdr *msg, size_t len, int noblock,
115 int flags, int *addr_len)
116{
117 struct sk_buff *skb = NULL;
118 struct sockaddr_pn sa;
119 int rval = -EOPNOTSUPP;
120 int copylen;
121
122 if (flags & MSG_OOB)
123 goto out_nofree;
124
125 if (addr_len)
126 *addr_len = sizeof(sa);
127
128 skb = skb_recv_datagram(sk, flags, noblock, &rval);
129 if (skb == NULL)
130 goto out_nofree;
131
132 pn_skb_get_src_sockaddr(skb, &sa);
133
134 copylen = skb->len;
135 if (len < copylen) {
136 msg->msg_flags |= MSG_TRUNC;
137 copylen = len;
138 }
139
140 rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen);
141 if (rval) {
142 rval = -EFAULT;
143 goto out;
144 }
145
146 rval = (flags & MSG_TRUNC) ? skb->len : copylen;
147
148 if (msg->msg_name != NULL)
149 memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
150
151out:
152 skb_free_datagram(sk, skb);
153
154out_nofree:
155 return rval;
156}
157
158/* Queue an skb for a sock. */
159static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
160{
161 int err = sock_queue_rcv_skb(sk, skb);
162 if (err < 0)
163 kfree_skb(skb);
164 return err ? NET_RX_DROP : NET_RX_SUCCESS;
165}
166
167/* Module registration */
168static struct proto pn_proto = {
169 .close = pn_sock_close,
170 .ioctl = pn_ioctl,
171 .init = pn_init,
172 .sendmsg = pn_sendmsg,
173 .recvmsg = pn_recvmsg,
174 .backlog_rcv = pn_backlog_rcv,
175 .hash = pn_sock_hash,
176 .unhash = pn_sock_unhash,
177 .get_port = pn_sock_get_port,
178 .obj_size = sizeof(struct pn_sock),
179 .owner = THIS_MODULE,
180 .name = "PHONET",
181};
182
183static struct phonet_protocol pn_dgram_proto = {
184 .ops = &phonet_dgram_ops,
185 .prot = &pn_proto,
186 .sock_type = SOCK_DGRAM,
187};
188
189int __init isi_register(void)
190{
191 return phonet_proto_register(PN_PROTO_PHONET, &pn_dgram_proto);
192}
193
194void __exit isi_unregister(void)
195{
196 phonet_proto_unregister(PN_PROTO_PHONET, &pn_dgram_proto);
197}
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
new file mode 100644
index 000000000000..53be9fc82aaa
--- /dev/null
+++ b/net/phonet/pn_dev.c
@@ -0,0 +1,208 @@
1/*
2 * File: pn_dev.c
3 *
4 * Phonet network device
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/net.h>
28#include <linux/netdevice.h>
29#include <linux/phonet.h>
30#include <net/sock.h>
31#include <net/phonet/pn_dev.h>
32
/* when accessing, remember to lock with spin_lock(&pndevs.lock); */
/* Global list of per-net_device Phonet state (struct phonet_device). */
struct phonet_device_list pndevs = {
	.list = LIST_HEAD_INIT(pndevs.list),
	.lock = __SPIN_LOCK_UNLOCKED(pndevs.lock),
};
38
39/* Allocate new Phonet device. */
40static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
41{
42 struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC);
43 if (pnd == NULL)
44 return NULL;
45 pnd->netdev = dev;
46 bitmap_zero(pnd->addrs, 64);
47
48 list_add(&pnd->list, &pndevs.list);
49 return pnd;
50}
51
52static struct phonet_device *__phonet_get(struct net_device *dev)
53{
54 struct phonet_device *pnd;
55
56 list_for_each_entry(pnd, &pndevs.list, list) {
57 if (pnd->netdev == dev)
58 return pnd;
59 }
60 return NULL;
61}
62
/* Unlink and release per-device Phonet state.  Caller holds pndevs.lock. */
static void __phonet_device_free(struct phonet_device *pnd)
{
	list_del(&pnd->list);
	kfree(pnd);
}
68
69struct net_device *phonet_device_get(struct net *net)
70{
71 struct phonet_device *pnd;
72 struct net_device *dev;
73
74 spin_lock_bh(&pndevs.lock);
75 list_for_each_entry(pnd, &pndevs.list, list) {
76 dev = pnd->netdev;
77 BUG_ON(!dev);
78
79 if (dev_net(dev) == net &&
80 (dev->reg_state == NETREG_REGISTERED) &&
81 ((pnd->netdev->flags & IFF_UP)) == IFF_UP)
82 break;
83 dev = NULL;
84 }
85 if (dev)
86 dev_hold(dev);
87 spin_unlock_bh(&pndevs.lock);
88 return dev;
89}
90
91int phonet_address_add(struct net_device *dev, u8 addr)
92{
93 struct phonet_device *pnd;
94 int err = 0;
95
96 spin_lock_bh(&pndevs.lock);
97 /* Find or create Phonet-specific device data */
98 pnd = __phonet_get(dev);
99 if (pnd == NULL)
100 pnd = __phonet_device_alloc(dev);
101 if (unlikely(pnd == NULL))
102 err = -ENOMEM;
103 else if (test_and_set_bit(addr >> 2, pnd->addrs))
104 err = -EEXIST;
105 spin_unlock_bh(&pndevs.lock);
106 return err;
107}
108
109int phonet_address_del(struct net_device *dev, u8 addr)
110{
111 struct phonet_device *pnd;
112 int err = 0;
113
114 spin_lock_bh(&pndevs.lock);
115 pnd = __phonet_get(dev);
116 if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs))
117 err = -EADDRNOTAVAIL;
118 if (bitmap_empty(pnd->addrs, 64))
119 __phonet_device_free(pnd);
120 spin_unlock_bh(&pndevs.lock);
121 return err;
122}
123
124/* Gets a source address toward a destination, through a interface. */
125u8 phonet_address_get(struct net_device *dev, u8 addr)
126{
127 struct phonet_device *pnd;
128
129 spin_lock_bh(&pndevs.lock);
130 pnd = __phonet_get(dev);
131 if (pnd) {
132 BUG_ON(bitmap_empty(pnd->addrs, 64));
133
134 /* Use same source address as destination, if possible */
135 if (!test_bit(addr >> 2, pnd->addrs))
136 addr = find_first_bit(pnd->addrs, 64) << 2;
137 } else
138 addr = PN_NO_ADDR;
139 spin_unlock_bh(&pndevs.lock);
140 return addr;
141}
142
143int phonet_address_lookup(u8 addr)
144{
145 struct phonet_device *pnd;
146
147 spin_lock_bh(&pndevs.lock);
148 list_for_each_entry(pnd, &pndevs.list, list) {
149 /* Don't allow unregistering devices! */
150 if ((pnd->netdev->reg_state != NETREG_REGISTERED) ||
151 ((pnd->netdev->flags & IFF_UP)) != IFF_UP)
152 continue;
153
154 if (test_bit(addr >> 2, pnd->addrs)) {
155 spin_unlock_bh(&pndevs.lock);
156 return 0;
157 }
158 }
159 spin_unlock_bh(&pndevs.lock);
160 return -EADDRNOTAVAIL;
161}
162
163/* notify Phonet of device events */
164static int phonet_device_notify(struct notifier_block *me, unsigned long what,
165 void *arg)
166{
167 struct net_device *dev = arg;
168
169 if (what == NETDEV_UNREGISTER) {
170 struct phonet_device *pnd;
171
172 /* Destroy phonet-specific device data */
173 spin_lock_bh(&pndevs.lock);
174 pnd = __phonet_get(dev);
175 if (pnd)
176 __phonet_device_free(pnd);
177 spin_unlock_bh(&pndevs.lock);
178 }
179 return 0;
180
181}
182
/* Netdevice notifier hook; registered/unregistered in
 * phonet_device_init()/phonet_device_exit(). */
static struct notifier_block phonet_device_notifier = {
	.notifier_call = phonet_device_notify,
	.priority = 0,
};
187
/* Initialize Phonet devices list */
void phonet_device_init(void)
{
	register_netdevice_notifier(&phonet_device_notifier);
}
193
194void phonet_device_exit(void)
195{
196 struct phonet_device *pnd, *n;
197
198 rtnl_unregister_all(PF_PHONET);
199 rtnl_lock();
200 spin_lock_bh(&pndevs.lock);
201
202 list_for_each_entry_safe(pnd, n, &pndevs.list, list)
203 __phonet_device_free(pnd);
204
205 spin_unlock_bh(&pndevs.lock);
206 rtnl_unlock();
207 unregister_netdevice_notifier(&phonet_device_notifier);
208}
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
new file mode 100644
index 000000000000..b1ea19a230dd
--- /dev/null
+++ b/net/phonet/pn_netlink.c
@@ -0,0 +1,186 @@
1/*
2 * File: pn_netlink.c
3 *
4 * Phonet netlink interface
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/netlink.h>
28#include <linux/phonet.h>
29#include <net/sock.h>
30#include <net/phonet/pn_dev.h>
31
32static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
33 u32 pid, u32 seq, int event);
34
/* Broadcast an RTM_NEWADDR/RTM_DELADDR event for (@dev, @addr) to the
 * RTNLGRP_PHONET_IFADDR multicast group; on any failure, record the error
 * on the group via rtnl_set_sk_err(). */
static void rtmsg_notify(int event, struct net_device *dev, u8 addr)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	/* Room for one ifaddrmsg header plus a single one-byte attribute */
	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
			nla_total_size(1), GFP_KERNEL);
	if (skb == NULL)
		goto errout;
	err = fill_addr(skb, dev, addr, 0, 0, event);
	if (err < 0) {
		/* The skb was sized above; -EMSGSIZE would be a logic bug */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, dev_net(dev), 0,
			  RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
}
56
57static int newaddr_doit(struct sk_buff *skb, struct nlmsghdr *nlm, void *attr)
58{
59 struct rtattr **rta = attr;
60 struct ifaddrmsg *ifm = NLMSG_DATA(nlm);
61 struct net_device *dev;
62 int err;
63 u8 pnaddr;
64
65 if (!capable(CAP_SYS_ADMIN))
66 return -EPERM;
67
68 ASSERT_RTNL();
69
70 if (rta[IFA_LOCAL - 1] == NULL)
71 return -EINVAL;
72
73 dev = __dev_get_by_index(&init_net, ifm->ifa_index);
74 if (dev == NULL)
75 return -ENODEV;
76
77 if (ifm->ifa_prefixlen > 0)
78 return -EINVAL;
79
80 memcpy(&pnaddr, RTA_DATA(rta[IFA_LOCAL - 1]), 1);
81
82 err = phonet_address_add(dev, pnaddr);
83 if (!err)
84 rtmsg_notify(RTM_NEWADDR, dev, pnaddr);
85 return err;
86}
87
88static int deladdr_doit(struct sk_buff *skb, struct nlmsghdr *nlm, void *attr)
89{
90 struct rtattr **rta = attr;
91 struct ifaddrmsg *ifm = NLMSG_DATA(nlm);
92 struct net_device *dev;
93 int err;
94 u8 pnaddr;
95
96 if (!capable(CAP_SYS_ADMIN))
97 return -EPERM;
98
99 ASSERT_RTNL();
100
101 if (rta[IFA_LOCAL - 1] == NULL)
102 return -EINVAL;
103
104 dev = __dev_get_by_index(&init_net, ifm->ifa_index);
105 if (dev == NULL)
106 return -ENODEV;
107
108 if (ifm->ifa_prefixlen > 0)
109 return -EADDRNOTAVAIL;
110
111 memcpy(&pnaddr, RTA_DATA(rta[IFA_LOCAL - 1]), 1);
112
113 err = phonet_address_del(dev, pnaddr);
114 if (!err)
115 rtmsg_notify(RTM_DELADDR, dev, pnaddr);
116 return err;
117}
118
/* Append one address message (ifaddrmsg + IFA_LOCAL byte) to @skb.
 * Returns 0, or -1 after trimming the skb back if it ran out of room.
 * NOTE: NLMSG_PUT and RTA_PUT are macros that jump to the nlmsg_failure /
 * rtattr_failure labels below on overflow. */
static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
			u32 pid, u32 seq, int event)
{
	struct ifaddrmsg *ifm;
	struct nlmsghdr *nlh;
	unsigned int orig_len = skb->len;

	nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct ifaddrmsg));
	ifm = NLMSG_DATA(nlh);
	ifm->ifa_family = AF_PHONET;
	ifm->ifa_prefixlen = 0;
	ifm->ifa_flags = IFA_F_PERMANENT;
	ifm->ifa_scope = RT_SCOPE_HOST;
	ifm->ifa_index = dev->ifindex;
	RTA_PUT(skb, IFA_LOCAL, 1, &addr);
	/* Patch up the header length now that the attribute is in place */
	nlh->nlmsg_len = skb->len - orig_len;

	return 0;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, orig_len);

	return -1;
}
144
/* RTM_GETADDR dump handler: walk every device's address bitmap and emit one
 * RTM_NEWADDR message per address.  cb->args[0]/[1] hold the device and
 * address indices to resume from when the dump spans multiple skbs. */
static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct phonet_device *pnd;
	int dev_idx = 0, dev_start_idx = cb->args[0];
	int addr_idx = 0, addr_start_idx = cb->args[1];

	spin_lock_bh(&pndevs.lock);
	list_for_each_entry(pnd, &pndevs.list, list) {
		u8 addr;

		/* Past the device we stopped at: restart addresses from 0 */
		if (dev_idx > dev_start_idx)
			addr_start_idx = 0;
		if (dev_idx++ < dev_start_idx)
			continue;

		addr_idx = 0;
		/* Bitmap stores addr >> 2; shift back when emitting */
		for (addr = find_first_bit(pnd->addrs, 64); addr < 64;
			addr = find_next_bit(pnd->addrs, 64, 1+addr)) {
			if (addr_idx++ < addr_start_idx)
				continue;

			if (fill_addr(skb, pnd->netdev, addr << 2,
					 NETLINK_CB(cb->skb).pid,
					cb->nlh->nlmsg_seq, RTM_NEWADDR))
				goto out;
		}
	}

out:
	spin_unlock_bh(&pndevs.lock);
	/* Remember where to resume */
	cb->args[0] = dev_idx;
	cb->args[1] = addr_idx;

	return skb->len;
}
180
/* Register the PF_PHONET rtnetlink message handlers (add/del/dump). */
void __init phonet_netlink_register(void)
{
	rtnl_register(PF_PHONET, RTM_NEWADDR, newaddr_doit, NULL);
	rtnl_register(PF_PHONET, RTM_DELADDR, deladdr_doit, NULL);
	rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit);
}
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
new file mode 100644
index 000000000000..dfd4061646db
--- /dev/null
+++ b/net/phonet/socket.c
@@ -0,0 +1,312 @@
1/*
2 * File: socket.c
3 *
4 * Phonet sockets
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/net.h>
28#include <net/sock.h>
29#include <net/tcp_states.h>
30
31#include <linux/phonet.h>
32#include <net/phonet/phonet.h>
33#include <net/phonet/pn_dev.h>
34
35static int pn_socket_release(struct socket *sock)
36{
37 struct sock *sk = sock->sk;
38
39 if (sk) {
40 sock->sk = NULL;
41 sk->sk_prot->close(sk, 0);
42 }
43 return 0;
44}
45
/* Global hash list of bound Phonet sockets, guarded by its own lock. */
static struct {
	struct hlist_head hlist;
	spinlock_t lock;
} pnsocks = {
	.hlist = HLIST_HEAD_INIT,
	.lock = __SPIN_LOCK_UNLOCKED(pnsocks.lock),
};
53
54/*
55 * Find address based on socket address, match only certain fields.
56 * Also grab sock if it was found. Remember to sock_put it later.
57 */
58struct sock *pn_find_sock_by_sa(const struct sockaddr_pn *spn)
59{
60 struct hlist_node *node;
61 struct sock *sknode;
62 struct sock *rval = NULL;
63 u16 obj = pn_sockaddr_get_object(spn);
64 u8 res = spn->spn_resource;
65
66 spin_lock_bh(&pnsocks.lock);
67
68 sk_for_each(sknode, node, &pnsocks.hlist) {
69 struct pn_sock *pn = pn_sk(sknode);
70 BUG_ON(!pn->sobject); /* unbound socket */
71
72 if (pn_port(obj)) {
73 /* Look up socket by port */
74 if (pn_port(pn->sobject) != pn_port(obj))
75 continue;
76 } else {
77 /* If port is zero, look up by resource */
78 if (pn->resource != res)
79 continue;
80 }
81 if (pn_addr(pn->sobject)
82 && pn_addr(pn->sobject) != pn_addr(obj))
83 continue;
84
85 rval = sknode;
86 sock_hold(sknode);
87 break;
88 }
89
90 spin_unlock_bh(&pnsocks.lock);
91
92 return rval;
93
94}
95
/* Add @sk to the global Phonet socket list, enabling lookup/RX. */
void pn_sock_hash(struct sock *sk)
{
	spin_lock_bh(&pnsocks.lock);
	sk_add_node(sk, &pnsocks.hlist);
	spin_unlock_bh(&pnsocks.lock);
}
EXPORT_SYMBOL(pn_sock_hash);
103
/* Remove @sk from the global Phonet socket list. */
void pn_sock_unhash(struct sock *sk)
{
	spin_lock_bh(&pnsocks.lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&pnsocks.lock);
}
EXPORT_SYMBOL(pn_sock_unhash);
111
/*
 * Bind a Phonet socket to a local address.
 *
 * Delegates to the protocol's own bind hook when one exists.  Otherwise
 * validates the sockaddr, reserves a port via get_port(), records the
 * address/resource, and hashes the socket to enable RX.
 * Returns 0 or a negative errno.
 */
static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	int err;
	u16 handle;
	u8 saddr;

	if (sk->sk_prot->bind)
		return sk->sk_prot->bind(sk, addr, len);

	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
	saddr = pn_addr(handle);
	/* A non-zero address must be one of ours */
	if (saddr && phonet_address_lookup(saddr))
		return -EADDRNOTAVAIL;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
		err = -EINVAL; /* attempt to rebind */
		goto out;
	}
	err = sk->sk_prot->get_port(sk, pn_port(handle));
	if (err)
		goto out;

	/* get_port() sets the port, bind() sets the address if applicable */
	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
	pn->resource = spn->spn_resource;

	/* Enable RX on the socket */
	sk->sk_prot->hash(sk);
out:
	release_sock(sk);
	return err;
}
153
154static int pn_socket_autobind(struct socket *sock)
155{
156 struct sockaddr_pn sa;
157 int err;
158
159 memset(&sa, 0, sizeof(sa));
160 sa.spn_family = AF_PHONET;
161 err = pn_socket_bind(sock, (struct sockaddr *)&sa,
162 sizeof(struct sockaddr_pn));
163 if (err != -EINVAL)
164 return err;
165 BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
166 return 0; /* socket was already bound */
167}
168
169static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
170 int *sockaddr_len, int peer)
171{
172 struct sock *sk = sock->sk;
173 struct pn_sock *pn = pn_sk(sk);
174
175 memset(addr, 0, sizeof(struct sockaddr_pn));
176 addr->sa_family = AF_PHONET;
177 if (!peer) /* Race with bind() here is userland's problem. */
178 pn_sockaddr_set_object((struct sockaddr_pn *)addr,
179 pn->sobject);
180
181 *sockaddr_len = sizeof(struct sockaddr_pn);
182 return 0;
183}
184
/*
 * Socket-level ioctl.  Implements SIOCPNGETOBJECT (compute a full Phonet
 * object handle for a given destination address using a local source
 * address); everything else is passed down to the protocol's ioctl hook.
 */
static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	if (cmd == SIOCPNGETOBJECT) {
		struct net_device *dev;
		u16 handle;
		u8 saddr;

		if (get_user(handle, (__u16 __user *)arg))
			return -EFAULT;

		lock_sock(sk);
		/* Prefer the bound device, else any usable Phonet device */
		if (sk->sk_bound_dev_if)
			dev = dev_get_by_index(sock_net(sk),
						sk->sk_bound_dev_if);
		else
			dev = phonet_device_get(sock_net(sk));
		if (dev && (dev->flags & IFF_UP))
			saddr = phonet_address_get(dev, pn_addr(handle));
		else
			saddr = PN_NO_ADDR;
		release_sock(sk);

		/* Drop the reference taken by either lookup above */
		if (dev)
			dev_put(dev);
		if (saddr == PN_NO_ADDR)
			return -EHOSTUNREACH;

		handle = pn_object(saddr, pn_port(pn->sobject));
		return put_user(handle, (__u16 __user *)arg);
	}

	return sk->sk_prot->ioctl(sk, cmd, arg);
}
222
223static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
224 struct msghdr *m, size_t total_len)
225{
226 struct sock *sk = sock->sk;
227
228 if (pn_socket_autobind(sock))
229 return -EAGAIN;
230
231 return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
232}
233
/* Socket-layer operations for Phonet datagram sockets; unsupported calls
 * are wired to the generic sock_no_* stubs. */
const struct proto_ops phonet_dgram_ops = {
	.family = AF_PHONET,
	.owner = THIS_MODULE,
	.release = pn_socket_release,
	.bind = pn_socket_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = pn_socket_getname,
	.poll = datagram_poll,
	.ioctl = pn_socket_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = sock_no_setsockopt,
	.compat_getsockopt = sock_no_getsockopt,
#endif
	.sendmsg = pn_socket_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
258
/* Serializes port allocation across sockets. */
static DEFINE_MUTEX(port_mutex);

/* allocate port for a socket */
/*
 * If @sport is zero, pick a free port from the configured dynamic range
 * (round-robin via the static port_cur cursor); otherwise try to claim
 * exactly @sport.  On success the socket's sobject is updated with the
 * port and 0 is returned; otherwise -EADDRINUSE.
 */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
	static int port_cur;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn try_sa;
	struct sock *tmpsk;

	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
	try_sa.spn_family = AF_PHONET;

	mutex_lock(&port_mutex);

	if (!sport) {
		/* search free port */
		int port, pmin, pmax;

		phonet_get_local_port_range(&pmin, &pmax);
		for (port = pmin; port <= pmax; port++) {
			port_cur++;
			if (port_cur < pmin || port_cur > pmax)
				port_cur = pmin;

			pn_sockaddr_set_port(&try_sa, port_cur);
			/* pn_find_sock_by_sa takes a reference on a match */
			tmpsk = pn_find_sock_by_sa(&try_sa);
			if (tmpsk == NULL) {
				sport = port_cur;
				goto found;
			} else
				sock_put(tmpsk);
		}
	} else {
		/* try to find specific port */
		pn_sockaddr_set_port(&try_sa, sport);
		tmpsk = pn_find_sock_by_sa(&try_sa);
		if (tmpsk == NULL)
			/* No sock there! We can use that port... */
			goto found;
		else
			sock_put(tmpsk);
	}
	mutex_unlock(&port_mutex);

	/* the port must be in use already */
	return -EADDRINUSE;

found:
	mutex_unlock(&port_mutex);
	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
	return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
new file mode 100644
index 000000000000..600a4309b8c8
--- /dev/null
+++ b/net/phonet/sysctl.c
@@ -0,0 +1,113 @@
1/*
2 * File: sysctl.c
3 *
4 * Phonet /proc/sys/net/phonet interface implementation
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA
23 */
24
25#include <linux/seqlock.h>
26#include <linux/sysctl.h>
27#include <linux/errno.h>
28#include <linux/init.h>
29
30#define DYNAMIC_PORT_MIN 0x40
31#define DYNAMIC_PORT_MAX 0x7f
32
33static DEFINE_SEQLOCK(local_port_range_lock);
34static int local_port_range_min[2] = {0, 0};
35static int local_port_range_max[2] = {1023, 1023};
36static int local_port_range[2] = {DYNAMIC_PORT_MIN, DYNAMIC_PORT_MAX};
37static struct ctl_table_header *phonet_table_hrd;
38
39static void set_local_port_range(int range[2])
40{
41 write_seqlock(&local_port_range_lock);
42 local_port_range[0] = range[0];
43 local_port_range[1] = range[1];
44 write_sequnlock(&local_port_range_lock);
45}
46
/* Read a consistent snapshot of the dynamic port range into *@min/*@max
 * (either may be NULL); the seqlock loop retries if a writer interleaves. */
void phonet_get_local_port_range(int *min, int *max)
{
	unsigned seq;
	do {
		seq = read_seqbegin(&local_port_range_lock);
		if (min)
			*min = local_port_range[0];
		if (max)
			*max = local_port_range[1];
	} while (read_seqretry(&local_port_range_lock, seq));
}
58
/* sysctl handler for net.phonet.local_port_range: parse into a scratch
 * copy, reject inverted ranges, and publish valid updates atomically. */
static int proc_local_port_range(ctl_table *table, int write, struct file *filp,
				 void __user *buffer,
				 size_t *lenp, loff_t *ppos)
{
	int ret;
	/* Work on a copy so a rejected write never clobbers live state */
	int range[2] = {local_port_range[0], local_port_range[1]};
	ctl_table tmp = {
		.data = &range,
		.maxlen = sizeof(range),
		.mode = table->mode,
		.extra1 = &local_port_range_min,
		.extra2 = &local_port_range_max,
	};

	ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos);

	if (write && ret == 0) {
		if (range[1] < range[0])
			ret = -EINVAL;
		else
			set_local_port_range(range);
	}

	return ret;
}
84
/* Sysctl entries under net/phonet/; terminated by the empty sentinel. */
static struct ctl_table phonet_table[] = {
	{
		.ctl_name = CTL_UNNUMBERED,
		.procname = "local_port_range",
		.data = &local_port_range,
		.maxlen = sizeof(local_port_range),
		.mode = 0644,
		.proc_handler = &proc_local_port_range,
		.strategy = NULL,
	},
	{ .ctl_name = 0 }
};
97
/* Directory path for the table above: /proc/sys/net/phonet/. */
struct ctl_path phonet_ctl_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "phonet", .ctl_name = CTL_UNNUMBERED, },
	{ },
};
103
104int __init phonet_sysctl_init(void)
105{
106 phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table);
107 return phonet_table_hrd == NULL ? -ENOMEM : 0;
108}
109
/* Remove the Phonet sysctl table registered in phonet_sysctl_init(). */
void phonet_sysctl_exit(void)
{
	unregister_sysctl_table(phonet_table_hrd);
}
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h
index f63d05045685..bbfa646157c6 100644
--- a/net/rfkill/rfkill-input.h
+++ b/net/rfkill/rfkill-input.h
@@ -13,5 +13,6 @@
13 13
14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); 14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
15void rfkill_epo(void); 15void rfkill_epo(void);
16void rfkill_restore_states(void);
16 17
17#endif /* __RFKILL_INPUT_H */ 18#endif /* __RFKILL_INPUT_H */
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 74aecc098bad..ea0dc04b3c77 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -37,14 +37,20 @@ MODULE_DESCRIPTION("RF switch support");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
39static LIST_HEAD(rfkill_list); /* list of registered rf switches */ 39static LIST_HEAD(rfkill_list); /* list of registered rf switches */
40static DEFINE_MUTEX(rfkill_mutex); 40static DEFINE_MUTEX(rfkill_global_mutex);
41 41
42static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; 42static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED;
43module_param_named(default_state, rfkill_default_state, uint, 0444); 43module_param_named(default_state, rfkill_default_state, uint, 0444);
44MODULE_PARM_DESC(default_state, 44MODULE_PARM_DESC(default_state,
45 "Default initial state for all radio types, 0 = radio off"); 45 "Default initial state for all radio types, 0 = radio off");
46 46
47static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; 47struct rfkill_gsw_state {
48 enum rfkill_state current_state;
49 enum rfkill_state default_state;
50};
51
52static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX];
53static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
48 54
49static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list); 55static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
50 56
@@ -70,6 +76,7 @@ static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
70 */ 76 */
71int register_rfkill_notifier(struct notifier_block *nb) 77int register_rfkill_notifier(struct notifier_block *nb)
72{ 78{
79 BUG_ON(!nb);
73 return blocking_notifier_chain_register(&rfkill_notifier_list, nb); 80 return blocking_notifier_chain_register(&rfkill_notifier_list, nb);
74} 81}
75EXPORT_SYMBOL_GPL(register_rfkill_notifier); 82EXPORT_SYMBOL_GPL(register_rfkill_notifier);
@@ -85,6 +92,7 @@ EXPORT_SYMBOL_GPL(register_rfkill_notifier);
85 */ 92 */
86int unregister_rfkill_notifier(struct notifier_block *nb) 93int unregister_rfkill_notifier(struct notifier_block *nb)
87{ 94{
95 BUG_ON(!nb);
88 return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb); 96 return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb);
89} 97}
90EXPORT_SYMBOL_GPL(unregister_rfkill_notifier); 98EXPORT_SYMBOL_GPL(unregister_rfkill_notifier);
@@ -195,6 +203,11 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
195 * BLOCK even a transmitter that is already in state 203 * BLOCK even a transmitter that is already in state
196 * RFKILL_STATE_HARD_BLOCKED */ 204 * RFKILL_STATE_HARD_BLOCKED */
197 break; 205 break;
206 default:
207 WARN(1, KERN_WARNING
208 "rfkill: illegal state %d passed as parameter "
209 "to rfkill_toggle_radio\n", state);
210 return -EINVAL;
198 } 211 }
199 212
200 if (force || state != rfkill->state) { 213 if (force || state != rfkill->state) {
@@ -213,22 +226,29 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
213} 226}
214 227
215/** 228/**
216 * rfkill_switch_all - Toggle state of all switches of given type 229 * __rfkill_switch_all - Toggle state of all switches of given type
217 * @type: type of interfaces to be affected 230 * @type: type of interfaces to be affected
218 * @state: the new state 231 * @state: the new state
219 * 232 *
220 * This function toggles the state of all switches of given type, 233 * This function toggles the state of all switches of given type,
221 * unless a specific switch is claimed by userspace (in which case, 234 * unless a specific switch is claimed by userspace (in which case,
222 * that switch is left alone) or suspended. 235 * that switch is left alone) or suspended.
236 *
237 * Caller must have acquired rfkill_global_mutex.
223 */ 238 */
224void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) 239static void __rfkill_switch_all(const enum rfkill_type type,
240 const enum rfkill_state state)
225{ 241{
226 struct rfkill *rfkill; 242 struct rfkill *rfkill;
227 243
228 mutex_lock(&rfkill_mutex); 244 if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX),
229 245 KERN_WARNING
230 rfkill_states[type] = state; 246 "rfkill: illegal state %d or type %d "
247 "passed as parameter to __rfkill_switch_all\n",
248 state, type))
249 return;
231 250
251 rfkill_global_states[type].current_state = state;
232 list_for_each_entry(rfkill, &rfkill_list, node) { 252 list_for_each_entry(rfkill, &rfkill_list, node) {
233 if ((!rfkill->user_claim) && (rfkill->type == type)) { 253 if ((!rfkill->user_claim) && (rfkill->type == type)) {
234 mutex_lock(&rfkill->mutex); 254 mutex_lock(&rfkill->mutex);
@@ -236,8 +256,21 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
236 mutex_unlock(&rfkill->mutex); 256 mutex_unlock(&rfkill->mutex);
237 } 257 }
238 } 258 }
259}
239 260
240 mutex_unlock(&rfkill_mutex); 261/**
262 * rfkill_switch_all - Toggle state of all switches of given type
263 * @type: type of interfaces to be affected
264 * @state: the new state
265 *
266 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
267 * Please refer to __rfkill_switch_all() for details.
268 */
269void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
270{
271 mutex_lock(&rfkill_global_mutex);
272 __rfkill_switch_all(type, state);
273 mutex_unlock(&rfkill_global_mutex);
241} 274}
242EXPORT_SYMBOL(rfkill_switch_all); 275EXPORT_SYMBOL(rfkill_switch_all);
243 276
@@ -245,23 +278,53 @@ EXPORT_SYMBOL(rfkill_switch_all);
245 * rfkill_epo - emergency power off all transmitters 278 * rfkill_epo - emergency power off all transmitters
246 * 279 *
247 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, 280 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
248 * ignoring everything in its path but rfkill_mutex and rfkill->mutex. 281 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
282 *
283 * The global state before the EPO is saved and can be restored later
284 * using rfkill_restore_states().
249 */ 285 */
250void rfkill_epo(void) 286void rfkill_epo(void)
251{ 287{
252 struct rfkill *rfkill; 288 struct rfkill *rfkill;
289 int i;
290
291 mutex_lock(&rfkill_global_mutex);
253 292
254 mutex_lock(&rfkill_mutex);
255 list_for_each_entry(rfkill, &rfkill_list, node) { 293 list_for_each_entry(rfkill, &rfkill_list, node) {
256 mutex_lock(&rfkill->mutex); 294 mutex_lock(&rfkill->mutex);
257 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); 295 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
258 mutex_unlock(&rfkill->mutex); 296 mutex_unlock(&rfkill->mutex);
259 } 297 }
260 mutex_unlock(&rfkill_mutex); 298 for (i = 0; i < RFKILL_TYPE_MAX; i++) {
299 rfkill_global_states[i].default_state =
300 rfkill_global_states[i].current_state;
301 rfkill_global_states[i].current_state =
302 RFKILL_STATE_SOFT_BLOCKED;
303 }
304 mutex_unlock(&rfkill_global_mutex);
261} 305}
262EXPORT_SYMBOL_GPL(rfkill_epo); 306EXPORT_SYMBOL_GPL(rfkill_epo);
263 307
264/** 308/**
309 * rfkill_restore_states - restore global states
310 *
311 * Restore (and sync switches to) the global state from the
312 * states in rfkill_default_states. This can undo the effects of
313 * a call to rfkill_epo().
314 */
315void rfkill_restore_states(void)
316{
317 int i;
318
319 mutex_lock(&rfkill_global_mutex);
320
321 for (i = 0; i < RFKILL_TYPE_MAX; i++)
322 __rfkill_switch_all(i, rfkill_global_states[i].default_state);
323 mutex_unlock(&rfkill_global_mutex);
324}
325EXPORT_SYMBOL_GPL(rfkill_restore_states);
326
327/**
265 * rfkill_force_state - Force the internal rfkill radio state 328 * rfkill_force_state - Force the internal rfkill radio state
266 * @rfkill: pointer to the rfkill class to modify. 329 * @rfkill: pointer to the rfkill class to modify.
267 * @state: the current radio state the class should be forced to. 330 * @state: the current radio state the class should be forced to.
@@ -282,9 +345,11 @@ int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state)
282{ 345{
283 enum rfkill_state oldstate; 346 enum rfkill_state oldstate;
284 347
285 if (state != RFKILL_STATE_SOFT_BLOCKED && 348 BUG_ON(!rfkill);
286 state != RFKILL_STATE_UNBLOCKED && 349 if (WARN((state >= RFKILL_STATE_MAX),
287 state != RFKILL_STATE_HARD_BLOCKED) 350 KERN_WARNING
351 "rfkill: illegal state %d passed as parameter "
352 "to rfkill_force_state\n", state))
288 return -EINVAL; 353 return -EINVAL;
289 354
290 mutex_lock(&rfkill->mutex); 355 mutex_lock(&rfkill->mutex);
@@ -352,12 +417,16 @@ static ssize_t rfkill_state_store(struct device *dev,
352 const char *buf, size_t count) 417 const char *buf, size_t count)
353{ 418{
354 struct rfkill *rfkill = to_rfkill(dev); 419 struct rfkill *rfkill = to_rfkill(dev);
355 unsigned int state = simple_strtoul(buf, NULL, 0); 420 unsigned long state;
356 int error; 421 int error;
357 422
358 if (!capable(CAP_NET_ADMIN)) 423 if (!capable(CAP_NET_ADMIN))
359 return -EPERM; 424 return -EPERM;
360 425
426 error = strict_strtoul(buf, 0, &state);
427 if (error)
428 return error;
429
361 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ 430 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
362 if (state != RFKILL_STATE_UNBLOCKED && 431 if (state != RFKILL_STATE_UNBLOCKED &&
363 state != RFKILL_STATE_SOFT_BLOCKED) 432 state != RFKILL_STATE_SOFT_BLOCKED)
@@ -385,7 +454,8 @@ static ssize_t rfkill_claim_store(struct device *dev,
385 const char *buf, size_t count) 454 const char *buf, size_t count)
386{ 455{
387 struct rfkill *rfkill = to_rfkill(dev); 456 struct rfkill *rfkill = to_rfkill(dev);
388 bool claim = !!simple_strtoul(buf, NULL, 0); 457 unsigned long claim_tmp;
458 bool claim;
389 int error; 459 int error;
390 460
391 if (!capable(CAP_NET_ADMIN)) 461 if (!capable(CAP_NET_ADMIN))
@@ -394,11 +464,16 @@ static ssize_t rfkill_claim_store(struct device *dev,
394 if (rfkill->user_claim_unsupported) 464 if (rfkill->user_claim_unsupported)
395 return -EOPNOTSUPP; 465 return -EOPNOTSUPP;
396 466
467 error = strict_strtoul(buf, 0, &claim_tmp);
468 if (error)
469 return error;
470 claim = !!claim_tmp;
471
397 /* 472 /*
398 * Take the global lock to make sure the kernel is not in 473 * Take the global lock to make sure the kernel is not in
399 * the middle of rfkill_switch_all 474 * the middle of rfkill_switch_all
400 */ 475 */
401 error = mutex_lock_interruptible(&rfkill_mutex); 476 error = mutex_lock_interruptible(&rfkill_global_mutex);
402 if (error) 477 if (error)
403 return error; 478 return error;
404 479
@@ -406,14 +481,14 @@ static ssize_t rfkill_claim_store(struct device *dev,
406 if (!claim) { 481 if (!claim) {
407 mutex_lock(&rfkill->mutex); 482 mutex_lock(&rfkill->mutex);
408 rfkill_toggle_radio(rfkill, 483 rfkill_toggle_radio(rfkill,
409 rfkill_states[rfkill->type], 484 rfkill_global_states[rfkill->type].current_state,
410 0); 485 0);
411 mutex_unlock(&rfkill->mutex); 486 mutex_unlock(&rfkill->mutex);
412 } 487 }
413 rfkill->user_claim = claim; 488 rfkill->user_claim = claim;
414 } 489 }
415 490
416 mutex_unlock(&rfkill_mutex); 491 mutex_unlock(&rfkill_global_mutex);
417 492
418 return error ? error : count; 493 return error ? error : count;
419} 494}
@@ -437,21 +512,9 @@ static void rfkill_release(struct device *dev)
437#ifdef CONFIG_PM 512#ifdef CONFIG_PM
438static int rfkill_suspend(struct device *dev, pm_message_t state) 513static int rfkill_suspend(struct device *dev, pm_message_t state)
439{ 514{
440 struct rfkill *rfkill = to_rfkill(dev); 515 /* mark class device as suspended */
441 516 if (dev->power.power_state.event != state.event)
442 if (dev->power.power_state.event != state.event) {
443 if (state.event & PM_EVENT_SLEEP) {
444 /* Stop transmitter, keep state, no notifies */
445 update_rfkill_state(rfkill);
446
447 mutex_lock(&rfkill->mutex);
448 rfkill->toggle_radio(rfkill->data,
449 RFKILL_STATE_SOFT_BLOCKED);
450 mutex_unlock(&rfkill->mutex);
451 }
452
453 dev->power.power_state = state; 517 dev->power.power_state = state;
454 }
455 518
456 return 0; 519 return 0;
457} 520}
@@ -525,24 +588,60 @@ static struct class rfkill_class = {
525 .dev_uevent = rfkill_dev_uevent, 588 .dev_uevent = rfkill_dev_uevent,
526}; 589};
527 590
591static int rfkill_check_duplicity(const struct rfkill *rfkill)
592{
593 struct rfkill *p;
594 unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
595
596 memset(seen, 0, sizeof(seen));
597
598 list_for_each_entry(p, &rfkill_list, node) {
599 if (WARN((p == rfkill), KERN_WARNING
600 "rfkill: illegal attempt to register "
601 "an already registered rfkill struct\n"))
602 return -EEXIST;
603 set_bit(p->type, seen);
604 }
605
606 /* 0: first switch of its kind */
607 return test_bit(rfkill->type, seen);
608}
609
528static int rfkill_add_switch(struct rfkill *rfkill) 610static int rfkill_add_switch(struct rfkill *rfkill)
529{ 611{
530 mutex_lock(&rfkill_mutex); 612 int error;
613
614 mutex_lock(&rfkill_global_mutex);
615
616 error = rfkill_check_duplicity(rfkill);
617 if (error < 0)
618 goto unlock_out;
531 619
532 rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type], 0); 620 if (!error) {
621 /* lock default after first use */
622 set_bit(rfkill->type, rfkill_states_lockdflt);
623 rfkill_global_states[rfkill->type].current_state =
624 rfkill_global_states[rfkill->type].default_state;
625 }
626
627 rfkill_toggle_radio(rfkill,
628 rfkill_global_states[rfkill->type].current_state,
629 0);
533 630
534 list_add_tail(&rfkill->node, &rfkill_list); 631 list_add_tail(&rfkill->node, &rfkill_list);
535 632
536 mutex_unlock(&rfkill_mutex); 633 error = 0;
634unlock_out:
635 mutex_unlock(&rfkill_global_mutex);
537 636
538 return 0; 637 return error;
539} 638}
540 639
541static void rfkill_remove_switch(struct rfkill *rfkill) 640static void rfkill_remove_switch(struct rfkill *rfkill)
542{ 641{
543 mutex_lock(&rfkill_mutex); 642 mutex_lock(&rfkill_global_mutex);
544 list_del_init(&rfkill->node); 643 list_del_init(&rfkill->node);
545 mutex_unlock(&rfkill_mutex); 644 mutex_unlock(&rfkill_global_mutex);
546 645
547 mutex_lock(&rfkill->mutex); 646 mutex_lock(&rfkill->mutex);
548 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); 647 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
@@ -562,11 +661,18 @@ static void rfkill_remove_switch(struct rfkill *rfkill)
562 * NOTE: If registration fails the structure shoudl be freed by calling 661 * NOTE: If registration fails the structure shoudl be freed by calling
563 * rfkill_free() otherwise rfkill_unregister() should be used. 662 * rfkill_free() otherwise rfkill_unregister() should be used.
564 */ 663 */
565struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type) 664struct rfkill * __must_check rfkill_allocate(struct device *parent,
665 enum rfkill_type type)
566{ 666{
567 struct rfkill *rfkill; 667 struct rfkill *rfkill;
568 struct device *dev; 668 struct device *dev;
569 669
670 if (WARN((type >= RFKILL_TYPE_MAX),
671 KERN_WARNING
672 "rfkill: illegal type %d passed as parameter "
673 "to rfkill_allocate\n", type))
674 return NULL;
675
570 rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); 676 rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL);
571 if (!rfkill) 677 if (!rfkill)
572 return NULL; 678 return NULL;
@@ -633,15 +739,18 @@ static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
633 * structure needs to be registered. Immediately from registration the 739 * structure needs to be registered. Immediately from registration the
634 * switch driver should be able to service calls to toggle_radio. 740 * switch driver should be able to service calls to toggle_radio.
635 */ 741 */
636int rfkill_register(struct rfkill *rfkill) 742int __must_check rfkill_register(struct rfkill *rfkill)
637{ 743{
638 static atomic_t rfkill_no = ATOMIC_INIT(0); 744 static atomic_t rfkill_no = ATOMIC_INIT(0);
639 struct device *dev = &rfkill->dev; 745 struct device *dev = &rfkill->dev;
640 int error; 746 int error;
641 747
642 if (!rfkill->toggle_radio) 748 if (WARN((!rfkill || !rfkill->toggle_radio ||
643 return -EINVAL; 749 rfkill->type >= RFKILL_TYPE_MAX ||
644 if (rfkill->type >= RFKILL_TYPE_MAX) 750 rfkill->state >= RFKILL_STATE_MAX),
751 KERN_WARNING
752 "rfkill: attempt to register a "
753 "badly initialized rfkill struct\n"))
645 return -EINVAL; 754 return -EINVAL;
646 755
647 snprintf(dev->bus_id, sizeof(dev->bus_id), 756 snprintf(dev->bus_id, sizeof(dev->bus_id),
@@ -676,6 +785,7 @@ EXPORT_SYMBOL(rfkill_register);
676 */ 785 */
677void rfkill_unregister(struct rfkill *rfkill) 786void rfkill_unregister(struct rfkill *rfkill)
678{ 787{
788 BUG_ON(!rfkill);
679 device_del(&rfkill->dev); 789 device_del(&rfkill->dev);
680 rfkill_remove_switch(rfkill); 790 rfkill_remove_switch(rfkill);
681 rfkill_led_trigger_unregister(rfkill); 791 rfkill_led_trigger_unregister(rfkill);
@@ -683,6 +793,56 @@ void rfkill_unregister(struct rfkill *rfkill)
683} 793}
684EXPORT_SYMBOL(rfkill_unregister); 794EXPORT_SYMBOL(rfkill_unregister);
685 795
796/**
797 * rfkill_set_default - set initial value for a switch type
798 * @type - the type of switch to set the default state of
799 * @state - the new default state for that group of switches
800 *
801 * Sets the initial state rfkill should use for a given type.
802 * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED
803 * and RFKILL_STATE_UNBLOCKED.
804 *
805 * This function is meant to be used by platform drivers for platforms
806 * that can save switch state across power down/reboot.
807 *
808 * The default state for each switch type can be changed exactly once.
809 * After a switch of that type is registered, the default state cannot
810 * be changed anymore. This guards against multiple drivers it the
811 * same platform trying to set the initial switch default state, which
812 * is not allowed.
813 *
814 * Returns -EPERM if the state has already been set once or is in use,
815 * so drivers likely want to either ignore or at most printk(KERN_NOTICE)
816 * if this function returns -EPERM.
817 *
818 * Returns 0 if the new default state was set, or an error if it
819 * could not be set.
820 */
821int rfkill_set_default(enum rfkill_type type, enum rfkill_state state)
822{
823 int error;
824
825 if (WARN((type >= RFKILL_TYPE_MAX ||
826 (state != RFKILL_STATE_SOFT_BLOCKED &&
827 state != RFKILL_STATE_UNBLOCKED)),
828 KERN_WARNING
829 "rfkill: illegal state %d or type %d passed as "
830 "parameter to rfkill_set_default\n", state, type))
831 return -EINVAL;
832
833 mutex_lock(&rfkill_global_mutex);
834
835 if (!test_and_set_bit(type, rfkill_states_lockdflt)) {
836 rfkill_global_states[type].default_state = state;
837 error = 0;
838 } else
839 error = -EPERM;
840
841 mutex_unlock(&rfkill_global_mutex);
842 return error;
843}
844EXPORT_SYMBOL_GPL(rfkill_set_default);
845
686/* 846/*
687 * Rfkill module initialization/deinitialization. 847 * Rfkill module initialization/deinitialization.
688 */ 848 */
@@ -696,8 +856,8 @@ static int __init rfkill_init(void)
696 rfkill_default_state != RFKILL_STATE_UNBLOCKED) 856 rfkill_default_state != RFKILL_STATE_UNBLOCKED)
697 return -EINVAL; 857 return -EINVAL;
698 858
699 for (i = 0; i < ARRAY_SIZE(rfkill_states); i++) 859 for (i = 0; i < RFKILL_TYPE_MAX; i++)
700 rfkill_states[i] = rfkill_default_state; 860 rfkill_global_states[i].default_state = rfkill_default_state;
701 861
702 error = class_register(&rfkill_class); 862 error = class_register(&rfkill_class);
703 if (error) { 863 if (error) {
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 9437b27ff84d..6767e54155db 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -106,6 +106,15 @@ config NET_SCH_PRIO
106 To compile this code as a module, choose M here: the 106 To compile this code as a module, choose M here: the
107 module will be called sch_prio. 107 module will be called sch_prio.
108 108
109config NET_SCH_MULTIQ
110 tristate "Hardware Multiqueue-aware Multi Band Queuing (MULTIQ)"
111 ---help---
112 Say Y here if you want to use an n-band queue packet scheduler
113 to support devices that have multiple hardware transmit queues.
114
115 To compile this code as a module, choose M here: the
116 module will be called sch_multiq.
117
109config NET_SCH_RED 118config NET_SCH_RED
110 tristate "Random Early Detection (RED)" 119 tristate "Random Early Detection (RED)"
111 ---help--- 120 ---help---
@@ -476,6 +485,17 @@ config NET_ACT_SIMP
476 To compile this code as a module, choose M here: the 485 To compile this code as a module, choose M here: the
477 module will be called simple. 486 module will be called simple.
478 487
488config NET_ACT_SKBEDIT
489 tristate "SKB Editing"
490 depends on NET_CLS_ACT
491 ---help---
492 Say Y here to change skb priority or queue_mapping settings.
493
494 If unsure, say N.
495
496 To compile this code as a module, choose M here: the
497 module will be called skbedit.
498
479config NET_CLS_IND 499config NET_CLS_IND
480 bool "Incoming device classification" 500 bool "Incoming device classification"
481 depends on NET_CLS_U32 || NET_CLS_FW 501 depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 1d2b0f7df848..e60c9925b269 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o
14obj-$(CONFIG_NET_ACT_NAT) += act_nat.o 14obj-$(CONFIG_NET_ACT_NAT) += act_nat.o
15obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o 15obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
16obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o 16obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
17obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
17obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o 18obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
18obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o 19obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
19obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o 20obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
26obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o 27obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
27obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o 28obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
28obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o 29obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
30obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
29obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o 31obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
30obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o 32obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
31obj-$(CONFIG_NET_CLS_U32) += cls_u32.o 33obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
new file mode 100644
index 000000000000..fe9777e77f35
--- /dev/null
+++ b/net/sched/act_skbedit.c
@@ -0,0 +1,203 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/skbuff.h>
24#include <linux/rtnetlink.h>
25#include <net/netlink.h>
26#include <net/pkt_sched.h>
27
28#include <linux/tc_act/tc_skbedit.h>
29#include <net/tc_act/tc_skbedit.h>
30
31#define SKBEDIT_TAB_MASK 15
32static struct tcf_common *tcf_skbedit_ht[SKBEDIT_TAB_MASK + 1];
33static u32 skbedit_idx_gen;
34static DEFINE_RWLOCK(skbedit_lock);
35
36static struct tcf_hashinfo skbedit_hash_info = {
37 .htab = tcf_skbedit_ht,
38 .hmask = SKBEDIT_TAB_MASK,
39 .lock = &skbedit_lock,
40};
41
42static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
43 struct tcf_result *res)
44{
45 struct tcf_skbedit *d = a->priv;
46
47 spin_lock(&d->tcf_lock);
48 d->tcf_tm.lastuse = jiffies;
49 d->tcf_bstats.bytes += qdisc_pkt_len(skb);
50 d->tcf_bstats.packets++;
51
52 if (d->flags & SKBEDIT_F_PRIORITY)
53 skb->priority = d->priority;
54 if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
55 skb->dev->real_num_tx_queues > d->queue_mapping)
56 skb_set_queue_mapping(skb, d->queue_mapping);
57
58 spin_unlock(&d->tcf_lock);
59 return d->tcf_action;
60}
61
62static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
63 [TCA_SKBEDIT_PARMS] = { .len = sizeof(struct tc_skbedit) },
64 [TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) },
65 [TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) },
66};
67
68static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
69 struct tc_action *a, int ovr, int bind)
70{
71 struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
72 struct tc_skbedit *parm;
73 struct tcf_skbedit *d;
74 struct tcf_common *pc;
75 u32 flags = 0, *priority = NULL;
76 u16 *queue_mapping = NULL;
77 int ret = 0, err;
78
79 if (nla == NULL)
80 return -EINVAL;
81
82 err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy);
83 if (err < 0)
84 return err;
85
86 if (tb[TCA_SKBEDIT_PARMS] == NULL)
87 return -EINVAL;
88
89 if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
90 flags |= SKBEDIT_F_PRIORITY;
91 priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
92 }
93
94 if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
95 flags |= SKBEDIT_F_QUEUE_MAPPING;
96 queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
97 }
98 if (!flags)
99 return -EINVAL;
100
101 parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
102
103 pc = tcf_hash_check(parm->index, a, bind, &skbedit_hash_info);
104 if (!pc) {
105 pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
106 &skbedit_idx_gen, &skbedit_hash_info);
107 if (unlikely(!pc))
108 return -ENOMEM;
109
110 d = to_skbedit(pc);
111 ret = ACT_P_CREATED;
112 } else {
113 d = to_skbedit(pc);
114 if (!ovr) {
115 tcf_hash_release(pc, bind, &skbedit_hash_info);
116 return -EEXIST;
117 }
118 }
119
120 spin_lock_bh(&d->tcf_lock);
121
122 d->flags = flags;
123 if (flags & SKBEDIT_F_PRIORITY)
124 d->priority = *priority;
125 if (flags & SKBEDIT_F_QUEUE_MAPPING)
126 d->queue_mapping = *queue_mapping;
127 d->tcf_action = parm->action;
128
129 spin_unlock_bh(&d->tcf_lock);
130
131 if (ret == ACT_P_CREATED)
132 tcf_hash_insert(pc, &skbedit_hash_info);
133 return ret;
134}
135
136static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
137{
138 struct tcf_skbedit *d = a->priv;
139
140 if (d)
141 return tcf_hash_release(&d->common, bind, &skbedit_hash_info);
142 return 0;
143}
144
145static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
146 int bind, int ref)
147{
148 unsigned char *b = skb_tail_pointer(skb);
149 struct tcf_skbedit *d = a->priv;
150 struct tc_skbedit opt;
151 struct tcf_t t;
152
153 opt.index = d->tcf_index;
154 opt.refcnt = d->tcf_refcnt - ref;
155 opt.bindcnt = d->tcf_bindcnt - bind;
156 opt.action = d->tcf_action;
157 NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
158 if (d->flags & SKBEDIT_F_PRIORITY)
159 NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
160 &d->priority);
161 if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
162 NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
163 sizeof(d->queue_mapping), &d->queue_mapping);
164 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
165 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
166 t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
167 NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);
168 return skb->len;
169
170nla_put_failure:
171 nlmsg_trim(skb, b);
172 return -1;
173}
174
175static struct tc_action_ops act_skbedit_ops = {
176 .kind = "skbedit",
177 .hinfo = &skbedit_hash_info,
178 .type = TCA_ACT_SKBEDIT,
179 .capab = TCA_CAP_NONE,
180 .owner = THIS_MODULE,
181 .act = tcf_skbedit,
182 .dump = tcf_skbedit_dump,
183 .cleanup = tcf_skbedit_cleanup,
184 .init = tcf_skbedit_init,
185 .walk = tcf_generic_walker,
186};
187
188MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
189MODULE_DESCRIPTION("SKB Editing");
190MODULE_LICENSE("GPL");
191
192static int __init skbedit_init_module(void)
193{
194 return tcf_register_action(&act_skbedit_ops);
195}
196
197static void __exit skbedit_cleanup_module(void)
198{
199 tcf_unregister_action(&act_skbedit_ops);
200}
201
202module_init(skbedit_init_module);
203module_exit(skbedit_cleanup_module);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5cafdd4c8018..8eb79e92e94c 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -205,7 +205,7 @@ replay:
205 } 205 }
206 } 206 }
207 207
208 root_lock = qdisc_root_lock(q); 208 root_lock = qdisc_root_sleeping_lock(q);
209 209
210 if (tp == NULL) { 210 if (tp == NULL) {
211 /* Proto-tcf does not exist, create new one */ 211 /* Proto-tcf does not exist, create new one */
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 8f63a1a94014..0ebaff637e31 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -67,9 +67,9 @@ static inline u32 addr_fold(void *addr)
67static u32 flow_get_src(const struct sk_buff *skb) 67static u32 flow_get_src(const struct sk_buff *skb)
68{ 68{
69 switch (skb->protocol) { 69 switch (skb->protocol) {
70 case __constant_htons(ETH_P_IP): 70 case htons(ETH_P_IP):
71 return ntohl(ip_hdr(skb)->saddr); 71 return ntohl(ip_hdr(skb)->saddr);
72 case __constant_htons(ETH_P_IPV6): 72 case htons(ETH_P_IPV6):
73 return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]); 73 return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
74 default: 74 default:
75 return addr_fold(skb->sk); 75 return addr_fold(skb->sk);
@@ -79,9 +79,9 @@ static u32 flow_get_src(const struct sk_buff *skb)
79static u32 flow_get_dst(const struct sk_buff *skb) 79static u32 flow_get_dst(const struct sk_buff *skb)
80{ 80{
81 switch (skb->protocol) { 81 switch (skb->protocol) {
82 case __constant_htons(ETH_P_IP): 82 case htons(ETH_P_IP):
83 return ntohl(ip_hdr(skb)->daddr); 83 return ntohl(ip_hdr(skb)->daddr);
84 case __constant_htons(ETH_P_IPV6): 84 case htons(ETH_P_IPV6):
85 return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); 85 return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
86 default: 86 default:
87 return addr_fold(skb->dst) ^ (__force u16)skb->protocol; 87 return addr_fold(skb->dst) ^ (__force u16)skb->protocol;
@@ -91,9 +91,9 @@ static u32 flow_get_dst(const struct sk_buff *skb)
91static u32 flow_get_proto(const struct sk_buff *skb) 91static u32 flow_get_proto(const struct sk_buff *skb)
92{ 92{
93 switch (skb->protocol) { 93 switch (skb->protocol) {
94 case __constant_htons(ETH_P_IP): 94 case htons(ETH_P_IP):
95 return ip_hdr(skb)->protocol; 95 return ip_hdr(skb)->protocol;
96 case __constant_htons(ETH_P_IPV6): 96 case htons(ETH_P_IPV6):
97 return ipv6_hdr(skb)->nexthdr; 97 return ipv6_hdr(skb)->nexthdr;
98 default: 98 default:
99 return 0; 99 return 0;
@@ -120,7 +120,7 @@ static u32 flow_get_proto_src(const struct sk_buff *skb)
120 u32 res = 0; 120 u32 res = 0;
121 121
122 switch (skb->protocol) { 122 switch (skb->protocol) {
123 case __constant_htons(ETH_P_IP): { 123 case htons(ETH_P_IP): {
124 struct iphdr *iph = ip_hdr(skb); 124 struct iphdr *iph = ip_hdr(skb);
125 125
126 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 126 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
@@ -128,7 +128,7 @@ static u32 flow_get_proto_src(const struct sk_buff *skb)
128 res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4)); 128 res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
129 break; 129 break;
130 } 130 }
131 case __constant_htons(ETH_P_IPV6): { 131 case htons(ETH_P_IPV6): {
132 struct ipv6hdr *iph = ipv6_hdr(skb); 132 struct ipv6hdr *iph = ipv6_hdr(skb);
133 133
134 if (has_ports(iph->nexthdr)) 134 if (has_ports(iph->nexthdr))
@@ -147,7 +147,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
147 u32 res = 0; 147 u32 res = 0;
148 148
149 switch (skb->protocol) { 149 switch (skb->protocol) {
150 case __constant_htons(ETH_P_IP): { 150 case htons(ETH_P_IP): {
151 struct iphdr *iph = ip_hdr(skb); 151 struct iphdr *iph = ip_hdr(skb);
152 152
153 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 153 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
@@ -155,7 +155,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
155 res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2)); 155 res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
156 break; 156 break;
157 } 157 }
158 case __constant_htons(ETH_P_IPV6): { 158 case htons(ETH_P_IPV6): {
159 struct ipv6hdr *iph = ipv6_hdr(skb); 159 struct ipv6hdr *iph = ipv6_hdr(skb);
160 160
161 if (has_ports(iph->nexthdr)) 161 if (has_ports(iph->nexthdr))
@@ -213,9 +213,9 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
213static u32 flow_get_nfct_src(const struct sk_buff *skb) 213static u32 flow_get_nfct_src(const struct sk_buff *skb)
214{ 214{
215 switch (skb->protocol) { 215 switch (skb->protocol) {
216 case __constant_htons(ETH_P_IP): 216 case htons(ETH_P_IP):
217 return ntohl(CTTUPLE(skb, src.u3.ip)); 217 return ntohl(CTTUPLE(skb, src.u3.ip));
218 case __constant_htons(ETH_P_IPV6): 218 case htons(ETH_P_IPV6):
219 return ntohl(CTTUPLE(skb, src.u3.ip6[3])); 219 return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
220 } 220 }
221fallback: 221fallback:
@@ -225,9 +225,9 @@ fallback:
225static u32 flow_get_nfct_dst(const struct sk_buff *skb) 225static u32 flow_get_nfct_dst(const struct sk_buff *skb)
226{ 226{
227 switch (skb->protocol) { 227 switch (skb->protocol) {
228 case __constant_htons(ETH_P_IP): 228 case htons(ETH_P_IP):
229 return ntohl(CTTUPLE(skb, dst.u3.ip)); 229 return ntohl(CTTUPLE(skb, dst.u3.ip));
230 case __constant_htons(ETH_P_IPV6): 230 case htons(ETH_P_IPV6):
231 return ntohl(CTTUPLE(skb, dst.u3.ip6[3])); 231 return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
232 } 232 }
233fallback: 233fallback:
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 481260a4f10f..e3d8455eebc2 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -75,7 +75,7 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
75static inline 75static inline
76void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) 76void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
77{ 77{
78 spinlock_t *root_lock = qdisc_root_lock(q); 78 spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
79 79
80 spin_lock_bh(root_lock); 80 spin_lock_bh(root_lock);
81 memset(head->fastmap, 0, sizeof(head->fastmap)); 81 memset(head->fastmap, 0, sizeof(head->fastmap));
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index cc49c932641d..bc450397487a 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -14,6 +14,7 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/tc_ematch/tc_em_cmp.h> 16#include <linux/tc_ematch/tc_em_cmp.h>
17#include <asm/unaligned.h>
17#include <net/pkt_cls.h> 18#include <net/pkt_cls.h>
18 19
19static inline int cmp_needs_transformation(struct tcf_em_cmp *cmp) 20static inline int cmp_needs_transformation(struct tcf_em_cmp *cmp)
@@ -37,8 +38,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
37 break; 38 break;
38 39
39 case TCF_EM_ALIGN_U16: 40 case TCF_EM_ALIGN_U16:
40 val = *ptr << 8; 41 val = get_unaligned_be16(ptr);
41 val |= *(ptr+1);
42 42
43 if (cmp_needs_transformation(cmp)) 43 if (cmp_needs_transformation(cmp))
44 val = be16_to_cpu(val); 44 val = be16_to_cpu(val);
@@ -47,10 +47,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
47 case TCF_EM_ALIGN_U32: 47 case TCF_EM_ALIGN_U32:
48 /* Worth checking boundries? The branching seems 48 /* Worth checking boundries? The branching seems
49 * to get worse. Visit again. */ 49 * to get worse. Visit again. */
50 val = *ptr << 24; 50 val = get_unaligned_be32(ptr);
51 val |= *(ptr+1) << 16;
52 val |= *(ptr+2) << 8;
53 val |= *(ptr+3);
54 51
55 if (cmp_needs_transformation(cmp)) 52 if (cmp_needs_transformation(cmp))
56 val = be32_to_cpu(val); 53 val = be32_to_cpu(val);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 506b709510b6..1122c952aa99 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1169,8 +1169,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1169 if (q->stab && qdisc_dump_stab(skb, q->stab) < 0) 1169 if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
1170 goto nla_put_failure; 1170 goto nla_put_failure;
1171 1171
1172 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 1172 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1173 TCA_XSTATS, qdisc_root_lock(q), &d) < 0) 1173 qdisc_root_sleeping_lock(q), &d) < 0)
1174 goto nla_put_failure; 1174 goto nla_put_failure;
1175 1175
1176 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) 1176 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1461,8 +1461,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1461 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) 1461 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1462 goto nla_put_failure; 1462 goto nla_put_failure;
1463 1463
1464 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 1464 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1465 TCA_XSTATS, qdisc_root_lock(q), &d) < 0) 1465 qdisc_root_sleeping_lock(q), &d) < 0)
1466 goto nla_put_failure; 1466 goto nla_put_failure;
1467 1467
1468 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) 1468 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 9b720adedead..8b06fa900482 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1754,7 +1754,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
1754 1754
1755 if (--cl->refcnt == 0) { 1755 if (--cl->refcnt == 0) {
1756#ifdef CONFIG_NET_CLS_ACT 1756#ifdef CONFIG_NET_CLS_ACT
1757 spinlock_t *root_lock = qdisc_root_lock(sch); 1757 spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1758 struct cbq_sched_data *q = qdisc_priv(sch); 1758 struct cbq_sched_data *q = qdisc_priv(sch);
1759 1759
1760 spin_lock_bh(root_lock); 1760 spin_lock_bh(root_lock);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index edd1298f85f6..ba43aab3a851 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -202,7 +202,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
202 202
203 if (p->set_tc_index) { 203 if (p->set_tc_index) {
204 switch (skb->protocol) { 204 switch (skb->protocol) {
205 case __constant_htons(ETH_P_IP): 205 case htons(ETH_P_IP):
206 if (skb_cow_head(skb, sizeof(struct iphdr))) 206 if (skb_cow_head(skb, sizeof(struct iphdr)))
207 goto drop; 207 goto drop;
208 208
@@ -210,7 +210,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
210 & ~INET_ECN_MASK; 210 & ~INET_ECN_MASK;
211 break; 211 break;
212 212
213 case __constant_htons(ETH_P_IPV6): 213 case htons(ETH_P_IPV6):
214 if (skb_cow_head(skb, sizeof(struct ipv6hdr))) 214 if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
215 goto drop; 215 goto drop;
216 216
@@ -289,11 +289,11 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
289 pr_debug("index %d->%d\n", skb->tc_index, index); 289 pr_debug("index %d->%d\n", skb->tc_index, index);
290 290
291 switch (skb->protocol) { 291 switch (skb->protocol) {
292 case __constant_htons(ETH_P_IP): 292 case htons(ETH_P_IP):
293 ipv4_change_dsfield(ip_hdr(skb), p->mask[index], 293 ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
294 p->value[index]); 294 p->value[index]);
295 break; 295 break;
296 case __constant_htons(ETH_P_IPV6): 296 case htons(ETH_P_IPV6):
297 ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index], 297 ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index],
298 p->value[index]); 298 p->value[index]);
299 break; 299 break;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9634091ee2f0..5e7e0bd38fe8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -44,10 +44,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
44 44
45static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) 45static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
46{ 46{
47 if (unlikely(skb->next)) 47 __skb_queue_head(&q->requeue, skb);
48 q->gso_skb = skb;
49 else
50 q->ops->requeue(skb, q);
51 48
52 __netif_schedule(q); 49 __netif_schedule(q);
53 return 0; 50 return 0;
@@ -55,12 +52,21 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
55 52
56static inline struct sk_buff *dequeue_skb(struct Qdisc *q) 53static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
57{ 54{
58 struct sk_buff *skb; 55 struct sk_buff *skb = skb_peek(&q->requeue);
56
57 if (unlikely(skb)) {
58 struct net_device *dev = qdisc_dev(q);
59 struct netdev_queue *txq;
59 60
60 if ((skb = q->gso_skb)) 61 /* check the reason of requeuing without tx lock first */
61 q->gso_skb = NULL; 62 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
62 else 63 if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
64 __skb_unlink(skb, &q->requeue);
65 else
66 skb = NULL;
67 } else {
63 skb = q->dequeue(q); 68 skb = q->dequeue(q);
69 }
64 70
65 return skb; 71 return skb;
66} 72}
@@ -215,10 +221,9 @@ static void dev_watchdog(unsigned long arg)
215 time_after(jiffies, (dev->trans_start + 221 time_after(jiffies, (dev->trans_start +
216 dev->watchdog_timeo))) { 222 dev->watchdog_timeo))) {
217 char drivername[64]; 223 char drivername[64];
218 printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n", 224 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
219 dev->name, netdev_drivername(dev, drivername, 64)); 225 dev->name, netdev_drivername(dev, drivername, 64));
220 dev->tx_timeout(dev); 226 dev->tx_timeout(dev);
221 WARN_ON_ONCE(1);
222 } 227 }
223 if (!mod_timer(&dev->watchdog_timer, 228 if (!mod_timer(&dev->watchdog_timer,
224 round_jiffies(jiffies + 229 round_jiffies(jiffies +
@@ -328,6 +333,7 @@ struct Qdisc noop_qdisc = {
328 .flags = TCQ_F_BUILTIN, 333 .flags = TCQ_F_BUILTIN,
329 .ops = &noop_qdisc_ops, 334 .ops = &noop_qdisc_ops,
330 .list = LIST_HEAD_INIT(noop_qdisc.list), 335 .list = LIST_HEAD_INIT(noop_qdisc.list),
336 .requeue.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
331 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), 337 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
332 .dev_queue = &noop_netdev_queue, 338 .dev_queue = &noop_netdev_queue,
333}; 339};
@@ -353,6 +359,7 @@ static struct Qdisc noqueue_qdisc = {
353 .flags = TCQ_F_BUILTIN, 359 .flags = TCQ_F_BUILTIN,
354 .ops = &noqueue_qdisc_ops, 360 .ops = &noqueue_qdisc_ops,
355 .list = LIST_HEAD_INIT(noqueue_qdisc.list), 361 .list = LIST_HEAD_INIT(noqueue_qdisc.list),
362 .requeue.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
356 .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock), 363 .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
357 .dev_queue = &noqueue_netdev_queue, 364 .dev_queue = &noqueue_netdev_queue,
358}; 365};
@@ -473,6 +480,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
473 sch->padded = (char *) sch - (char *) p; 480 sch->padded = (char *) sch - (char *) p;
474 481
475 INIT_LIST_HEAD(&sch->list); 482 INIT_LIST_HEAD(&sch->list);
483 skb_queue_head_init(&sch->requeue);
476 skb_queue_head_init(&sch->q); 484 skb_queue_head_init(&sch->q);
477 sch->ops = ops; 485 sch->ops = ops;
478 sch->enqueue = ops->enqueue; 486 sch->enqueue = ops->enqueue;
@@ -540,7 +548,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
540 module_put(ops->owner); 548 module_put(ops->owner);
541 dev_put(qdisc_dev(qdisc)); 549 dev_put(qdisc_dev(qdisc));
542 550
543 kfree_skb(qdisc->gso_skb); 551 __skb_queue_purge(&qdisc->requeue);
544 552
545 kfree((char *) qdisc - qdisc->padded); 553 kfree((char *) qdisc - qdisc->padded);
546} 554}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 97d4761cc31e..d14f02056ae6 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1043,7 +1043,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1043 1043
1044static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) 1044static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1045{ 1045{
1046 spinlock_t *root_lock = qdisc_root_lock(sch); 1046 spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1047 struct htb_sched *q = qdisc_priv(sch); 1047 struct htb_sched *q = qdisc_priv(sch);
1048 struct nlattr *nest; 1048 struct nlattr *nest;
1049 struct tc_htb_glob gopt; 1049 struct tc_htb_glob gopt;
@@ -1075,7 +1075,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1075 struct sk_buff *skb, struct tcmsg *tcm) 1075 struct sk_buff *skb, struct tcmsg *tcm)
1076{ 1076{
1077 struct htb_class *cl = (struct htb_class *)arg; 1077 struct htb_class *cl = (struct htb_class *)arg;
1078 spinlock_t *root_lock = qdisc_root_lock(sch); 1078 spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1079 struct nlattr *nest; 1079 struct nlattr *nest;
1080 struct tc_htb_opt opt; 1080 struct tc_htb_opt opt;
1081 1081
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
new file mode 100644
index 000000000000..915f3149dde2
--- /dev/null
+++ b/net/sched/sch_multiq.c
@@ -0,0 +1,477 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
18 */
19
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/string.h>
24#include <linux/errno.h>
25#include <linux/skbuff.h>
26#include <net/netlink.h>
27#include <net/pkt_sched.h>
28
29
30struct multiq_sched_data {
31 u16 bands;
32 u16 max_bands;
33 u16 curband;
34 struct tcf_proto *filter_list;
35 struct Qdisc **queues;
36};
37
38
39static struct Qdisc *
40multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
41{
42 struct multiq_sched_data *q = qdisc_priv(sch);
43 u32 band;
44 struct tcf_result res;
45 int err;
46
47 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
48 err = tc_classify(skb, q->filter_list, &res);
49#ifdef CONFIG_NET_CLS_ACT
50 switch (err) {
51 case TC_ACT_STOLEN:
52 case TC_ACT_QUEUED:
53 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
54 case TC_ACT_SHOT:
55 return NULL;
56 }
57#endif
58 band = skb_get_queue_mapping(skb);
59
60 if (band >= q->bands)
61 return q->queues[0];
62
63 return q->queues[band];
64}
65
66static int
67multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
68{
69 struct Qdisc *qdisc;
70 int ret;
71
72 qdisc = multiq_classify(skb, sch, &ret);
73#ifdef CONFIG_NET_CLS_ACT
74 if (qdisc == NULL) {
75
76 if (ret & __NET_XMIT_BYPASS)
77 sch->qstats.drops++;
78 kfree_skb(skb);
79 return ret;
80 }
81#endif
82
83 ret = qdisc_enqueue(skb, qdisc);
84 if (ret == NET_XMIT_SUCCESS) {
85 sch->bstats.bytes += qdisc_pkt_len(skb);
86 sch->bstats.packets++;
87 sch->q.qlen++;
88 return NET_XMIT_SUCCESS;
89 }
90 if (net_xmit_drop_count(ret))
91 sch->qstats.drops++;
92 return ret;
93}
94
95
96static int
97multiq_requeue(struct sk_buff *skb, struct Qdisc *sch)
98{
99 struct Qdisc *qdisc;
100 struct multiq_sched_data *q = qdisc_priv(sch);
101 int ret;
102
103 qdisc = multiq_classify(skb, sch, &ret);
104#ifdef CONFIG_NET_CLS_ACT
105 if (qdisc == NULL) {
106 if (ret & __NET_XMIT_BYPASS)
107 sch->qstats.drops++;
108 kfree_skb(skb);
109 return ret;
110 }
111#endif
112
113 ret = qdisc->ops->requeue(skb, qdisc);
114 if (ret == NET_XMIT_SUCCESS) {
115 sch->q.qlen++;
116 sch->qstats.requeues++;
117 if (q->curband)
118 q->curband--;
119 else
120 q->curband = q->bands - 1;
121 return NET_XMIT_SUCCESS;
122 }
123 if (net_xmit_drop_count(ret))
124 sch->qstats.drops++;
125 return ret;
126}
127
128
129static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
130{
131 struct multiq_sched_data *q = qdisc_priv(sch);
132 struct Qdisc *qdisc;
133 struct sk_buff *skb;
134 int band;
135
136 for (band = 0; band < q->bands; band++) {
137 /* cycle through bands to ensure fairness */
138 q->curband++;
139 if (q->curband >= q->bands)
140 q->curband = 0;
141
142 /* Check that target subqueue is available before
143 * pulling an skb to avoid excessive requeues
144 */
145 if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
146 qdisc = q->queues[q->curband];
147 skb = qdisc->dequeue(qdisc);
148 if (skb) {
149 sch->q.qlen--;
150 return skb;
151 }
152 }
153 }
154 return NULL;
155
156}
157
158static unsigned int multiq_drop(struct Qdisc *sch)
159{
160 struct multiq_sched_data *q = qdisc_priv(sch);
161 int band;
162 unsigned int len;
163 struct Qdisc *qdisc;
164
165 for (band = q->bands-1; band >= 0; band--) {
166 qdisc = q->queues[band];
167 if (qdisc->ops->drop) {
168 len = qdisc->ops->drop(qdisc);
169 if (len != 0) {
170 sch->q.qlen--;
171 return len;
172 }
173 }
174 }
175 return 0;
176}
177
178
179static void
180multiq_reset(struct Qdisc *sch)
181{
182 u16 band;
183 struct multiq_sched_data *q = qdisc_priv(sch);
184
185 for (band = 0; band < q->bands; band++)
186 qdisc_reset(q->queues[band]);
187 sch->q.qlen = 0;
188 q->curband = 0;
189}
190
191static void
192multiq_destroy(struct Qdisc *sch)
193{
194 int band;
195 struct multiq_sched_data *q = qdisc_priv(sch);
196
197 tcf_destroy_chain(&q->filter_list);
198 for (band = 0; band < q->bands; band++)
199 qdisc_destroy(q->queues[band]);
200
201 kfree(q->queues);
202}
203
204static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
205{
206 struct multiq_sched_data *q = qdisc_priv(sch);
207 struct tc_multiq_qopt *qopt;
208 int i;
209
210 if (!netif_is_multiqueue(qdisc_dev(sch)))
211 return -EINVAL;
212 if (nla_len(opt) < sizeof(*qopt))
213 return -EINVAL;
214
215 qopt = nla_data(opt);
216
217 qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
218
219 sch_tree_lock(sch);
220 q->bands = qopt->bands;
221 for (i = q->bands; i < q->max_bands; i++) {
222 if (q->queues[i] != &noop_qdisc) {
223 struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
224 qdisc_tree_decrease_qlen(child, child->q.qlen);
225 qdisc_destroy(child);
226 }
227 }
228
229 sch_tree_unlock(sch);
230
231 for (i = 0; i < q->bands; i++) {
232 if (q->queues[i] == &noop_qdisc) {
233 struct Qdisc *child;
234 child = qdisc_create_dflt(qdisc_dev(sch),
235 sch->dev_queue,
236 &pfifo_qdisc_ops,
237 TC_H_MAKE(sch->handle,
238 i + 1));
239 if (child) {
240 sch_tree_lock(sch);
241 child = xchg(&q->queues[i], child);
242
243 if (child != &noop_qdisc) {
244 qdisc_tree_decrease_qlen(child,
245 child->q.qlen);
246 qdisc_destroy(child);
247 }
248 sch_tree_unlock(sch);
249 }
250 }
251 }
252 return 0;
253}
254
255static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
256{
257 struct multiq_sched_data *q = qdisc_priv(sch);
258 int i, err;
259
260 q->queues = NULL;
261
262 if (opt == NULL)
263 return -EINVAL;
264
265 q->max_bands = qdisc_dev(sch)->num_tx_queues;
266
267 q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
268 if (!q->queues)
269 return -ENOBUFS;
270 for (i = 0; i < q->max_bands; i++)
271 q->queues[i] = &noop_qdisc;
272
273 err = multiq_tune(sch,opt);
274
275 if (err)
276 kfree(q->queues);
277
278 return err;
279}
280
281static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
282{
283 struct multiq_sched_data *q = qdisc_priv(sch);
284 unsigned char *b = skb_tail_pointer(skb);
285 struct tc_multiq_qopt opt;
286
287 opt.bands = q->bands;
288 opt.max_bands = q->max_bands;
289
290 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
291
292 return skb->len;
293
294nla_put_failure:
295 nlmsg_trim(skb, b);
296 return -1;
297}
298
299static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
300 struct Qdisc **old)
301{
302 struct multiq_sched_data *q = qdisc_priv(sch);
303 unsigned long band = arg - 1;
304
305 if (band >= q->bands)
306 return -EINVAL;
307
308 if (new == NULL)
309 new = &noop_qdisc;
310
311 sch_tree_lock(sch);
312 *old = q->queues[band];
313 q->queues[band] = new;
314 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
315 qdisc_reset(*old);
316 sch_tree_unlock(sch);
317
318 return 0;
319}
320
321static struct Qdisc *
322multiq_leaf(struct Qdisc *sch, unsigned long arg)
323{
324 struct multiq_sched_data *q = qdisc_priv(sch);
325 unsigned long band = arg - 1;
326
327 if (band >= q->bands)
328 return NULL;
329
330 return q->queues[band];
331}
332
333static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
334{
335 struct multiq_sched_data *q = qdisc_priv(sch);
336 unsigned long band = TC_H_MIN(classid);
337
338 if (band - 1 >= q->bands)
339 return 0;
340 return band;
341}
342
343static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
344 u32 classid)
345{
346 return multiq_get(sch, classid);
347}
348
349
350static void multiq_put(struct Qdisc *q, unsigned long cl)
351{
352 return;
353}
354
355static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent,
356 struct nlattr **tca, unsigned long *arg)
357{
358 unsigned long cl = *arg;
359 struct multiq_sched_data *q = qdisc_priv(sch);
360
361 if (cl - 1 > q->bands)
362 return -ENOENT;
363 return 0;
364}
365
366static int multiq_delete(struct Qdisc *sch, unsigned long cl)
367{
368 struct multiq_sched_data *q = qdisc_priv(sch);
369 if (cl - 1 > q->bands)
370 return -ENOENT;
371 return 0;
372}
373
374
375static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
376 struct sk_buff *skb, struct tcmsg *tcm)
377{
378 struct multiq_sched_data *q = qdisc_priv(sch);
379
380 if (cl - 1 > q->bands)
381 return -ENOENT;
382 tcm->tcm_handle |= TC_H_MIN(cl);
383 if (q->queues[cl-1])
384 tcm->tcm_info = q->queues[cl-1]->handle;
385 return 0;
386}
387
388static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
389 struct gnet_dump *d)
390{
391 struct multiq_sched_data *q = qdisc_priv(sch);
392 struct Qdisc *cl_q;
393
394 cl_q = q->queues[cl - 1];
395 if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
396 gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
397 return -1;
398
399 return 0;
400}
401
402static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
403{
404 struct multiq_sched_data *q = qdisc_priv(sch);
405 int band;
406
407 if (arg->stop)
408 return;
409
410 for (band = 0; band < q->bands; band++) {
411 if (arg->count < arg->skip) {
412 arg->count++;
413 continue;
414 }
415 if (arg->fn(sch, band+1, arg) < 0) {
416 arg->stop = 1;
417 break;
418 }
419 arg->count++;
420 }
421}
422
423static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
424{
425 struct multiq_sched_data *q = qdisc_priv(sch);
426
427 if (cl)
428 return NULL;
429 return &q->filter_list;
430}
431
432static const struct Qdisc_class_ops multiq_class_ops = {
433 .graft = multiq_graft,
434 .leaf = multiq_leaf,
435 .get = multiq_get,
436 .put = multiq_put,
437 .change = multiq_change,
438 .delete = multiq_delete,
439 .walk = multiq_walk,
440 .tcf_chain = multiq_find_tcf,
441 .bind_tcf = multiq_bind,
442 .unbind_tcf = multiq_put,
443 .dump = multiq_dump_class,
444 .dump_stats = multiq_dump_class_stats,
445};
446
447static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
448 .next = NULL,
449 .cl_ops = &multiq_class_ops,
450 .id = "multiq",
451 .priv_size = sizeof(struct multiq_sched_data),
452 .enqueue = multiq_enqueue,
453 .dequeue = multiq_dequeue,
454 .requeue = multiq_requeue,
455 .drop = multiq_drop,
456 .init = multiq_init,
457 .reset = multiq_reset,
458 .destroy = multiq_destroy,
459 .change = multiq_tune,
460 .dump = multiq_dump,
461 .owner = THIS_MODULE,
462};
463
464static int __init multiq_module_init(void)
465{
466 return register_qdisc(&multiq_qdisc_ops);
467}
468
469static void __exit multiq_module_exit(void)
470{
471 unregister_qdisc(&multiq_qdisc_ops);
472}
473
474module_init(multiq_module_init)
475module_exit(multiq_module_exit)
476
477MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index fb0294d0b55e..a11959908d9a 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -341,7 +341,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
341 for (i = 0; i < n; i++) 341 for (i = 0; i < n; i++)
342 d->table[i] = data[i]; 342 d->table[i] = data[i];
343 343
344 root_lock = qdisc_root_lock(sch); 344 root_lock = qdisc_root_sleeping_lock(sch);
345 345
346 spin_lock_bh(root_lock); 346 spin_lock_bh(root_lock);
347 d = xchg(&q->delay_dist, d); 347 d = xchg(&q->delay_dist, d);
@@ -388,6 +388,20 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
388 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, 388 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
389}; 389};
390 390
391static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
392 const struct nla_policy *policy, int len)
393{
394 int nested_len = nla_len(nla) - NLA_ALIGN(len);
395
396 if (nested_len < 0)
397 return -EINVAL;
398 if (nested_len >= nla_attr_size(0))
399 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
400 nested_len, policy);
401 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
402 return 0;
403}
404
391/* Parse netlink message to set options */ 405/* Parse netlink message to set options */
392static int netem_change(struct Qdisc *sch, struct nlattr *opt) 406static int netem_change(struct Qdisc *sch, struct nlattr *opt)
393{ 407{
@@ -399,8 +413,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
399 if (opt == NULL) 413 if (opt == NULL)
400 return -EINVAL; 414 return -EINVAL;
401 415
402 ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy, 416 qopt = nla_data(opt);
403 qopt, sizeof(*qopt)); 417 ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
404 if (ret < 0) 418 if (ret < 0)
405 return ret; 419 return ret;
406 420
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index a6697c686c7f..504a78cdb718 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -254,16 +254,12 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
254{ 254{
255 struct prio_sched_data *q = qdisc_priv(sch); 255 struct prio_sched_data *q = qdisc_priv(sch);
256 unsigned char *b = skb_tail_pointer(skb); 256 unsigned char *b = skb_tail_pointer(skb);
257 struct nlattr *nest;
258 struct tc_prio_qopt opt; 257 struct tc_prio_qopt opt;
259 258
260 opt.bands = q->bands; 259 opt.bands = q->bands;
261 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); 260 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
262 261
263 nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); 262 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
264 if (nest == NULL)
265 goto nla_put_failure;
266 nla_nest_compat_end(skb, nest);
267 263
268 return skb->len; 264 return skb->len;
269 265
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 6e041d10dbdb..fe1508ef0d3d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -119,7 +119,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
119 u32 h, h2; 119 u32 h, h2;
120 120
121 switch (skb->protocol) { 121 switch (skb->protocol) {
122 case __constant_htons(ETH_P_IP): 122 case htons(ETH_P_IP):
123 { 123 {
124 const struct iphdr *iph = ip_hdr(skb); 124 const struct iphdr *iph = ip_hdr(skb);
125 h = iph->daddr; 125 h = iph->daddr;
@@ -134,7 +134,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
134 h2 ^= *(((u32*)iph) + iph->ihl); 134 h2 ^= *(((u32*)iph) + iph->ihl);
135 break; 135 break;
136 } 136 }
137 case __constant_htons(ETH_P_IPV6): 137 case htons(ETH_P_IPV6):
138 { 138 {
139 struct ipv6hdr *iph = ipv6_hdr(skb); 139 struct ipv6hdr *iph = ipv6_hdr(skb);
140 h = iph->daddr.s6_addr32[3]; 140 h = iph->daddr.s6_addr32[3];
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 2c35c678563b..d35ef059abb1 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -161,7 +161,7 @@ teql_destroy(struct Qdisc* sch)
161 txq = netdev_get_tx_queue(master->dev, 0); 161 txq = netdev_get_tx_queue(master->dev, 0);
162 master->slaves = NULL; 162 master->slaves = NULL;
163 163
164 root_lock = qdisc_root_lock(txq->qdisc); 164 root_lock = qdisc_root_sleeping_lock(txq->qdisc);
165 spin_lock_bh(root_lock); 165 spin_lock_bh(root_lock);
166 qdisc_reset(txq->qdisc); 166 qdisc_reset(txq->qdisc);
167 spin_unlock_bh(root_lock); 167 spin_unlock_bh(root_lock);
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 5061a26c5028..7b23803343cc 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -317,7 +317,7 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
317 } 317 }
318 318
319 /* Insert before pos. */ 319 /* Insert before pos. */
320 __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm); 320 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
321 321
322} 322}
323 323
@@ -825,8 +825,7 @@ static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
825 825
826 826
827 /* Insert before pos. */ 827 /* Insert before pos. */
828 __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby); 828 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
829
830} 829}
831 830
832static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, 831static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 0f8c439b848a..5231f7aaac0e 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -60,24 +60,14 @@ static int proc_do_xprt(ctl_table *table, int write, struct file *file,
60 void __user *buffer, size_t *lenp, loff_t *ppos) 60 void __user *buffer, size_t *lenp, loff_t *ppos)
61{ 61{
62 char tmpbuf[256]; 62 char tmpbuf[256];
63 int len; 63 size_t len;
64
64 if ((*ppos && !write) || !*lenp) { 65 if ((*ppos && !write) || !*lenp) {
65 *lenp = 0; 66 *lenp = 0;
66 return 0; 67 return 0;
67 } 68 }
68 if (write) 69 len = svc_print_xprts(tmpbuf, sizeof(tmpbuf));
69 return -EINVAL; 70 return simple_read_from_buffer(buffer, *lenp, ppos, tmpbuf, len);
70 else {
71 len = svc_print_xprts(tmpbuf, sizeof(tmpbuf));
72 if (!access_ok(VERIFY_WRITE, buffer, len))
73 return -EFAULT;
74
75 if (__copy_to_user(buffer, tmpbuf, len))
76 return -EFAULT;
77 }
78 *lenp -= len;
79 *ppos += len;
80 return 0;
81} 71}
82 72
83static int 73static int
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index e55427f73dfe..5c1954d28d09 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -769,7 +769,7 @@ repost:
769 /* check for expected message types */ 769 /* check for expected message types */
770 /* The order of some of these tests is important. */ 770 /* The order of some of these tests is important. */
771 switch (headerp->rm_type) { 771 switch (headerp->rm_type) {
772 case __constant_htonl(RDMA_MSG): 772 case htonl(RDMA_MSG):
773 /* never expect read chunks */ 773 /* never expect read chunks */
774 /* never expect reply chunks (two ways to check) */ 774 /* never expect reply chunks (two ways to check) */
775 /* never expect write chunks without having offered RDMA */ 775 /* never expect write chunks without having offered RDMA */
@@ -802,7 +802,7 @@ repost:
802 rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len); 802 rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len);
803 break; 803 break;
804 804
805 case __constant_htonl(RDMA_NOMSG): 805 case htonl(RDMA_NOMSG):
806 /* never expect read or write chunks, always reply chunks */ 806 /* never expect read or write chunks, always reply chunks */
807 if (headerp->rm_body.rm_chunks[0] != xdr_zero || 807 if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
808 headerp->rm_body.rm_chunks[1] != xdr_zero || 808 headerp->rm_body.rm_chunks[1] != xdr_zero ||
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index b4b17f44cb29..74de31a06616 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -443,18 +443,18 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
443 443
444 dprintk("svcrdma: rqstp=%p\n", rqstp); 444 dprintk("svcrdma: rqstp=%p\n", rqstp);
445 445
446 spin_lock_bh(&rdma_xprt->sc_read_complete_lock); 446 spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
447 if (!list_empty(&rdma_xprt->sc_read_complete_q)) { 447 if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
448 ctxt = list_entry(rdma_xprt->sc_read_complete_q.next, 448 ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
449 struct svc_rdma_op_ctxt, 449 struct svc_rdma_op_ctxt,
450 dto_q); 450 dto_q);
451 list_del_init(&ctxt->dto_q); 451 list_del_init(&ctxt->dto_q);
452 } 452 }
453 spin_unlock_bh(&rdma_xprt->sc_read_complete_lock); 453 if (ctxt) {
454 if (ctxt) 454 spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
455 return rdma_read_complete(rqstp, ctxt); 455 return rdma_read_complete(rqstp, ctxt);
456 }
456 457
457 spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
458 if (!list_empty(&rdma_xprt->sc_rq_dto_q)) { 458 if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
459 ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next, 459 ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
460 struct svc_rdma_op_ctxt, 460 struct svc_rdma_op_ctxt,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 19ddc382b777..900cb69728c6 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -359,11 +359,11 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
359 if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) { 359 if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
360 struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr; 360 struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
361 BUG_ON(!read_hdr); 361 BUG_ON(!read_hdr);
362 spin_lock_bh(&xprt->sc_rq_dto_lock);
362 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); 363 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
363 spin_lock_bh(&xprt->sc_read_complete_lock);
364 list_add_tail(&read_hdr->dto_q, 364 list_add_tail(&read_hdr->dto_q,
365 &xprt->sc_read_complete_q); 365 &xprt->sc_read_complete_q);
366 spin_unlock_bh(&xprt->sc_read_complete_lock); 366 spin_unlock_bh(&xprt->sc_rq_dto_lock);
367 svc_xprt_enqueue(&xprt->sc_xprt); 367 svc_xprt_enqueue(&xprt->sc_xprt);
368 } 368 }
369 svc_rdma_put_context(ctxt, 0); 369 svc_rdma_put_context(ctxt, 0);
@@ -428,7 +428,6 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
428 init_waitqueue_head(&cma_xprt->sc_send_wait); 428 init_waitqueue_head(&cma_xprt->sc_send_wait);
429 429
430 spin_lock_init(&cma_xprt->sc_lock); 430 spin_lock_init(&cma_xprt->sc_lock);
431 spin_lock_init(&cma_xprt->sc_read_complete_lock);
432 spin_lock_init(&cma_xprt->sc_rq_dto_lock); 431 spin_lock_init(&cma_xprt->sc_rq_dto_lock);
433 432
434 cma_xprt->sc_ord = svcrdma_ord; 433 cma_xprt->sc_ord = svcrdma_ord;
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index b1ff16aa4bdb..3ddaff42d1bb 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -96,8 +96,8 @@ struct bcbearer {
96 struct media media; 96 struct media media;
97 struct bcbearer_pair bpairs[MAX_BEARERS]; 97 struct bcbearer_pair bpairs[MAX_BEARERS];
98 struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1]; 98 struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
99 struct node_map remains; 99 struct tipc_node_map remains;
100 struct node_map remains_new; 100 struct tipc_node_map remains_new;
101}; 101};
102 102
103/** 103/**
@@ -110,7 +110,7 @@ struct bcbearer {
110 110
111struct bclink { 111struct bclink {
112 struct link link; 112 struct link link;
113 struct node node; 113 struct tipc_node node;
114}; 114};
115 115
116 116
@@ -149,7 +149,7 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
149 * Called with 'node' locked, bc_lock unlocked 149 * Called with 'node' locked, bc_lock unlocked
150 */ 150 */
151 151
152static void bclink_set_gap(struct node *n_ptr) 152static void bclink_set_gap(struct tipc_node *n_ptr)
153{ 153{
154 struct sk_buff *buf = n_ptr->bclink.deferred_head; 154 struct sk_buff *buf = n_ptr->bclink.deferred_head;
155 155
@@ -202,7 +202,7 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
202 * Node is locked, bc_lock unlocked. 202 * Node is locked, bc_lock unlocked.
203 */ 203 */
204 204
205void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked) 205void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
206{ 206{
207 struct sk_buff *crs; 207 struct sk_buff *crs;
208 struct sk_buff *next; 208 struct sk_buff *next;
@@ -250,7 +250,7 @@ void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked)
250 * tipc_net_lock and node lock set 250 * tipc_net_lock and node lock set
251 */ 251 */
252 252
253static void bclink_send_ack(struct node *n_ptr) 253static void bclink_send_ack(struct tipc_node *n_ptr)
254{ 254{
255 struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1]; 255 struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
256 256
@@ -264,7 +264,7 @@ static void bclink_send_ack(struct node *n_ptr)
264 * tipc_net_lock and node lock set 264 * tipc_net_lock and node lock set
265 */ 265 */
266 266
267static void bclink_send_nack(struct node *n_ptr) 267static void bclink_send_nack(struct tipc_node *n_ptr)
268{ 268{
269 struct sk_buff *buf; 269 struct sk_buff *buf;
270 struct tipc_msg *msg; 270 struct tipc_msg *msg;
@@ -308,7 +308,7 @@ static void bclink_send_nack(struct node *n_ptr)
308 * tipc_net_lock and node lock set 308 * tipc_net_lock and node lock set
309 */ 309 */
310 310
311void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent) 311void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
312{ 312{
313 if (!n_ptr->bclink.supported || 313 if (!n_ptr->bclink.supported ||
314 less_eq(last_sent, mod(n_ptr->bclink.last_in))) 314 less_eq(last_sent, mod(n_ptr->bclink.last_in)))
@@ -328,7 +328,7 @@ void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent)
328 328
329static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to) 329static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
330{ 330{
331 struct node *n_ptr = tipc_node_find(dest); 331 struct tipc_node *n_ptr = tipc_node_find(dest);
332 u32 my_after, my_to; 332 u32 my_after, my_to;
333 333
334 if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr))) 334 if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
@@ -418,7 +418,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
418 static int rx_count = 0; 418 static int rx_count = 0;
419#endif 419#endif
420 struct tipc_msg *msg = buf_msg(buf); 420 struct tipc_msg *msg = buf_msg(buf);
421 struct node* node = tipc_node_find(msg_prevnode(msg)); 421 struct tipc_node* node = tipc_node_find(msg_prevnode(msg));
422 u32 next_in; 422 u32 next_in;
423 u32 seqno; 423 u32 seqno;
424 struct sk_buff *deferred; 424 struct sk_buff *deferred;
@@ -538,7 +538,7 @@ u32 tipc_bclink_get_last_sent(void)
538 return last_sent; 538 return last_sent;
539} 539}
540 540
541u32 tipc_bclink_acks_missing(struct node *n_ptr) 541u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
542{ 542{
543 return (n_ptr->bclink.supported && 543 return (n_ptr->bclink.supported &&
544 (tipc_bclink_get_last_sent() != n_ptr->bclink.acked)); 544 (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index a2416fa6b906..5aa024b99c55 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -41,12 +41,12 @@
41#define WSIZE 32 41#define WSIZE 32
42 42
43/** 43/**
44 * struct node_map - set of node identifiers 44 * struct tipc_node_map - set of node identifiers
45 * @count: # of nodes in set 45 * @count: # of nodes in set
46 * @map: bitmap of node identifiers that are in the set 46 * @map: bitmap of node identifiers that are in the set
47 */ 47 */
48 48
49struct node_map { 49struct tipc_node_map {
50 u32 count; 50 u32 count;
51 u32 map[MAX_NODES / WSIZE]; 51 u32 map[MAX_NODES / WSIZE];
52}; 52};
@@ -68,7 +68,7 @@ struct port_list {
68}; 68};
69 69
70 70
71struct node; 71struct tipc_node;
72 72
73extern char tipc_bclink_name[]; 73extern char tipc_bclink_name[];
74 74
@@ -77,7 +77,7 @@ extern char tipc_bclink_name[];
77 * nmap_add - add a node to a node map 77 * nmap_add - add a node to a node map
78 */ 78 */
79 79
80static inline void tipc_nmap_add(struct node_map *nm_ptr, u32 node) 80static inline void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
81{ 81{
82 int n = tipc_node(node); 82 int n = tipc_node(node);
83 int w = n / WSIZE; 83 int w = n / WSIZE;
@@ -93,7 +93,7 @@ static inline void tipc_nmap_add(struct node_map *nm_ptr, u32 node)
93 * nmap_remove - remove a node from a node map 93 * nmap_remove - remove a node from a node map
94 */ 94 */
95 95
96static inline void tipc_nmap_remove(struct node_map *nm_ptr, u32 node) 96static inline void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
97{ 97{
98 int n = tipc_node(node); 98 int n = tipc_node(node);
99 int w = n / WSIZE; 99 int w = n / WSIZE;
@@ -109,7 +109,7 @@ static inline void tipc_nmap_remove(struct node_map *nm_ptr, u32 node)
109 * nmap_equal - test for equality of node maps 109 * nmap_equal - test for equality of node maps
110 */ 110 */
111 111
112static inline int tipc_nmap_equal(struct node_map *nm_a, struct node_map *nm_b) 112static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b)
113{ 113{
114 return !memcmp(nm_a, nm_b, sizeof(*nm_a)); 114 return !memcmp(nm_a, nm_b, sizeof(*nm_a));
115} 115}
@@ -121,8 +121,8 @@ static inline int tipc_nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
121 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B) 121 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
122 */ 122 */
123 123
124static inline void tipc_nmap_diff(struct node_map *nm_a, struct node_map *nm_b, 124static inline void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
125 struct node_map *nm_diff) 125 struct tipc_node_map *nm_diff)
126{ 126{
127 int stop = sizeof(nm_a->map) / sizeof(u32); 127 int stop = sizeof(nm_a->map) / sizeof(u32);
128 int w; 128 int w;
@@ -195,12 +195,12 @@ static inline void tipc_port_list_free(struct port_list *pl_ptr)
195 195
196int tipc_bclink_init(void); 196int tipc_bclink_init(void);
197void tipc_bclink_stop(void); 197void tipc_bclink_stop(void);
198void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked); 198void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
199int tipc_bclink_send_msg(struct sk_buff *buf); 199int tipc_bclink_send_msg(struct sk_buff *buf);
200void tipc_bclink_recv_pkt(struct sk_buff *buf); 200void tipc_bclink_recv_pkt(struct sk_buff *buf);
201u32 tipc_bclink_get_last_sent(void); 201u32 tipc_bclink_get_last_sent(void);
202u32 tipc_bclink_acks_missing(struct node *n_ptr); 202u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
203void tipc_bclink_check_gap(struct node *n_ptr, u32 seqno); 203void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 seqno);
204int tipc_bclink_stats(char *stats_buf, const u32 buf_size); 204int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
205int tipc_bclink_reset_stats(void); 205int tipc_bclink_reset_stats(void);
206int tipc_bclink_set_queue_limits(u32 limit); 206int tipc_bclink_set_queue_limits(u32 limit);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 6a9aba3edd08..a7a36779b9b3 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -599,7 +599,7 @@ int tipc_block_bearer(const char *name)
599 spin_lock_bh(&b_ptr->publ.lock); 599 spin_lock_bh(&b_ptr->publ.lock);
600 b_ptr->publ.blocked = 1; 600 b_ptr->publ.blocked = 1;
601 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 601 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
602 struct node *n_ptr = l_ptr->owner; 602 struct tipc_node *n_ptr = l_ptr->owner;
603 603
604 spin_lock_bh(&n_ptr->lock); 604 spin_lock_bh(&n_ptr->lock);
605 tipc_link_reset(l_ptr); 605 tipc_link_reset(l_ptr);
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 6a36b6600e6c..ca5734892713 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -104,7 +104,7 @@ struct bearer {
104 u32 continue_count; 104 u32 continue_count;
105 int active; 105 int active;
106 char net_plane; 106 char net_plane;
107 struct node_map nodes; 107 struct tipc_node_map nodes;
108}; 108};
109 109
110struct bearer_name { 110struct bearer_name {
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 46ee6c58532d..689fdefe9d04 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -48,8 +48,8 @@ static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
48 u32 lower, u32 upper); 48 u32 lower, u32 upper);
49static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest); 49static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest);
50 50
51struct node **tipc_local_nodes = NULL; 51struct tipc_node **tipc_local_nodes = NULL;
52struct node_map tipc_cltr_bcast_nodes = {0,{0,}}; 52struct tipc_node_map tipc_cltr_bcast_nodes = {0,{0,}};
53u32 tipc_highest_allowed_slave = 0; 53u32 tipc_highest_allowed_slave = 0;
54 54
55struct cluster *tipc_cltr_create(u32 addr) 55struct cluster *tipc_cltr_create(u32 addr)
@@ -115,7 +115,7 @@ void tipc_cltr_delete(struct cluster *c_ptr)
115 115
116u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr) 116u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr)
117{ 117{
118 struct node *n_ptr; 118 struct tipc_node *n_ptr;
119 u32 n_num = tipc_node(addr) + 1; 119 u32 n_num = tipc_node(addr) + 1;
120 120
121 if (!c_ptr) 121 if (!c_ptr)
@@ -133,7 +133,7 @@ u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr)
133 return 0; 133 return 0;
134} 134}
135 135
136void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr) 136void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr)
137{ 137{
138 u32 n_num = tipc_node(n_ptr->addr); 138 u32 n_num = tipc_node(n_ptr->addr);
139 u32 max_n_num = tipc_max_nodes; 139 u32 max_n_num = tipc_max_nodes;
@@ -196,7 +196,7 @@ u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref)
196 * Uses deterministic and fair algorithm. 196 * Uses deterministic and fair algorithm.
197 */ 197 */
198 198
199struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector) 199struct tipc_node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector)
200{ 200{
201 u32 n_num; 201 u32 n_num;
202 u32 mask = tipc_max_nodes; 202 u32 mask = tipc_max_nodes;
@@ -379,7 +379,7 @@ void tipc_cltr_recv_routing_table(struct sk_buff *buf)
379{ 379{
380 struct tipc_msg *msg = buf_msg(buf); 380 struct tipc_msg *msg = buf_msg(buf);
381 struct cluster *c_ptr; 381 struct cluster *c_ptr;
382 struct node *n_ptr; 382 struct tipc_node *n_ptr;
383 unchar *node_table; 383 unchar *node_table;
384 u32 table_size; 384 u32 table_size;
385 u32 router; 385 u32 router;
@@ -499,7 +499,7 @@ static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
499 u32 lower, u32 upper) 499 u32 lower, u32 upper)
500{ 500{
501 struct sk_buff *buf_copy; 501 struct sk_buff *buf_copy;
502 struct node *n_ptr; 502 struct tipc_node *n_ptr;
503 u32 n_num; 503 u32 n_num;
504 u32 tstop; 504 u32 tstop;
505 505
@@ -534,7 +534,7 @@ void tipc_cltr_broadcast(struct sk_buff *buf)
534{ 534{
535 struct sk_buff *buf_copy; 535 struct sk_buff *buf_copy;
536 struct cluster *c_ptr; 536 struct cluster *c_ptr;
537 struct node *n_ptr; 537 struct tipc_node *n_ptr;
538 u32 n_num; 538 u32 n_num;
539 u32 tstart; 539 u32 tstart;
540 u32 tstop; 540 u32 tstop;
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
index 62df074afaec..333efb0b9c44 100644
--- a/net/tipc/cluster.h
+++ b/net/tipc/cluster.h
@@ -54,24 +54,24 @@
54struct cluster { 54struct cluster {
55 u32 addr; 55 u32 addr;
56 struct _zone *owner; 56 struct _zone *owner;
57 struct node **nodes; 57 struct tipc_node **nodes;
58 u32 highest_node; 58 u32 highest_node;
59 u32 highest_slave; 59 u32 highest_slave;
60}; 60};
61 61
62 62
63extern struct node **tipc_local_nodes; 63extern struct tipc_node **tipc_local_nodes;
64extern u32 tipc_highest_allowed_slave; 64extern u32 tipc_highest_allowed_slave;
65extern struct node_map tipc_cltr_bcast_nodes; 65extern struct tipc_node_map tipc_cltr_bcast_nodes;
66 66
67void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router); 67void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router);
68void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest); 68void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest);
69struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector); 69struct tipc_node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector);
70u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref); 70u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref);
71void tipc_cltr_recv_routing_table(struct sk_buff *buf); 71void tipc_cltr_recv_routing_table(struct sk_buff *buf);
72struct cluster *tipc_cltr_create(u32 addr); 72struct cluster *tipc_cltr_create(u32 addr);
73void tipc_cltr_delete(struct cluster *c_ptr); 73void tipc_cltr_delete(struct cluster *c_ptr);
74void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr); 74void tipc_cltr_attach_node(struct cluster *c_ptr, struct tipc_node *n_ptr);
75void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest); 75void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest);
76void tipc_cltr_broadcast(struct sk_buff *buf); 76void tipc_cltr_broadcast(struct sk_buff *buf);
77int tipc_cltr_init(void); 77int tipc_cltr_init(void);
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 1657f0e795ff..74b7d1e28aec 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -193,7 +193,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
193 /* Always accept link here */ 193 /* Always accept link here */
194 struct sk_buff *rbuf; 194 struct sk_buff *rbuf;
195 struct tipc_media_addr *addr; 195 struct tipc_media_addr *addr;
196 struct node *n_ptr = tipc_node_find(orig); 196 struct tipc_node *n_ptr = tipc_node_find(orig);
197 int link_fully_up; 197 int link_fully_up;
198 198
199 dbg(" in own cluster\n"); 199 dbg(" in own cluster\n");
diff --git a/net/tipc/link.c b/net/tipc/link.c
index d60113ba4b1b..dd4c18b9a35b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1155,7 +1155,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1155int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) 1155int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
1156{ 1156{
1157 struct link *l_ptr; 1157 struct link *l_ptr;
1158 struct node *n_ptr; 1158 struct tipc_node *n_ptr;
1159 int res = -ELINKCONG; 1159 int res = -ELINKCONG;
1160 1160
1161 read_lock_bh(&tipc_net_lock); 1161 read_lock_bh(&tipc_net_lock);
@@ -1226,7 +1226,7 @@ static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1226int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode) 1226int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1227{ 1227{
1228 struct link *l_ptr; 1228 struct link *l_ptr;
1229 struct node *n_ptr; 1229 struct tipc_node *n_ptr;
1230 int res; 1230 int res;
1231 u32 selector = msg_origport(buf_msg(buf)) & 1; 1231 u32 selector = msg_origport(buf_msg(buf)) & 1;
1232 u32 dummy; 1232 u32 dummy;
@@ -1270,7 +1270,7 @@ int tipc_link_send_sections_fast(struct port *sender,
1270 struct tipc_msg *hdr = &sender->publ.phdr; 1270 struct tipc_msg *hdr = &sender->publ.phdr;
1271 struct link *l_ptr; 1271 struct link *l_ptr;
1272 struct sk_buff *buf; 1272 struct sk_buff *buf;
1273 struct node *node; 1273 struct tipc_node *node;
1274 int res; 1274 int res;
1275 u32 selector = msg_origport(hdr) & 1; 1275 u32 selector = msg_origport(hdr) & 1;
1276 1276
@@ -1364,7 +1364,7 @@ static int link_send_sections_long(struct port *sender,
1364 u32 destaddr) 1364 u32 destaddr)
1365{ 1365{
1366 struct link *l_ptr; 1366 struct link *l_ptr;
1367 struct node *node; 1367 struct tipc_node *node;
1368 struct tipc_msg *hdr = &sender->publ.phdr; 1368 struct tipc_msg *hdr = &sender->publ.phdr;
1369 u32 dsz = msg_data_sz(hdr); 1369 u32 dsz = msg_data_sz(hdr);
1370 u32 max_pkt,fragm_sz,rest; 1370 u32 max_pkt,fragm_sz,rest;
@@ -1636,7 +1636,7 @@ void tipc_link_push_queue(struct link *l_ptr)
1636 1636
1637static void link_reset_all(unsigned long addr) 1637static void link_reset_all(unsigned long addr)
1638{ 1638{
1639 struct node *n_ptr; 1639 struct tipc_node *n_ptr;
1640 char addr_string[16]; 1640 char addr_string[16];
1641 u32 i; 1641 u32 i;
1642 1642
@@ -1682,7 +1682,7 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1682 1682
1683 /* Handle failure on broadcast link */ 1683 /* Handle failure on broadcast link */
1684 1684
1685 struct node *n_ptr; 1685 struct tipc_node *n_ptr;
1686 char addr_string[16]; 1686 char addr_string[16];
1687 1687
1688 tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg)); 1688 tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg));
@@ -1843,7 +1843,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1843 read_lock_bh(&tipc_net_lock); 1843 read_lock_bh(&tipc_net_lock);
1844 while (head) { 1844 while (head) {
1845 struct bearer *b_ptr = (struct bearer *)tb_ptr; 1845 struct bearer *b_ptr = (struct bearer *)tb_ptr;
1846 struct node *n_ptr; 1846 struct tipc_node *n_ptr;
1847 struct link *l_ptr; 1847 struct link *l_ptr;
1848 struct sk_buff *crs; 1848 struct sk_buff *crs;
1849 struct sk_buff *buf = head; 1849 struct sk_buff *buf = head;
@@ -2935,7 +2935,7 @@ void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2935 * Returns pointer to link (or 0 if invalid link name). 2935 * Returns pointer to link (or 0 if invalid link name).
2936 */ 2936 */
2937 2937
2938static struct link *link_find_link(const char *name, struct node **node) 2938static struct link *link_find_link(const char *name, struct tipc_node **node)
2939{ 2939{
2940 struct link_name link_name_parts; 2940 struct link_name link_name_parts;
2941 struct bearer *b_ptr; 2941 struct bearer *b_ptr;
@@ -2965,7 +2965,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
2965 struct tipc_link_config *args; 2965 struct tipc_link_config *args;
2966 u32 new_value; 2966 u32 new_value;
2967 struct link *l_ptr; 2967 struct link *l_ptr;
2968 struct node *node; 2968 struct tipc_node *node;
2969 int res; 2969 int res;
2970 2970
2971 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2971 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
@@ -3043,7 +3043,7 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
3043{ 3043{
3044 char *link_name; 3044 char *link_name;
3045 struct link *l_ptr; 3045 struct link *l_ptr;
3046 struct node *node; 3046 struct tipc_node *node;
3047 3047
3048 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 3048 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
3049 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 3049 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -3091,7 +3091,7 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
3091{ 3091{
3092 struct print_buf pb; 3092 struct print_buf pb;
3093 struct link *l_ptr; 3093 struct link *l_ptr;
3094 struct node *node; 3094 struct tipc_node *node;
3095 char *status; 3095 char *status;
3096 u32 profile_total = 0; 3096 u32 profile_total = 0;
3097 3097
@@ -3207,7 +3207,7 @@ int link_control(const char *name, u32 op, u32 val)
3207 int res = -EINVAL; 3207 int res = -EINVAL;
3208 struct link *l_ptr; 3208 struct link *l_ptr;
3209 u32 bearer_id; 3209 u32 bearer_id;
3210 struct node * node; 3210 struct tipc_node * node;
3211 u32 a; 3211 u32 a;
3212 3212
3213 a = link_name2addr(name, &bearer_id); 3213 a = link_name2addr(name, &bearer_id);
@@ -3249,7 +3249,7 @@ int link_control(const char *name, u32 op, u32 val)
3249 3249
3250u32 tipc_link_get_max_pkt(u32 dest, u32 selector) 3250u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3251{ 3251{
3252 struct node *n_ptr; 3252 struct tipc_node *n_ptr;
3253 struct link *l_ptr; 3253 struct link *l_ptr;
3254 u32 res = MAX_PKT_DEFAULT; 3254 u32 res = MAX_PKT_DEFAULT;
3255 3255
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 52f3e7c1871f..6a51e38ad25c 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -116,7 +116,7 @@ struct link {
116 char name[TIPC_MAX_LINK_NAME]; 116 char name[TIPC_MAX_LINK_NAME];
117 struct tipc_media_addr media_addr; 117 struct tipc_media_addr media_addr;
118 struct timer_list timer; 118 struct timer_list timer;
119 struct node *owner; 119 struct tipc_node *owner;
120 struct list_head link_list; 120 struct list_head link_list;
121 121
122 /* Management and link supervision data */ 122 /* Management and link supervision data */
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index b9e7cd336d76..139882d4ed00 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -76,7 +76,7 @@ struct publication {
76 u32 node; 76 u32 node;
77 u32 ref; 77 u32 ref;
78 u32 key; 78 u32 key;
79 struct node_subscr subscr; 79 struct tipc_node_subscr subscr;
80 struct list_head local_list; 80 struct list_head local_list;
81 struct list_head pport_list; 81 struct list_head pport_list;
82 struct publication *node_list_next; 82 struct publication *node_list_next;
diff --git a/net/tipc/net.c b/net/tipc/net.c
index ec7b04fbdc43..7906608bf510 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -118,7 +118,7 @@
118DEFINE_RWLOCK(tipc_net_lock); 118DEFINE_RWLOCK(tipc_net_lock);
119struct network tipc_net = { NULL }; 119struct network tipc_net = { NULL };
120 120
121struct node *tipc_net_select_remote_node(u32 addr, u32 ref) 121struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
122{ 122{
123 return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref); 123 return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref);
124} 124}
diff --git a/net/tipc/net.h b/net/tipc/net.h
index d154ac2bda9a..de2b9ad8f646 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -55,7 +55,7 @@ extern rwlock_t tipc_net_lock;
55void tipc_net_remove_as_router(u32 router); 55void tipc_net_remove_as_router(u32 router);
56void tipc_net_send_external_routes(u32 dest); 56void tipc_net_send_external_routes(u32 dest);
57void tipc_net_route_msg(struct sk_buff *buf); 57void tipc_net_route_msg(struct sk_buff *buf);
58struct node *tipc_net_select_remote_node(u32 addr, u32 ref); 58struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref);
59u32 tipc_net_select_router(u32 addr, u32 ref); 59u32 tipc_net_select_router(u32 addr, u32 ref);
60 60
61int tipc_net_start(u32 addr); 61int tipc_net_start(u32 addr);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index ee952ad60218..20d98c56e152 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -46,11 +46,11 @@
46#include "bearer.h" 46#include "bearer.h"
47#include "name_distr.h" 47#include "name_distr.h"
48 48
49void node_print(struct print_buf *buf, struct node *n_ptr, char *str); 49void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str);
50static void node_lost_contact(struct node *n_ptr); 50static void node_lost_contact(struct tipc_node *n_ptr);
51static void node_established_contact(struct node *n_ptr); 51static void node_established_contact(struct tipc_node *n_ptr);
52 52
53struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */ 53struct tipc_node *tipc_nodes = NULL; /* sorted list of nodes within cluster */
54 54
55static DEFINE_SPINLOCK(node_create_lock); 55static DEFINE_SPINLOCK(node_create_lock);
56 56
@@ -66,11 +66,11 @@ u32 tipc_own_tag = 0;
66 * but this is a non-trivial change.) 66 * but this is a non-trivial change.)
67 */ 67 */
68 68
69struct node *tipc_node_create(u32 addr) 69struct tipc_node *tipc_node_create(u32 addr)
70{ 70{
71 struct cluster *c_ptr; 71 struct cluster *c_ptr;
72 struct node *n_ptr; 72 struct tipc_node *n_ptr;
73 struct node **curr_node; 73 struct tipc_node **curr_node;
74 74
75 spin_lock_bh(&node_create_lock); 75 spin_lock_bh(&node_create_lock);
76 76
@@ -120,7 +120,7 @@ struct node *tipc_node_create(u32 addr)
120 return n_ptr; 120 return n_ptr;
121} 121}
122 122
123void tipc_node_delete(struct node *n_ptr) 123void tipc_node_delete(struct tipc_node *n_ptr)
124{ 124{
125 if (!n_ptr) 125 if (!n_ptr)
126 return; 126 return;
@@ -146,7 +146,7 @@ void tipc_node_delete(struct node *n_ptr)
146 * Link becomes active (alone or shared) or standby, depending on its priority. 146 * Link becomes active (alone or shared) or standby, depending on its priority.
147 */ 147 */
148 148
149void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr) 149void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr)
150{ 150{
151 struct link **active = &n_ptr->active_links[0]; 151 struct link **active = &n_ptr->active_links[0];
152 152
@@ -180,7 +180,7 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
180 * node_select_active_links - select active link 180 * node_select_active_links - select active link
181 */ 181 */
182 182
183static void node_select_active_links(struct node *n_ptr) 183static void node_select_active_links(struct tipc_node *n_ptr)
184{ 184{
185 struct link **active = &n_ptr->active_links[0]; 185 struct link **active = &n_ptr->active_links[0];
186 u32 i; 186 u32 i;
@@ -208,7 +208,7 @@ static void node_select_active_links(struct node *n_ptr)
208 * tipc_node_link_down - handle loss of link 208 * tipc_node_link_down - handle loss of link
209 */ 209 */
210 210
211void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr) 211void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
212{ 212{
213 struct link **active; 213 struct link **active;
214 214
@@ -235,30 +235,30 @@ void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
235 node_lost_contact(n_ptr); 235 node_lost_contact(n_ptr);
236} 236}
237 237
238int tipc_node_has_active_links(struct node *n_ptr) 238int tipc_node_has_active_links(struct tipc_node *n_ptr)
239{ 239{
240 return (n_ptr && 240 return (n_ptr &&
241 ((n_ptr->active_links[0]) || (n_ptr->active_links[1]))); 241 ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
242} 242}
243 243
244int tipc_node_has_redundant_links(struct node *n_ptr) 244int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
245{ 245{
246 return (n_ptr->working_links > 1); 246 return (n_ptr->working_links > 1);
247} 247}
248 248
249static int tipc_node_has_active_routes(struct node *n_ptr) 249static int tipc_node_has_active_routes(struct tipc_node *n_ptr)
250{ 250{
251 return (n_ptr && (n_ptr->last_router >= 0)); 251 return (n_ptr && (n_ptr->last_router >= 0));
252} 252}
253 253
254int tipc_node_is_up(struct node *n_ptr) 254int tipc_node_is_up(struct tipc_node *n_ptr)
255{ 255{
256 return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr)); 256 return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr));
257} 257}
258 258
259struct node *tipc_node_attach_link(struct link *l_ptr) 259struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
260{ 260{
261 struct node *n_ptr = tipc_node_find(l_ptr->addr); 261 struct tipc_node *n_ptr = tipc_node_find(l_ptr->addr);
262 262
263 if (!n_ptr) 263 if (!n_ptr)
264 n_ptr = tipc_node_create(l_ptr->addr); 264 n_ptr = tipc_node_create(l_ptr->addr);
@@ -285,7 +285,7 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
285 return NULL; 285 return NULL;
286} 286}
287 287
288void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr) 288void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr)
289{ 289{
290 n_ptr->links[l_ptr->b_ptr->identity] = NULL; 290 n_ptr->links[l_ptr->b_ptr->identity] = NULL;
291 tipc_net.zones[tipc_zone(l_ptr->addr)]->links--; 291 tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
@@ -338,7 +338,7 @@ void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
338 * 338 *
339 */ 339 */
340 340
341static void node_established_contact(struct node *n_ptr) 341static void node_established_contact(struct tipc_node *n_ptr)
342{ 342{
343 struct cluster *c_ptr; 343 struct cluster *c_ptr;
344 344
@@ -384,10 +384,10 @@ static void node_established_contact(struct node *n_ptr)
384 tipc_highest_allowed_slave); 384 tipc_highest_allowed_slave);
385} 385}
386 386
387static void node_lost_contact(struct node *n_ptr) 387static void node_lost_contact(struct tipc_node *n_ptr)
388{ 388{
389 struct cluster *c_ptr; 389 struct cluster *c_ptr;
390 struct node_subscr *ns, *tns; 390 struct tipc_node_subscr *ns, *tns;
391 char addr_string[16]; 391 char addr_string[16];
392 u32 i; 392 u32 i;
393 393
@@ -466,9 +466,9 @@ static void node_lost_contact(struct node *n_ptr)
466 * Called by when cluster local lookup has failed. 466 * Called by when cluster local lookup has failed.
467 */ 467 */
468 468
469struct node *tipc_node_select_next_hop(u32 addr, u32 selector) 469struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector)
470{ 470{
471 struct node *n_ptr; 471 struct tipc_node *n_ptr;
472 u32 router_addr; 472 u32 router_addr;
473 473
474 if (!tipc_addr_domain_valid(addr)) 474 if (!tipc_addr_domain_valid(addr))
@@ -513,7 +513,7 @@ struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
513 * Uses a deterministic and fair algorithm for selecting router node. 513 * Uses a deterministic and fair algorithm for selecting router node.
514 */ 514 */
515 515
516u32 tipc_node_select_router(struct node *n_ptr, u32 ref) 516u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref)
517{ 517{
518 u32 ulim; 518 u32 ulim;
519 u32 mask; 519 u32 mask;
@@ -551,7 +551,7 @@ u32 tipc_node_select_router(struct node *n_ptr, u32 ref)
551 return tipc_addr(own_zone(), own_cluster(), r); 551 return tipc_addr(own_zone(), own_cluster(), r);
552} 552}
553 553
554void tipc_node_add_router(struct node *n_ptr, u32 router) 554void tipc_node_add_router(struct tipc_node *n_ptr, u32 router)
555{ 555{
556 u32 r_num = tipc_node(router); 556 u32 r_num = tipc_node(router);
557 557
@@ -562,7 +562,7 @@ void tipc_node_add_router(struct node *n_ptr, u32 router)
562 !n_ptr->routers[n_ptr->last_router]); 562 !n_ptr->routers[n_ptr->last_router]);
563} 563}
564 564
565void tipc_node_remove_router(struct node *n_ptr, u32 router) 565void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router)
566{ 566{
567 u32 r_num = tipc_node(router); 567 u32 r_num = tipc_node(router);
568 568
@@ -580,7 +580,7 @@ void tipc_node_remove_router(struct node *n_ptr, u32 router)
580} 580}
581 581
582#if 0 582#if 0
583void node_print(struct print_buf *buf, struct node *n_ptr, char *str) 583void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str)
584{ 584{
585 u32 i; 585 u32 i;
586 586
@@ -597,7 +597,7 @@ void node_print(struct print_buf *buf, struct node *n_ptr, char *str)
597 597
598u32 tipc_available_nodes(const u32 domain) 598u32 tipc_available_nodes(const u32 domain)
599{ 599{
600 struct node *n_ptr; 600 struct tipc_node *n_ptr;
601 u32 cnt = 0; 601 u32 cnt = 0;
602 602
603 read_lock_bh(&tipc_net_lock); 603 read_lock_bh(&tipc_net_lock);
@@ -615,7 +615,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
615{ 615{
616 u32 domain; 616 u32 domain;
617 struct sk_buff *buf; 617 struct sk_buff *buf;
618 struct node *n_ptr; 618 struct tipc_node *n_ptr;
619 struct tipc_node_info node_info; 619 struct tipc_node_info node_info;
620 u32 payload_size; 620 u32 payload_size;
621 621
@@ -667,7 +667,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
667{ 667{
668 u32 domain; 668 u32 domain;
669 struct sk_buff *buf; 669 struct sk_buff *buf;
670 struct node *n_ptr; 670 struct tipc_node *n_ptr;
671 struct tipc_link_info link_info; 671 struct tipc_link_info link_info;
672 u32 payload_size; 672 u32 payload_size;
673 673
diff --git a/net/tipc/node.h b/net/tipc/node.h
index cd1882654bbb..6f990da5d143 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -43,7 +43,7 @@
43#include "bearer.h" 43#include "bearer.h"
44 44
45/** 45/**
46 * struct node - TIPC node structure 46 * struct tipc_node - TIPC node structure
47 * @addr: network address of node 47 * @addr: network address of node
48 * @lock: spinlock governing access to structure 48 * @lock: spinlock governing access to structure
49 * @owner: pointer to cluster that node belongs to 49 * @owner: pointer to cluster that node belongs to
@@ -68,11 +68,11 @@
68 * @defragm: list of partially reassembled b'cast message fragments from node 68 * @defragm: list of partially reassembled b'cast message fragments from node
69 */ 69 */
70 70
71struct node { 71struct tipc_node {
72 u32 addr; 72 u32 addr;
73 spinlock_t lock; 73 spinlock_t lock;
74 struct cluster *owner; 74 struct cluster *owner;
75 struct node *next; 75 struct tipc_node *next;
76 struct list_head nsub; 76 struct list_head nsub;
77 struct link *active_links[2]; 77 struct link *active_links[2];
78 struct link *links[MAX_BEARERS]; 78 struct link *links[MAX_BEARERS];
@@ -94,26 +94,26 @@ struct node {
94 } bclink; 94 } bclink;
95}; 95};
96 96
97extern struct node *tipc_nodes; 97extern struct tipc_node *tipc_nodes;
98extern u32 tipc_own_tag; 98extern u32 tipc_own_tag;
99 99
100struct node *tipc_node_create(u32 addr); 100struct tipc_node *tipc_node_create(u32 addr);
101void tipc_node_delete(struct node *n_ptr); 101void tipc_node_delete(struct tipc_node *n_ptr);
102struct node *tipc_node_attach_link(struct link *l_ptr); 102struct tipc_node *tipc_node_attach_link(struct link *l_ptr);
103void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr); 103void tipc_node_detach_link(struct tipc_node *n_ptr, struct link *l_ptr);
104void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr); 104void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr);
105void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr); 105void tipc_node_link_up(struct tipc_node *n_ptr, struct link *l_ptr);
106int tipc_node_has_active_links(struct node *n_ptr); 106int tipc_node_has_active_links(struct tipc_node *n_ptr);
107int tipc_node_has_redundant_links(struct node *n_ptr); 107int tipc_node_has_redundant_links(struct tipc_node *n_ptr);
108u32 tipc_node_select_router(struct node *n_ptr, u32 ref); 108u32 tipc_node_select_router(struct tipc_node *n_ptr, u32 ref);
109struct node *tipc_node_select_next_hop(u32 addr, u32 selector); 109struct tipc_node *tipc_node_select_next_hop(u32 addr, u32 selector);
110int tipc_node_is_up(struct node *n_ptr); 110int tipc_node_is_up(struct tipc_node *n_ptr);
111void tipc_node_add_router(struct node *n_ptr, u32 router); 111void tipc_node_add_router(struct tipc_node *n_ptr, u32 router);
112void tipc_node_remove_router(struct node *n_ptr, u32 router); 112void tipc_node_remove_router(struct tipc_node *n_ptr, u32 router);
113struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space); 113struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
114struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space); 114struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
115 115
116static inline struct node *tipc_node_find(u32 addr) 116static inline struct tipc_node *tipc_node_find(u32 addr)
117{ 117{
118 if (likely(in_own_cluster(addr))) 118 if (likely(in_own_cluster(addr)))
119 return tipc_local_nodes[tipc_node(addr)]; 119 return tipc_local_nodes[tipc_node(addr)];
@@ -126,19 +126,19 @@ static inline struct node *tipc_node_find(u32 addr)
126 return NULL; 126 return NULL;
127} 127}
128 128
129static inline struct node *tipc_node_select(u32 addr, u32 selector) 129static inline struct tipc_node *tipc_node_select(u32 addr, u32 selector)
130{ 130{
131 if (likely(in_own_cluster(addr))) 131 if (likely(in_own_cluster(addr)))
132 return tipc_local_nodes[tipc_node(addr)]; 132 return tipc_local_nodes[tipc_node(addr)];
133 return tipc_node_select_next_hop(addr, selector); 133 return tipc_node_select_next_hop(addr, selector);
134} 134}
135 135
136static inline void tipc_node_lock(struct node *n_ptr) 136static inline void tipc_node_lock(struct tipc_node *n_ptr)
137{ 137{
138 spin_lock_bh(&n_ptr->lock); 138 spin_lock_bh(&n_ptr->lock);
139} 139}
140 140
141static inline void tipc_node_unlock(struct node *n_ptr) 141static inline void tipc_node_unlock(struct tipc_node *n_ptr)
142{ 142{
143 spin_unlock_bh(&n_ptr->lock); 143 spin_unlock_bh(&n_ptr->lock);
144} 144}
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 8ecbd0fb6103..19194d476a9e 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -44,7 +44,7 @@
44 * tipc_nodesub_subscribe - create "node down" subscription for specified node 44 * tipc_nodesub_subscribe - create "node down" subscription for specified node
45 */ 45 */
46 46
47void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 47void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
48 void *usr_handle, net_ev_handler handle_down) 48 void *usr_handle, net_ev_handler handle_down)
49{ 49{
50 if (addr == tipc_own_addr) { 50 if (addr == tipc_own_addr) {
@@ -69,7 +69,7 @@ void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
69 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any) 69 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
70 */ 70 */
71 71
72void tipc_nodesub_unsubscribe(struct node_subscr *node_sub) 72void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
73{ 73{
74 if (!node_sub->node) 74 if (!node_sub->node)
75 return; 75 return;
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index 5f3f5859b84c..006ed739f515 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -42,22 +42,22 @@
42typedef void (*net_ev_handler) (void *usr_handle); 42typedef void (*net_ev_handler) (void *usr_handle);
43 43
44/** 44/**
45 * struct node_subscr - "node down" subscription entry 45 * struct tipc_node_subscr - "node down" subscription entry
46 * @node: ptr to node structure of interest (or NULL, if none) 46 * @node: ptr to node structure of interest (or NULL, if none)
47 * @handle_node_down: routine to invoke when node fails 47 * @handle_node_down: routine to invoke when node fails
48 * @usr_handle: argument to pass to routine when node fails 48 * @usr_handle: argument to pass to routine when node fails
49 * @nodesub_list: adjacent entries in list of subscriptions for the node 49 * @nodesub_list: adjacent entries in list of subscriptions for the node
50 */ 50 */
51 51
52struct node_subscr { 52struct tipc_node_subscr {
53 struct node *node; 53 struct tipc_node *node;
54 net_ev_handler handle_node_down; 54 net_ev_handler handle_node_down;
55 void *usr_handle; 55 void *usr_handle;
56 struct list_head nodesub_list; 56 struct list_head nodesub_list;
57}; 57};
58 58
59void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 59void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
60 void *usr_handle, net_ev_handler handle_down); 60 void *usr_handle, net_ev_handler handle_down);
61void tipc_nodesub_unsubscribe(struct node_subscr *node_sub); 61void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
62 62
63#endif 63#endif
diff --git a/net/tipc/port.h b/net/tipc/port.h
index e5f8c16429bd..ff31ee4a1dc3 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -105,7 +105,7 @@ struct port {
105 u32 probing_interval; 105 u32 probing_interval;
106 u32 last_in_seqno; 106 u32 last_in_seqno;
107 struct timer_list timer; 107 struct timer_list timer;
108 struct node_subscr subscription; 108 struct tipc_node_subscr subscription;
109}; 109};
110 110
111extern spinlock_t tipc_port_list_lock; 111extern spinlock_t tipc_port_list_lock;
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 3506f8563441..2c01ba2d86bf 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -111,10 +111,10 @@ void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest)
111 } 111 }
112} 112}
113 113
114struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref) 114struct tipc_node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
115{ 115{
116 struct cluster *c_ptr; 116 struct cluster *c_ptr;
117 struct node *n_ptr; 117 struct tipc_node *n_ptr;
118 u32 c_num; 118 u32 c_num;
119 119
120 if (!z_ptr) 120 if (!z_ptr)
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
index 6e7a08df8af5..7bdc3406ba9b 100644
--- a/net/tipc/zone.h
+++ b/net/tipc/zone.h
@@ -54,7 +54,7 @@ struct _zone {
54 u32 links; 54 u32 links;
55}; 55};
56 56
57struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref); 57struct tipc_node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
58u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref); 58u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
59void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router); 59void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router);
60void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest); 60void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index ab015c62d561..b97bd9fe6b79 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -14,6 +14,38 @@ config NL80211
14 14
15 If unsure, say Y. 15 If unsure, say Y.
16 16
17config WIRELESS_OLD_REGULATORY
18 bool "Old wireless static regulatory defintions"
19 default n
20 ---help---
21 This option enables the old static regulatory information
22 and uses it within the new framework. This is available
23 temporarily as an option to help prevent immediate issues
24 due to the switch to the new regulatory framework which
25 does require a new userspace application which has the
26 database of regulatory information (CRDA) and another for
27 setting regulatory domains (iw).
28
29 For more information see:
30
31 http://wireless.kernel.org/en/developers/Regulatory/CRDA
32 http://wireless.kernel.org/en/users/Documentation/iw
33
34 It is important to note though that if you *do* have CRDA present
35 and if this option is enabled CRDA *will* be called to update the
36 regulatory domain (for US and JP only). Support for letting the user
37 set the regulatory domain through iw is also supported. This option
38 mainly exists to leave around for a kernel release some old static
39 regulatory domains that were defined and to keep around the old
40 ieee80211_regdom module parameter. This is being phased out and you
41 should stop using them ASAP.
42
43 Say N unless you cannot install a new userspace application
44 or have one currently depending on the ieee80211_regdom module
45 parameter and cannot port it to use the new userspace interfaces.
46
47 This is scheduled for removal for 2.6.29.
48
17config WIRELESS_EXT 49config WIRELESS_EXT
18 bool "Wireless extensions" 50 bool "Wireless extensions"
19 default n 51 default n
@@ -39,4 +71,5 @@ config WIRELESS_EXT_SYSFS
39 files in /sys/class/net/*/wireless/. The same information 71 files in /sys/class/net/*/wireless/. The same information
40 is available via the ioctls as well. 72 is available via the ioctls as well.
41 73
42 Say Y if you have programs using it (we don't know of any). 74 Say Y if you have programs using it, like old versions of
75 hal.
diff --git a/net/wireless/core.c b/net/wireless/core.c
index f1da0b93bc56..a910cd2d0fd1 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This is the linux wireless configuration interface. 2 * This is the linux wireless configuration interface.
3 * 3 *
4 * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
@@ -13,12 +13,14 @@
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/notifier.h> 14#include <linux/notifier.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/list.h>
16#include <net/genetlink.h> 17#include <net/genetlink.h>
17#include <net/cfg80211.h> 18#include <net/cfg80211.h>
18#include <net/wireless.h> 19#include <net/wireless.h>
19#include "nl80211.h" 20#include "nl80211.h"
20#include "core.h" 21#include "core.h"
21#include "sysfs.h" 22#include "sysfs.h"
23#include "reg.h"
22 24
23/* name for sysfs, %d is appended */ 25/* name for sysfs, %d is appended */
24#define PHY_NAME "phy" 26#define PHY_NAME "phy"
@@ -27,6 +29,107 @@ MODULE_AUTHOR("Johannes Berg");
27MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
28MODULE_DESCRIPTION("wireless configuration support"); 30MODULE_DESCRIPTION("wireless configuration support");
29 31
32struct list_head regulatory_requests;
33
34/* Central wireless core regulatory domains, we only need two,
35 * the current one and a world regulatory domain in case we have no
36 * information to give us an alpha2 */
37struct ieee80211_regdomain *cfg80211_regdomain;
38
39/* We keep a static world regulatory domain in case of the absence of CRDA */
40const struct ieee80211_regdomain world_regdom = {
41 .n_reg_rules = 1,
42 .alpha2 = "00",
43 .reg_rules = {
44 REG_RULE(2402, 2472, 40, 6, 20,
45 NL80211_RRF_PASSIVE_SCAN |
46 NL80211_RRF_NO_IBSS),
47 }
48};
49
50#ifdef CONFIG_WIRELESS_OLD_REGULATORY
51/* All this fucking static junk will be removed soon, so
52 * don't fucking count on it !@#$ */
53
54static char *ieee80211_regdom = "US";
55module_param(ieee80211_regdom, charp, 0444);
56MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
57
58/* We assume 40 MHz bandwidth for the old regulatory work.
59 * We make emphasis we are using the exact same frequencies
60 * as before */
61
62const struct ieee80211_regdomain us_regdom = {
63 .n_reg_rules = 6,
64 .alpha2 = "US",
65 .reg_rules = {
66 /* IEEE 802.11b/g, channels 1..11 */
67 REG_RULE(2412-20, 2462+20, 40, 6, 27, 0),
68 /* IEEE 802.11a, channel 36 */
69 REG_RULE(5180-20, 5180+20, 40, 6, 23, 0),
70 /* IEEE 802.11a, channel 40 */
71 REG_RULE(5200-20, 5200+20, 40, 6, 23, 0),
72 /* IEEE 802.11a, channel 44 */
73 REG_RULE(5220-20, 5220+20, 40, 6, 23, 0),
74 /* IEEE 802.11a, channels 48..64 */
75 REG_RULE(5240-20, 5320+20, 40, 6, 23, 0),
76 /* IEEE 802.11a, channels 149..165, outdoor */
77 REG_RULE(5745-20, 5825+20, 40, 6, 30, 0),
78 }
79};
80
81const struct ieee80211_regdomain jp_regdom = {
82 .n_reg_rules = 3,
83 .alpha2 = "JP",
84 .reg_rules = {
85 /* IEEE 802.11b/g, channels 1..14 */
86 REG_RULE(2412-20, 2484+20, 40, 6, 20, 0),
87 /* IEEE 802.11a, channels 34..48 */
88 REG_RULE(5170-20, 5240+20, 40, 6, 20,
89 NL80211_RRF_PASSIVE_SCAN),
90 /* IEEE 802.11a, channels 52..64 */
91 REG_RULE(5260-20, 5320+20, 40, 6, 20,
92 NL80211_RRF_NO_IBSS |
93 NL80211_RRF_DFS),
94 }
95};
96
97const struct ieee80211_regdomain eu_regdom = {
98 .n_reg_rules = 6,
99 /* This alpha2 is bogus, we leave it here just for stupid
100 * backward compatibility */
101 .alpha2 = "EU",
102 .reg_rules = {
103 /* IEEE 802.11b/g, channels 1..13 */
104 REG_RULE(2412-20, 2472+20, 40, 6, 20, 0),
105 /* IEEE 802.11a, channel 36 */
106 REG_RULE(5180-20, 5180+20, 40, 6, 23,
107 NL80211_RRF_PASSIVE_SCAN),
108 /* IEEE 802.11a, channel 40 */
109 REG_RULE(5200-20, 5200+20, 40, 6, 23,
110 NL80211_RRF_PASSIVE_SCAN),
111 /* IEEE 802.11a, channel 44 */
112 REG_RULE(5220-20, 5220+20, 40, 6, 23,
113 NL80211_RRF_PASSIVE_SCAN),
114 /* IEEE 802.11a, channels 48..64 */
115 REG_RULE(5240-20, 5320+20, 40, 6, 20,
116 NL80211_RRF_NO_IBSS |
117 NL80211_RRF_DFS),
118 /* IEEE 802.11a, channels 100..140 */
119 REG_RULE(5500-20, 5700+20, 40, 6, 30,
120 NL80211_RRF_NO_IBSS |
121 NL80211_RRF_DFS),
122 }
123};
124
125#endif
126
127struct ieee80211_regdomain *cfg80211_world_regdom =
128 (struct ieee80211_regdomain *) &world_regdom;
129
130LIST_HEAD(regulatory_requests);
131DEFINE_MUTEX(cfg80211_reg_mutex);
132
30/* RCU might be appropriate here since we usually 133/* RCU might be appropriate here since we usually
31 * only read the list, and that can happen quite 134 * only read the list, and that can happen quite
32 * often because we need to do it for each command */ 135 * often because we need to do it for each command */
@@ -259,6 +362,13 @@ int wiphy_register(struct wiphy *wiphy)
259 struct ieee80211_supported_band *sband; 362 struct ieee80211_supported_band *sband;
260 bool have_band = false; 363 bool have_band = false;
261 int i; 364 int i;
365 u16 ifmodes = wiphy->interface_modes;
366
367 /* sanity check ifmodes */
368 WARN_ON(!ifmodes);
369 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
370 if (WARN_ON(ifmodes != wiphy->interface_modes))
371 wiphy->interface_modes = ifmodes;
262 372
263 /* sanity check supported bands/channels */ 373 /* sanity check supported bands/channels */
264 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 374 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
@@ -295,7 +405,9 @@ int wiphy_register(struct wiphy *wiphy)
295 ieee80211_set_bitrate_flags(wiphy); 405 ieee80211_set_bitrate_flags(wiphy);
296 406
297 /* set up regulatory info */ 407 /* set up regulatory info */
298 wiphy_update_regulatory(wiphy); 408 mutex_lock(&cfg80211_reg_mutex);
409 wiphy_update_regulatory(wiphy, REGDOM_SET_BY_CORE);
410 mutex_unlock(&cfg80211_reg_mutex);
299 411
300 mutex_lock(&cfg80211_drv_mutex); 412 mutex_lock(&cfg80211_drv_mutex);
301 413
@@ -402,9 +514,35 @@ static struct notifier_block cfg80211_netdev_notifier = {
402 .notifier_call = cfg80211_netdev_notifier_call, 514 .notifier_call = cfg80211_netdev_notifier_call,
403}; 515};
404 516
517#ifdef CONFIG_WIRELESS_OLD_REGULATORY
518const struct ieee80211_regdomain *static_regdom(char *alpha2)
519{
520 if (alpha2[0] == 'U' && alpha2[1] == 'S')
521 return &us_regdom;
522 if (alpha2[0] == 'J' && alpha2[1] == 'P')
523 return &jp_regdom;
524 if (alpha2[0] == 'E' && alpha2[1] == 'U')
525 return &eu_regdom;
526 /* Default, as per the old rules */
527 return &us_regdom;
528}
529#endif
530
405static int cfg80211_init(void) 531static int cfg80211_init(void)
406{ 532{
407 int err = wiphy_sysfs_init(); 533 int err;
534
535#ifdef CONFIG_WIRELESS_OLD_REGULATORY
536 cfg80211_regdomain =
537 (struct ieee80211_regdomain *) static_regdom(ieee80211_regdom);
538 /* Used during reset_regdomains_static() */
539 cfg80211_world_regdom = cfg80211_regdomain;
540#else
541 cfg80211_regdomain =
542 (struct ieee80211_regdomain *) cfg80211_world_regdom;
543#endif
544
545 err = wiphy_sysfs_init();
408 if (err) 546 if (err)
409 goto out_fail_sysfs; 547 goto out_fail_sysfs;
410 548
@@ -418,8 +556,33 @@ static int cfg80211_init(void)
418 556
419 ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL); 557 ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL);
420 558
559 err = regulatory_init();
560 if (err)
561 goto out_fail_reg;
562
563#ifdef CONFIG_WIRELESS_OLD_REGULATORY
564 printk(KERN_INFO "cfg80211: Using old static regulatory domain:\n");
565 print_regdomain_info(cfg80211_regdomain);
566 /* The old code still requests for a new regdomain and if
567 * you have CRDA you get it updated, otherwise you get
568 * stuck with the static values. We ignore "EU" code as
569 * that is not a valid ISO / IEC 3166 alpha2 */
570 if (ieee80211_regdom[0] != 'E' &&
571 ieee80211_regdom[1] != 'U')
572 err = __regulatory_hint(NULL, REGDOM_SET_BY_CORE,
573 ieee80211_regdom, NULL);
574#else
575 err = __regulatory_hint(NULL, REGDOM_SET_BY_CORE, "00", NULL);
576 if (err)
577 printk(KERN_ERR "cfg80211: calling CRDA failed - "
578 "unable to update world regulatory domain, "
579 "using static definition\n");
580#endif
581
421 return 0; 582 return 0;
422 583
584out_fail_reg:
585 debugfs_remove(ieee80211_debugfs_dir);
423out_fail_nl80211: 586out_fail_nl80211:
424 unregister_netdevice_notifier(&cfg80211_netdev_notifier); 587 unregister_netdevice_notifier(&cfg80211_netdev_notifier);
425out_fail_notifier: 588out_fail_notifier:
@@ -427,6 +590,7 @@ out_fail_notifier:
427out_fail_sysfs: 590out_fail_sysfs:
428 return err; 591 return err;
429} 592}
593
430subsys_initcall(cfg80211_init); 594subsys_initcall(cfg80211_init);
431 595
432static void cfg80211_exit(void) 596static void cfg80211_exit(void)
@@ -435,5 +599,6 @@ static void cfg80211_exit(void)
435 nl80211_exit(); 599 nl80211_exit();
436 unregister_netdevice_notifier(&cfg80211_netdev_notifier); 600 unregister_netdevice_notifier(&cfg80211_netdev_notifier);
437 wiphy_sysfs_exit(); 601 wiphy_sysfs_exit();
602 regulatory_exit();
438} 603}
439module_exit(cfg80211_exit); 604module_exit(cfg80211_exit);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 7a02c356d63d..771cc5cc7658 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -79,6 +79,6 @@ extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv,
79 char *newname); 79 char *newname);
80 80
81void ieee80211_set_bitrate_flags(struct wiphy *wiphy); 81void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
82void wiphy_update_regulatory(struct wiphy *wiphy); 82void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby);
83 83
84#endif /* __NET_WIRELESS_CORE_H */ 84#endif /* __NET_WIRELESS_CORE_H */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 59eb2cf42e5f..1221d726ed50 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -18,6 +18,7 @@
18#include <net/cfg80211.h> 18#include <net/cfg80211.h>
19#include "core.h" 19#include "core.h"
20#include "nl80211.h" 20#include "nl80211.h"
21#include "reg.h"
21 22
22/* the netlink family */ 23/* the netlink family */
23static struct genl_family nl80211_fam = { 24static struct genl_family nl80211_fam = {
@@ -87,6 +88,16 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
87 [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, 88 [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
88 .len = IEEE80211_MAX_MESH_ID_LEN }, 89 .len = IEEE80211_MAX_MESH_ID_LEN },
89 [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, 90 [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
91
92 [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
93 [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
94
95 [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 },
96 [NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 },
97 [NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 },
98
99 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY,
100 .len = NL80211_HT_CAPABILITY_LEN },
90}; 101};
91 102
92/* message building helper */ 103/* message building helper */
@@ -106,10 +117,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
106 struct nlattr *nl_bands, *nl_band; 117 struct nlattr *nl_bands, *nl_band;
107 struct nlattr *nl_freqs, *nl_freq; 118 struct nlattr *nl_freqs, *nl_freq;
108 struct nlattr *nl_rates, *nl_rate; 119 struct nlattr *nl_rates, *nl_rate;
120 struct nlattr *nl_modes;
109 enum ieee80211_band band; 121 enum ieee80211_band band;
110 struct ieee80211_channel *chan; 122 struct ieee80211_channel *chan;
111 struct ieee80211_rate *rate; 123 struct ieee80211_rate *rate;
112 int i; 124 int i;
125 u16 ifmodes = dev->wiphy.interface_modes;
113 126
114 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); 127 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
115 if (!hdr) 128 if (!hdr)
@@ -118,6 +131,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
118 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); 131 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx);
119 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 132 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
120 133
134 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
135 if (!nl_modes)
136 goto nla_put_failure;
137
138 i = 0;
139 while (ifmodes) {
140 if (ifmodes & 1)
141 NLA_PUT_FLAG(msg, i);
142 ifmodes >>= 1;
143 i++;
144 }
145
146 nla_nest_end(msg, nl_modes);
147
121 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); 148 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
122 if (!nl_bands) 149 if (!nl_bands)
123 goto nla_put_failure; 150 goto nla_put_failure;
@@ -408,7 +435,8 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
408 ifindex = dev->ifindex; 435 ifindex = dev->ifindex;
409 dev_put(dev); 436 dev_put(dev);
410 437
411 if (!drv->ops->change_virtual_intf) { 438 if (!drv->ops->change_virtual_intf ||
439 !(drv->wiphy.interface_modes & (1 << type))) {
412 err = -EOPNOTSUPP; 440 err = -EOPNOTSUPP;
413 goto unlock; 441 goto unlock;
414 } 442 }
@@ -455,7 +483,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
455 if (IS_ERR(drv)) 483 if (IS_ERR(drv))
456 return PTR_ERR(drv); 484 return PTR_ERR(drv);
457 485
458 if (!drv->ops->add_virtual_intf) { 486 if (!drv->ops->add_virtual_intf ||
487 !(drv->wiphy.interface_modes & (1 << type))) {
459 err = -EOPNOTSUPP; 488 err = -EOPNOTSUPP;
460 goto unlock; 489 goto unlock;
461 } 490 }
@@ -1125,6 +1154,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
1125 params.listen_interval = 1154 params.listen_interval =
1126 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 1155 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
1127 1156
1157 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
1158 params.ht_capa =
1159 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
1160
1128 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], 1161 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS],
1129 &params.station_flags)) 1162 &params.station_flags))
1130 return -EINVAL; 1163 return -EINVAL;
@@ -1188,6 +1221,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1188 params.listen_interval = 1221 params.listen_interval =
1189 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 1222 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
1190 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); 1223 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
1224 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
1225 params.ht_capa =
1226 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
1191 1227
1192 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], 1228 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS],
1193 &params.station_flags)) 1229 &params.station_flags))
@@ -1525,6 +1561,183 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
1525 return err; 1561 return err;
1526} 1562}
1527 1563
1564static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
1565{
1566 struct cfg80211_registered_device *drv;
1567 int err;
1568 struct net_device *dev;
1569 struct bss_parameters params;
1570
1571 memset(&params, 0, sizeof(params));
1572 /* default to not changing parameters */
1573 params.use_cts_prot = -1;
1574 params.use_short_preamble = -1;
1575 params.use_short_slot_time = -1;
1576
1577 if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
1578 params.use_cts_prot =
1579 nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]);
1580 if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE])
1581 params.use_short_preamble =
1582 nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]);
1583 if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME])
1584 params.use_short_slot_time =
1585 nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]);
1586
1587 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
1588 if (err)
1589 return err;
1590
1591 if (!drv->ops->change_bss) {
1592 err = -EOPNOTSUPP;
1593 goto out;
1594 }
1595
1596 rtnl_lock();
1597 err = drv->ops->change_bss(&drv->wiphy, dev, &params);
1598 rtnl_unlock();
1599
1600 out:
1601 cfg80211_put_dev(drv);
1602 dev_put(dev);
1603 return err;
1604}
1605
1606static const struct nla_policy
1607 reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
1608 [NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 },
1609 [NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 },
1610 [NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 },
1611 [NL80211_ATTR_FREQ_RANGE_MAX_BW] = { .type = NLA_U32 },
1612 [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN] = { .type = NLA_U32 },
1613 [NL80211_ATTR_POWER_RULE_MAX_EIRP] = { .type = NLA_U32 },
1614};
1615
1616static int parse_reg_rule(struct nlattr *tb[],
1617 struct ieee80211_reg_rule *reg_rule)
1618{
1619 struct ieee80211_freq_range *freq_range = &reg_rule->freq_range;
1620 struct ieee80211_power_rule *power_rule = &reg_rule->power_rule;
1621
1622 if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
1623 return -EINVAL;
1624 if (!tb[NL80211_ATTR_FREQ_RANGE_START])
1625 return -EINVAL;
1626 if (!tb[NL80211_ATTR_FREQ_RANGE_END])
1627 return -EINVAL;
1628 if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
1629 return -EINVAL;
1630 if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
1631 return -EINVAL;
1632
1633 reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
1634
1635 freq_range->start_freq_khz =
1636 nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
1637 freq_range->end_freq_khz =
1638 nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
1639 freq_range->max_bandwidth_khz =
1640 nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
1641
1642 power_rule->max_eirp =
1643 nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
1644
1645 if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
1646 power_rule->max_antenna_gain =
1647 nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
1648
1649 return 0;
1650}
1651
1652static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
1653{
1654 int r;
1655 char *data = NULL;
1656
1657 if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
1658 return -EINVAL;
1659
1660 data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
1661
1662#ifdef CONFIG_WIRELESS_OLD_REGULATORY
1663 /* We ignore world regdom requests with the old regdom setup */
1664 if (is_world_regdom(data))
1665 return -EINVAL;
1666#endif
1667 mutex_lock(&cfg80211_drv_mutex);
1668 r = __regulatory_hint(NULL, REGDOM_SET_BY_USER, data, NULL);
1669 mutex_unlock(&cfg80211_drv_mutex);
1670 return r;
1671}
1672
1673static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
1674{
1675 struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
1676 struct nlattr *nl_reg_rule;
1677 char *alpha2 = NULL;
1678 int rem_reg_rules = 0, r = 0;
1679 u32 num_rules = 0, rule_idx = 0, size_of_regd;
1680 struct ieee80211_regdomain *rd = NULL;
1681
1682 if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
1683 return -EINVAL;
1684
1685 if (!info->attrs[NL80211_ATTR_REG_RULES])
1686 return -EINVAL;
1687
1688 alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
1689
1690 nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
1691 rem_reg_rules) {
1692 num_rules++;
1693 if (num_rules > NL80211_MAX_SUPP_REG_RULES)
1694 goto bad_reg;
1695 }
1696
1697 if (!reg_is_valid_request(alpha2))
1698 return -EINVAL;
1699
1700 size_of_regd = sizeof(struct ieee80211_regdomain) +
1701 (num_rules * sizeof(struct ieee80211_reg_rule));
1702
1703 rd = kzalloc(size_of_regd, GFP_KERNEL);
1704 if (!rd)
1705 return -ENOMEM;
1706
1707 rd->n_reg_rules = num_rules;
1708 rd->alpha2[0] = alpha2[0];
1709 rd->alpha2[1] = alpha2[1];
1710
1711 nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
1712 rem_reg_rules) {
1713 nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
1714 nla_data(nl_reg_rule), nla_len(nl_reg_rule),
1715 reg_rule_policy);
1716 r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]);
1717 if (r)
1718 goto bad_reg;
1719
1720 rule_idx++;
1721
1722 if (rule_idx > NL80211_MAX_SUPP_REG_RULES)
1723 goto bad_reg;
1724 }
1725
1726 BUG_ON(rule_idx != num_rules);
1727
1728 mutex_lock(&cfg80211_drv_mutex);
1729 r = set_regdom(rd);
1730 mutex_unlock(&cfg80211_drv_mutex);
1731 if (r)
1732 goto bad_reg;
1733
1734 return r;
1735
1736bad_reg:
1737 kfree(rd);
1738 return -EINVAL;
1739}
1740
1528static struct genl_ops nl80211_ops[] = { 1741static struct genl_ops nl80211_ops[] = {
1529 { 1742 {
1530 .cmd = NL80211_CMD_GET_WIPHY, 1743 .cmd = NL80211_CMD_GET_WIPHY,
@@ -1656,6 +1869,24 @@ static struct genl_ops nl80211_ops[] = {
1656 .policy = nl80211_policy, 1869 .policy = nl80211_policy,
1657 .flags = GENL_ADMIN_PERM, 1870 .flags = GENL_ADMIN_PERM,
1658 }, 1871 },
1872 {
1873 .cmd = NL80211_CMD_SET_BSS,
1874 .doit = nl80211_set_bss,
1875 .policy = nl80211_policy,
1876 .flags = GENL_ADMIN_PERM,
1877 },
1878 {
1879 .cmd = NL80211_CMD_SET_REG,
1880 .doit = nl80211_set_reg,
1881 .policy = nl80211_policy,
1882 .flags = GENL_ADMIN_PERM,
1883 },
1884 {
1885 .cmd = NL80211_CMD_REQ_SET_REG,
1886 .doit = nl80211_req_set_reg,
1887 .policy = nl80211_policy,
1888 .flags = GENL_ADMIN_PERM,
1889 },
1659}; 1890};
1660 1891
1661/* multicast groups */ 1892/* multicast groups */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 855bff4b3250..592b2e391d42 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2,179 +2,758 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
5 * Copyright 2008 Luis R. Rodriguez <lrodriguz@atheros.com>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
9 */ 10 */
10 11
11/* 12/**
12 * This regulatory domain control implementation is highly incomplete, it 13 * DOC: Wireless regulatory infrastructure
13 * only exists for the purpose of not regressing mac80211.
14 *
15 * For now, drivers can restrict the set of allowed channels by either
16 * not registering those channels or setting the IEEE80211_CHAN_DISABLED
17 * flag; that flag will only be *set* by this code, never *cleared.
18 * 14 *
19 * The usual implementation is for a driver to read a device EEPROM to 15 * The usual implementation is for a driver to read a device EEPROM to
20 * determine which regulatory domain it should be operating under, then 16 * determine which regulatory domain it should be operating under, then
21 * looking up the allowable channels in a driver-local table and finally 17 * looking up the allowable channels in a driver-local table and finally
22 * registering those channels in the wiphy structure. 18 * registering those channels in the wiphy structure.
23 * 19 *
24 * Alternatively, drivers that trust the regulatory domain control here 20 * Another set of compliance enforcement is for drivers to use their
25 * will register a complete set of capabilities and the control code 21 * own compliance limits which can be stored on the EEPROM. The host
26 * will restrict the set by setting the IEEE80211_CHAN_* flags. 22 * driver or firmware may ensure these are used.
23 *
24 * In addition to all this we provide an extra layer of regulatory
25 * conformance. For drivers which do not have any regulatory
26 * information CRDA provides the complete regulatory solution.
27 * For others it provides a community effort on further restrictions
28 * to enhance compliance.
29 *
30 * Note: When number of rules --> infinity we will not be able to
31 * index on alpha2 any more, instead we'll probably have to
32 * rely on some SHA1 checksum of the regdomain for example.
33 *
27 */ 34 */
28#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/list.h>
37#include <linux/random.h>
38#include <linux/nl80211.h>
39#include <linux/platform_device.h>
29#include <net/wireless.h> 40#include <net/wireless.h>
41#include <net/cfg80211.h>
30#include "core.h" 42#include "core.h"
43#include "reg.h"
31 44
32static char *ieee80211_regdom = "US"; 45/* To trigger userspace events */
33module_param(ieee80211_regdom, charp, 0444); 46static struct platform_device *reg_pdev;
34MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
35 47
36struct ieee80211_channel_range { 48/* Keep the ordering from large to small */
37 short start_freq; 49static u32 supported_bandwidths[] = {
38 short end_freq; 50 MHZ_TO_KHZ(40),
39 int max_power; 51 MHZ_TO_KHZ(20),
40 int max_antenna_gain;
41 u32 flags;
42}; 52};
43 53
44struct ieee80211_regdomain { 54bool is_world_regdom(char *alpha2)
45 const char *code; 55{
46 const struct ieee80211_channel_range *ranges; 56 if (!alpha2)
47 int n_ranges; 57 return false;
48}; 58 if (alpha2[0] == '0' && alpha2[1] == '0')
59 return true;
60 return false;
61}
49 62
50#define RANGE_PWR(_start, _end, _pwr, _ag, _flags) \ 63static bool is_alpha2_set(char *alpha2)
51 { _start, _end, _pwr, _ag, _flags } 64{
65 if (!alpha2)
66 return false;
67 if (alpha2[0] != 0 && alpha2[1] != 0)
68 return true;
69 return false;
70}
52 71
72static bool is_alpha_upper(char letter)
73{
74 /* ASCII A - Z */
75 if (letter >= 65 && letter <= 90)
76 return true;
77 return false;
78}
53 79
54/* 80static bool is_unknown_alpha2(char *alpha2)
55 * Ideally, in the future, these definitions will be loaded from a 81{
56 * userspace table via some daemon. 82 if (!alpha2)
57 */ 83 return false;
58static const struct ieee80211_channel_range ieee80211_US_channels[] = { 84 /* Special case where regulatory domain was built by driver
59 /* IEEE 802.11b/g, channels 1..11 */ 85 * but a specific alpha2 cannot be determined */
60 RANGE_PWR(2412, 2462, 27, 6, 0), 86 if (alpha2[0] == '9' && alpha2[1] == '9')
61 /* IEEE 802.11a, channel 36*/ 87 return true;
62 RANGE_PWR(5180, 5180, 23, 6, 0), 88 return false;
63 /* IEEE 802.11a, channel 40*/ 89}
64 RANGE_PWR(5200, 5200, 23, 6, 0),
65 /* IEEE 802.11a, channel 44*/
66 RANGE_PWR(5220, 5220, 23, 6, 0),
67 /* IEEE 802.11a, channels 48..64 */
68 RANGE_PWR(5240, 5320, 23, 6, 0),
69 /* IEEE 802.11a, channels 149..165, outdoor */
70 RANGE_PWR(5745, 5825, 30, 6, 0),
71};
72 90
73static const struct ieee80211_channel_range ieee80211_JP_channels[] = { 91static bool is_an_alpha2(char *alpha2)
74 /* IEEE 802.11b/g, channels 1..14 */ 92{
75 RANGE_PWR(2412, 2484, 20, 6, 0), 93 if (!alpha2)
76 /* IEEE 802.11a, channels 34..48 */ 94 return false;
77 RANGE_PWR(5170, 5240, 20, 6, IEEE80211_CHAN_PASSIVE_SCAN), 95 if (is_alpha_upper(alpha2[0]) && is_alpha_upper(alpha2[1]))
78 /* IEEE 802.11a, channels 52..64 */ 96 return true;
79 RANGE_PWR(5260, 5320, 20, 6, IEEE80211_CHAN_NO_IBSS | 97 return false;
80 IEEE80211_CHAN_RADAR), 98}
81};
82 99
83static const struct ieee80211_channel_range ieee80211_EU_channels[] = { 100static bool alpha2_equal(char *alpha2_x, char *alpha2_y)
84 /* IEEE 802.11b/g, channels 1..13 */ 101{
85 RANGE_PWR(2412, 2472, 20, 6, 0), 102 if (!alpha2_x || !alpha2_y)
86 /* IEEE 802.11a, channel 36*/ 103 return false;
87 RANGE_PWR(5180, 5180, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN), 104 if (alpha2_x[0] == alpha2_y[0] &&
88 /* IEEE 802.11a, channel 40*/ 105 alpha2_x[1] == alpha2_y[1])
89 RANGE_PWR(5200, 5200, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN), 106 return true;
90 /* IEEE 802.11a, channel 44*/ 107 return false;
91 RANGE_PWR(5220, 5220, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN), 108}
92 /* IEEE 802.11a, channels 48..64 */ 109
93 RANGE_PWR(5240, 5320, 23, 6, IEEE80211_CHAN_NO_IBSS | 110static bool regdom_changed(char *alpha2)
94 IEEE80211_CHAN_RADAR), 111{
95 /* IEEE 802.11a, channels 100..140 */ 112 if (!cfg80211_regdomain)
96 RANGE_PWR(5500, 5700, 30, 6, IEEE80211_CHAN_NO_IBSS | 113 return true;
97 IEEE80211_CHAN_RADAR), 114 if (alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
98}; 115 return false;
116 return true;
117}
118
119/* This lets us keep regulatory code which is updated on a regulatory
120 * basis in userspace. */
121static int call_crda(const char *alpha2)
122{
123 char country_env[9 + 2] = "COUNTRY=";
124 char *envp[] = {
125 country_env,
126 NULL
127 };
128
129 if (!is_world_regdom((char *) alpha2))
130 printk(KERN_INFO "cfg80211: Calling CRDA for country: %c%c\n",
131 alpha2[0], alpha2[1]);
132 else
133#ifdef CONFIG_WIRELESS_OLD_REGULATORY
134 return -EINVAL;
135#else
136 printk(KERN_INFO "cfg80211: Calling CRDA to update world "
137 "regulatory domain\n");
138#endif
139
140 country_env[8] = alpha2[0];
141 country_env[9] = alpha2[1];
142
143 return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, envp);
144}
145
146/* This has the logic which determines when a new request
147 * should be ignored. */
148static int ignore_request(struct wiphy *wiphy, enum reg_set_by set_by,
149 char *alpha2, struct ieee80211_regdomain *rd)
150{
151 struct regulatory_request *last_request = NULL;
99 152
100#define REGDOM(_code) \ 153 /* All initial requests are respected */
101 { \ 154 if (list_empty(&regulatory_requests))
102 .code = __stringify(_code), \ 155 return 0;
103 .ranges = ieee80211_ ##_code## _channels, \ 156
104 .n_ranges = ARRAY_SIZE(ieee80211_ ##_code## _channels), \ 157 last_request = list_first_entry(&regulatory_requests,
158 struct regulatory_request, list);
159
160 switch (set_by) {
161 case REGDOM_SET_BY_INIT:
162 return -EINVAL;
163 case REGDOM_SET_BY_CORE:
164 /* Always respect new wireless core hints, should only
165 * come in for updating the world regulatory domain at init
166 * anyway */
167 return 0;
168 case REGDOM_SET_BY_COUNTRY_IE:
169 if (last_request->initiator == set_by) {
170 if (last_request->wiphy != wiphy) {
171 /* Two cards with two APs claiming different
172 * different Country IE alpha2s!
173 * You're special!! */
174 if (!alpha2_equal(last_request->alpha2,
175 cfg80211_regdomain->alpha2)) {
176 /* XXX: Deal with conflict, consider
177 * building a new one out of the
178 * intersection */
179 WARN_ON(1);
180 return -EOPNOTSUPP;
181 }
182 return -EALREADY;
183 }
184 /* Two consecutive Country IE hints on the same wiphy */
185 if (!alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
186 return 0;
187 return -EALREADY;
188 }
189 if (WARN_ON(!is_alpha2_set(alpha2) || !is_an_alpha2(alpha2)),
190 "Invalid Country IE regulatory hint passed "
191 "to the wireless core\n")
192 return -EINVAL;
193 /* We ignore Country IE hints for now, as we haven't yet
194 * added the dot11MultiDomainCapabilityEnabled flag
195 * for wiphys */
196 return 1;
197 case REGDOM_SET_BY_DRIVER:
198 BUG_ON(!wiphy);
199 if (last_request->initiator == set_by) {
200 /* Two separate drivers hinting different things,
201 * this is possible if you have two devices present
202 * on a system with different EEPROM regulatory
203 * readings. XXX: Do intersection, we support only
204 * the first regulatory hint for now */
205 if (last_request->wiphy != wiphy)
206 return -EALREADY;
207 if (rd)
208 return -EALREADY;
209 /* Driver should not be trying to hint different
210 * regulatory domains! */
211 BUG_ON(!alpha2_equal(alpha2,
212 cfg80211_regdomain->alpha2));
213 return -EALREADY;
214 }
215 if (last_request->initiator == REGDOM_SET_BY_CORE)
216 return 0;
217 /* XXX: Handle intersection, and add the
218 * dot11MultiDomainCapabilityEnabled flag to wiphy. For now
219 * we assume the driver has this set to false, following the
220 * 802.11d dot11MultiDomainCapabilityEnabled documentation */
221 if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE)
222 return 0;
223 return 0;
224 case REGDOM_SET_BY_USER:
225 if (last_request->initiator == set_by ||
226 last_request->initiator == REGDOM_SET_BY_CORE)
227 return 0;
228 /* Drivers can use their wiphy's reg_notifier()
229 * to override any information */
230 if (last_request->initiator == REGDOM_SET_BY_DRIVER)
231 return 0;
232 /* XXX: Handle intersection */
233 if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE)
234 return -EOPNOTSUPP;
235 return 0;
236 default:
237 return -EINVAL;
105 } 238 }
239}
106 240
107static const struct ieee80211_regdomain ieee80211_regdoms[] = { 241static bool __reg_is_valid_request(char *alpha2,
108 REGDOM(US), 242 struct regulatory_request **request)
109 REGDOM(JP), 243{
110 REGDOM(EU), 244 struct regulatory_request *req;
111}; 245 if (list_empty(&regulatory_requests))
246 return false;
247 list_for_each_entry(req, &regulatory_requests, list) {
248 if (alpha2_equal(req->alpha2, alpha2)) {
249 *request = req;
250 return true;
251 }
252 }
253 return false;
254}
112 255
256/* Used by nl80211 before kmalloc'ing our regulatory domain */
257bool reg_is_valid_request(char *alpha2)
258{
259 struct regulatory_request *request = NULL;
260 return __reg_is_valid_request(alpha2, &request);
261}
113 262
114static const struct ieee80211_regdomain *get_regdom(void) 263/* Sanity check on a regulatory rule */
264static bool is_valid_reg_rule(struct ieee80211_reg_rule *rule)
115{ 265{
116 static const struct ieee80211_channel_range 266 struct ieee80211_freq_range *freq_range = &rule->freq_range;
117 ieee80211_world_channels[] = { 267 u32 freq_diff;
118 /* IEEE 802.11b/g, channels 1..11 */ 268
119 RANGE_PWR(2412, 2462, 27, 6, 0), 269 if (freq_range->start_freq_khz == 0 || freq_range->end_freq_khz == 0)
120 }; 270 return false;
121 static const struct ieee80211_regdomain regdom_world = REGDOM(world); 271
122 int i; 272 if (freq_range->start_freq_khz > freq_range->end_freq_khz)
273 return false;
274
275 freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
276
277 if (freq_range->max_bandwidth_khz > freq_diff)
278 return false;
279
280 return true;
281}
282
283static bool is_valid_rd(struct ieee80211_regdomain *rd)
284{
285 struct ieee80211_reg_rule *reg_rule = NULL;
286 unsigned int i;
123 287
124 for (i = 0; i < ARRAY_SIZE(ieee80211_regdoms); i++) 288 if (!rd->n_reg_rules)
125 if (strcmp(ieee80211_regdom, ieee80211_regdoms[i].code) == 0) 289 return false;
126 return &ieee80211_regdoms[i];
127 290
128 return &regdom_world; 291 for (i = 0; i < rd->n_reg_rules; i++) {
292 reg_rule = &rd->reg_rules[i];
293 if (!is_valid_reg_rule(reg_rule))
294 return false;
295 }
296
297 return true;
129} 298}
130 299
300/* Returns value in KHz */
301static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range,
302 u32 freq)
303{
304 unsigned int i;
305 for (i = 0; i < ARRAY_SIZE(supported_bandwidths); i++) {
306 u32 start_freq_khz = freq - supported_bandwidths[i]/2;
307 u32 end_freq_khz = freq + supported_bandwidths[i]/2;
308 if (start_freq_khz >= freq_range->start_freq_khz &&
309 end_freq_khz <= freq_range->end_freq_khz)
310 return supported_bandwidths[i];
311 }
312 return 0;
313}
131 314
132static void handle_channel(struct ieee80211_channel *chan, 315/* XXX: add support for the rest of enum nl80211_reg_rule_flags, we may
133 const struct ieee80211_regdomain *rd) 316 * want to just have the channel structure use these */
317static u32 map_regdom_flags(u32 rd_flags)
318{
319 u32 channel_flags = 0;
320 if (rd_flags & NL80211_RRF_PASSIVE_SCAN)
321 channel_flags |= IEEE80211_CHAN_PASSIVE_SCAN;
322 if (rd_flags & NL80211_RRF_NO_IBSS)
323 channel_flags |= IEEE80211_CHAN_NO_IBSS;
324 if (rd_flags & NL80211_RRF_DFS)
325 channel_flags |= IEEE80211_CHAN_RADAR;
326 return channel_flags;
327}
328
329/**
330 * freq_reg_info - get regulatory information for the given frequency
331 * @center_freq: Frequency in KHz for which we want regulatory information for
332 * @bandwidth: the bandwidth requirement you have in KHz, if you do not have one
333 * you can set this to 0. If this frequency is allowed we then set
334 * this value to the maximum allowed bandwidth.
335 * @reg_rule: the regulatory rule which we have for this frequency
336 *
337 * Use this function to get the regulatory rule for a specific frequency.
338 */
339static int freq_reg_info(u32 center_freq, u32 *bandwidth,
340 const struct ieee80211_reg_rule **reg_rule)
134{ 341{
135 int i; 342 int i;
136 u32 flags = chan->orig_flags; 343 u32 max_bandwidth = 0;
137 const struct ieee80211_channel_range *rg = NULL;
138 344
139 for (i = 0; i < rd->n_ranges; i++) { 345 if (!cfg80211_regdomain)
140 if (rd->ranges[i].start_freq <= chan->center_freq && 346 return -EINVAL;
141 chan->center_freq <= rd->ranges[i].end_freq) { 347
142 rg = &rd->ranges[i]; 348 for (i = 0; i < cfg80211_regdomain->n_reg_rules; i++) {
349 const struct ieee80211_reg_rule *rr;
350 const struct ieee80211_freq_range *fr = NULL;
351 const struct ieee80211_power_rule *pr = NULL;
352
353 rr = &cfg80211_regdomain->reg_rules[i];
354 fr = &rr->freq_range;
355 pr = &rr->power_rule;
356 max_bandwidth = freq_max_bandwidth(fr, center_freq);
357 if (max_bandwidth && *bandwidth <= max_bandwidth) {
358 *reg_rule = rr;
359 *bandwidth = max_bandwidth;
143 break; 360 break;
144 } 361 }
145 } 362 }
146 363
147 if (!rg) { 364 return !max_bandwidth;
148 /* not found */ 365}
366
367static void handle_channel(struct ieee80211_channel *chan)
368{
369 int r;
370 u32 flags = chan->orig_flags;
371 u32 max_bandwidth = 0;
372 const struct ieee80211_reg_rule *reg_rule = NULL;
373 const struct ieee80211_power_rule *power_rule = NULL;
374
375 r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq),
376 &max_bandwidth, &reg_rule);
377
378 if (r) {
149 flags |= IEEE80211_CHAN_DISABLED; 379 flags |= IEEE80211_CHAN_DISABLED;
150 chan->flags = flags; 380 chan->flags = flags;
151 return; 381 return;
152 } 382 }
153 383
154 chan->flags = flags; 384 power_rule = &reg_rule->power_rule;
385
386 chan->flags = flags | map_regdom_flags(reg_rule->flags);
155 chan->max_antenna_gain = min(chan->orig_mag, 387 chan->max_antenna_gain = min(chan->orig_mag,
156 rg->max_antenna_gain); 388 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
389 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth);
157 if (chan->orig_mpwr) 390 if (chan->orig_mpwr)
158 chan->max_power = min(chan->orig_mpwr, rg->max_power); 391 chan->max_power = min(chan->orig_mpwr,
392 (int) MBM_TO_DBM(power_rule->max_eirp));
159 else 393 else
160 chan->max_power = rg->max_power; 394 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
161} 395}
162 396
163static void handle_band(struct ieee80211_supported_band *sband, 397static void handle_band(struct ieee80211_supported_band *sband)
164 const struct ieee80211_regdomain *rd)
165{ 398{
166 int i; 399 int i;
167 400
168 for (i = 0; i < sband->n_channels; i++) 401 for (i = 0; i < sband->n_channels; i++)
169 handle_channel(&sband->channels[i], rd); 402 handle_channel(&sband->channels[i]);
170} 403}
171 404
172void wiphy_update_regulatory(struct wiphy *wiphy) 405static void update_all_wiphy_regulatory(enum reg_set_by setby)
173{ 406{
174 enum ieee80211_band band; 407 struct cfg80211_registered_device *drv;
175 const struct ieee80211_regdomain *rd = get_regdom();
176 408
177 for (band = 0; band < IEEE80211_NUM_BANDS; band++) 409 list_for_each_entry(drv, &cfg80211_drv_list, list)
410 wiphy_update_regulatory(&drv->wiphy, setby);
411}
412
413void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby)
414{
415 enum ieee80211_band band;
416 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
178 if (wiphy->bands[band]) 417 if (wiphy->bands[band])
179 handle_band(wiphy->bands[band], rd); 418 handle_band(wiphy->bands[band]);
419 if (wiphy->reg_notifier)
420 wiphy->reg_notifier(wiphy, setby);
421 }
422}
423
424/* Caller must hold &cfg80211_drv_mutex */
425int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by,
426 const char *alpha2, struct ieee80211_regdomain *rd)
427{
428 struct regulatory_request *request;
429 char *rd_alpha2;
430 int r = 0;
431
432 r = ignore_request(wiphy, set_by, (char *) alpha2, rd);
433 if (r)
434 return r;
435
436 if (rd)
437 rd_alpha2 = rd->alpha2;
438 else
439 rd_alpha2 = (char *) alpha2;
440
441 switch (set_by) {
442 case REGDOM_SET_BY_CORE:
443 case REGDOM_SET_BY_COUNTRY_IE:
444 case REGDOM_SET_BY_DRIVER:
445 case REGDOM_SET_BY_USER:
446 request = kzalloc(sizeof(struct regulatory_request),
447 GFP_KERNEL);
448 if (!request)
449 return -ENOMEM;
450
451 request->alpha2[0] = rd_alpha2[0];
452 request->alpha2[1] = rd_alpha2[1];
453 request->initiator = set_by;
454 request->wiphy = wiphy;
455
456 list_add_tail(&request->list, &regulatory_requests);
457 if (rd)
458 break;
459 r = call_crda(alpha2);
460#ifndef CONFIG_WIRELESS_OLD_REGULATORY
461 if (r)
462 printk(KERN_ERR "cfg80211: Failed calling CRDA\n");
463#endif
464 break;
465 default:
466 r = -ENOTSUPP;
467 break;
468 }
469
470 return r;
471}
472
473/* If rd is not NULL and if this call fails the caller must free it */
474int regulatory_hint(struct wiphy *wiphy, const char *alpha2,
475 struct ieee80211_regdomain *rd)
476{
477 int r;
478 BUG_ON(!rd && !alpha2);
479
480 mutex_lock(&cfg80211_drv_mutex);
481
482 r = __regulatory_hint(wiphy, REGDOM_SET_BY_DRIVER, alpha2, rd);
483 if (r || !rd)
484 goto unlock_and_exit;
485
486 /* If the driver passed a regulatory domain we skipped asking
487 * userspace for one so we can now go ahead and set it */
488 r = set_regdom(rd);
489
490unlock_and_exit:
491 mutex_unlock(&cfg80211_drv_mutex);
492 return r;
493}
494EXPORT_SYMBOL(regulatory_hint);
495
496
497static void print_rd_rules(struct ieee80211_regdomain *rd)
498{
499 unsigned int i;
500 struct ieee80211_reg_rule *reg_rule = NULL;
501 struct ieee80211_freq_range *freq_range = NULL;
502 struct ieee80211_power_rule *power_rule = NULL;
503
504 printk(KERN_INFO "\t(start_freq - end_freq @ bandwidth), "
505 "(max_antenna_gain, max_eirp)\n");
506
507 for (i = 0; i < rd->n_reg_rules; i++) {
508 reg_rule = &rd->reg_rules[i];
509 freq_range = &reg_rule->freq_range;
510 power_rule = &reg_rule->power_rule;
511
512 /* There may not be documentation for max antenna gain
513 * in certain regions */
514 if (power_rule->max_antenna_gain)
515 printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), "
516 "(%d mBi, %d mBm)\n",
517 freq_range->start_freq_khz,
518 freq_range->end_freq_khz,
519 freq_range->max_bandwidth_khz,
520 power_rule->max_antenna_gain,
521 power_rule->max_eirp);
522 else
523 printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), "
524 "(N/A, %d mBm)\n",
525 freq_range->start_freq_khz,
526 freq_range->end_freq_khz,
527 freq_range->max_bandwidth_khz,
528 power_rule->max_eirp);
529 }
530}
531
532static void print_regdomain(struct ieee80211_regdomain *rd)
533{
534
535 if (is_world_regdom(rd->alpha2))
536 printk(KERN_INFO "cfg80211: World regulatory "
537 "domain updated:\n");
538 else {
539 if (is_unknown_alpha2(rd->alpha2))
540 printk(KERN_INFO "cfg80211: Regulatory domain "
541 "changed to driver built-in settings "
542 "(unknown country)\n");
543 else
544 printk(KERN_INFO "cfg80211: Regulatory domain "
545 "changed to country: %c%c\n",
546 rd->alpha2[0], rd->alpha2[1]);
547 }
548 print_rd_rules(rd);
549}
550
551void print_regdomain_info(struct ieee80211_regdomain *rd)
552{
553 printk(KERN_INFO "cfg80211: Regulatory domain: %c%c\n",
554 rd->alpha2[0], rd->alpha2[1]);
555 print_rd_rules(rd);
556}
557
558#ifdef CONFIG_WIRELESS_OLD_REGULATORY
559
560static bool is_old_static_regdom(struct ieee80211_regdomain *rd)
561{
562 if (rd == &us_regdom || rd == &jp_regdom || rd == &eu_regdom)
563 return true;
564 return false;
565}
566
567/* The old crap never deals with a world regulatory domain, it only
568 * deals with the static regulatory domain passed and if possible
569 * an updated "US" or "JP" regulatory domain. We do however store the
570 * old static regulatory domain in cfg80211_world_regdom for convenience
571 * of use here */
572static void reset_regdomains_static(void)
573{
574 if (!is_old_static_regdom(cfg80211_regdomain))
575 kfree(cfg80211_regdomain);
576 /* This is setting the regdom to the old static regdom */
577 cfg80211_regdomain =
578 (struct ieee80211_regdomain *) cfg80211_world_regdom;
579}
580#else
581static void reset_regdomains(void)
582{
583 if (cfg80211_world_regdom && cfg80211_world_regdom != &world_regdom) {
584 if (cfg80211_world_regdom == cfg80211_regdomain) {
585 kfree(cfg80211_regdomain);
586 } else {
587 kfree(cfg80211_world_regdom);
588 kfree(cfg80211_regdomain);
589 }
590 } else if (cfg80211_regdomain && cfg80211_regdomain != &world_regdom)
591 kfree(cfg80211_regdomain);
592
593 cfg80211_world_regdom = (struct ieee80211_regdomain *) &world_regdom;
594 cfg80211_regdomain = NULL;
595}
596
597/* Dynamic world regulatory domain requested by the wireless
598 * core upon initialization */
599static void update_world_regdomain(struct ieee80211_regdomain *rd)
600{
601 BUG_ON(list_empty(&regulatory_requests));
602
603 reset_regdomains();
604
605 cfg80211_world_regdom = rd;
606 cfg80211_regdomain = rd;
607}
608#endif
609
610static int __set_regdom(struct ieee80211_regdomain *rd)
611{
612 struct regulatory_request *request = NULL;
613
614 /* Some basic sanity checks first */
615
616#ifdef CONFIG_WIRELESS_OLD_REGULATORY
617 /* We ignore the world regdom with the old static regdomains setup
618 * as there is no point to it with satic regulatory definitions :(
619 * Don't worry this shit will be removed soon... */
620 if (is_world_regdom(rd->alpha2))
621 return -EINVAL;
622#else
623 if (is_world_regdom(rd->alpha2)) {
624 if (WARN_ON(!__reg_is_valid_request(rd->alpha2, &request)))
625 return -EINVAL;
626 update_world_regdomain(rd);
627 return 0;
628 }
629#endif
630
631 if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) &&
632 !is_unknown_alpha2(rd->alpha2))
633 return -EINVAL;
634
635 if (list_empty(&regulatory_requests))
636 return -EINVAL;
637
638#ifdef CONFIG_WIRELESS_OLD_REGULATORY
639 /* Static "US" and "JP" will be overridden, but just once */
640 if (!is_old_static_regdom(cfg80211_regdomain) &&
641 !regdom_changed(rd->alpha2))
642 return -EINVAL;
643#else
644 if (!regdom_changed(rd->alpha2))
645 return -EINVAL;
646#endif
647
648 /* Now lets set the regulatory domain, update all driver channels
649 * and finally inform them of what we have done, in case they want
650 * to review or adjust their own settings based on their own
651 * internal EEPROM data */
652
653 if (WARN_ON(!__reg_is_valid_request(rd->alpha2, &request)))
654 return -EINVAL;
655
656#ifdef CONFIG_WIRELESS_OLD_REGULATORY
657 reset_regdomains_static();
658#else
659 reset_regdomains();
660#endif
661
662 /* Country IE parsing coming soon */
663 switch (request->initiator) {
664 case REGDOM_SET_BY_CORE:
665 case REGDOM_SET_BY_DRIVER:
666 case REGDOM_SET_BY_USER:
667 if (!is_valid_rd(rd)) {
668 printk(KERN_ERR "cfg80211: Invalid "
669 "regulatory domain detected:\n");
670 print_regdomain_info(rd);
671 return -EINVAL;
672 }
673 break;
674 case REGDOM_SET_BY_COUNTRY_IE: /* Not yet */
675 WARN_ON(1);
676 default:
677 return -EOPNOTSUPP;
678 }
679
680 /* Tada! */
681 cfg80211_regdomain = rd;
682 request->granted = 1;
683
684 return 0;
685}
686
687
688/* Use this call to set the current regulatory domain. Conflicts with
689 * multiple drivers can be ironed out later. Caller must've already
690 * kmalloc'd the rd structure. If this calls fails you should kfree()
691 * the passed rd. Caller must hold cfg80211_drv_mutex */
692int set_regdom(struct ieee80211_regdomain *rd)
693{
694 struct regulatory_request *this_request = NULL, *prev_request = NULL;
695 int r;
696
697 if (!list_empty(&regulatory_requests))
698 prev_request = list_first_entry(&regulatory_requests,
699 struct regulatory_request, list);
700
701 /* Note that this doesn't update the wiphys, this is done below */
702 r = __set_regdom(rd);
703 if (r)
704 return r;
705
706 BUG_ON((!__reg_is_valid_request(rd->alpha2, &this_request)));
707
708 /* The initial standard core update of the world regulatory domain, no
709 * need to keep that request info around if it didn't fail. */
710 if (is_world_regdom(rd->alpha2) &&
711 this_request->initiator == REGDOM_SET_BY_CORE &&
712 this_request->granted) {
713 list_del(&this_request->list);
714 kfree(this_request);
715 this_request = NULL;
716 }
717
718 /* Remove old requests, we only leave behind the last one */
719 if (prev_request) {
720 list_del(&prev_request->list);
721 kfree(prev_request);
722 prev_request = NULL;
723 }
724
725 /* This would make this whole thing pointless */
726 BUG_ON(rd != cfg80211_regdomain);
727
728 /* update all wiphys now with the new established regulatory domain */
729 update_all_wiphy_regulatory(this_request->initiator);
730
731 print_regdomain(rd);
732
733 return r;
734}
735
736int regulatory_init(void)
737{
738 reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
739 if (IS_ERR(reg_pdev))
740 return PTR_ERR(reg_pdev);
741 return 0;
742}
743
744void regulatory_exit(void)
745{
746 struct regulatory_request *req, *req_tmp;
747 mutex_lock(&cfg80211_drv_mutex);
748#ifdef CONFIG_WIRELESS_OLD_REGULATORY
749 reset_regdomains_static();
750#else
751 reset_regdomains();
752#endif
753 list_for_each_entry_safe(req, req_tmp, &regulatory_requests, list) {
754 list_del(&req->list);
755 kfree(req);
756 }
757 platform_device_unregister(reg_pdev);
758 mutex_unlock(&cfg80211_drv_mutex);
180} 759}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
new file mode 100644
index 000000000000..d75fd0232972
--- /dev/null
+++ b/net/wireless/reg.h
@@ -0,0 +1,44 @@
1#ifndef __NET_WIRELESS_REG_H
2#define __NET_WIRELESS_REG_H
3
4extern const struct ieee80211_regdomain world_regdom;
5#ifdef CONFIG_WIRELESS_OLD_REGULATORY
6extern const struct ieee80211_regdomain us_regdom;
7extern const struct ieee80211_regdomain jp_regdom;
8extern const struct ieee80211_regdomain eu_regdom;
9#endif
10
11extern struct ieee80211_regdomain *cfg80211_regdomain;
12extern struct ieee80211_regdomain *cfg80211_world_regdom;
13extern struct list_head regulatory_requests;
14
15struct regdom_last_setby {
16 struct wiphy *wiphy;
17 u8 initiator;
18};
19
20/* wiphy is set if this request's initiator is REGDOM_SET_BY_DRIVER */
21struct regulatory_request {
22 struct list_head list;
23 struct wiphy *wiphy;
24 int granted;
25 enum reg_set_by initiator;
26 char alpha2[2];
27};
28
29bool is_world_regdom(char *alpha2);
30bool reg_is_valid_request(char *alpha2);
31
32int set_regdom(struct ieee80211_regdomain *rd);
33int __regulatory_hint_alpha2(struct wiphy *wiphy, enum reg_set_by set_by,
34 const char *alpha2);
35
36int regulatory_init(void);
37void regulatory_exit(void);
38
39void print_regdomain_info(struct ieee80211_regdomain *);
40
41/* If a char is A-Z */
42#define IS_ALPHA(letter) (letter >= 65 && letter <= 90)
43
44#endif /* __NET_WIRELESS_REG_H */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 841b32a2e680..ef9ccbc38752 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -34,7 +34,7 @@
34 34
35#include "xfrm_hash.h" 35#include "xfrm_hash.h"
36 36
37int sysctl_xfrm_larval_drop __read_mostly; 37int sysctl_xfrm_larval_drop __read_mostly = 1;
38 38
39#ifdef CONFIG_XFRM_STATISTICS 39#ifdef CONFIG_XFRM_STATISTICS
40DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly; 40DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly;
@@ -1077,6 +1077,7 @@ static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1077 struct hlist_head *chain = policy_hash_bysel(&pol->selector, 1077 struct hlist_head *chain = policy_hash_bysel(&pol->selector,
1078 pol->family, dir); 1078 pol->family, dir);
1079 1079
1080 list_add_tail(&pol->bytype, &xfrm_policy_bytype[pol->type]);
1080 hlist_add_head(&pol->bydst, chain); 1081 hlist_add_head(&pol->bydst, chain);
1081 hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index)); 1082 hlist_add_head(&pol->byidx, xfrm_policy_byidx+idx_hash(pol->index));
1082 xfrm_policy_count[dir]++; 1083 xfrm_policy_count[dir]++;
@@ -1731,8 +1732,7 @@ restart:
1731 * We can't enlist stable bundles either. 1732 * We can't enlist stable bundles either.
1732 */ 1733 */
1733 write_unlock_bh(&policy->lock); 1734 write_unlock_bh(&policy->lock);
1734 if (dst) 1735 dst_free(dst);
1735 dst_free(dst);
1736 1736
1737 if (pol_dead) 1737 if (pol_dead)
1738 XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLDEAD); 1738 XFRM_INC_STATS(LINUX_MIB_XFRMOUTPOLDEAD);
@@ -1748,8 +1748,7 @@ restart:
1748 err = xfrm_dst_update_origin(dst, fl); 1748 err = xfrm_dst_update_origin(dst, fl);
1749 if (unlikely(err)) { 1749 if (unlikely(err)) {
1750 write_unlock_bh(&policy->lock); 1750 write_unlock_bh(&policy->lock);
1751 if (dst) 1751 dst_free(dst);
1752 dst_free(dst);
1753 XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR); 1752 XFRM_INC_STATS(LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1754 goto error; 1753 goto error;
1755 } 1754 }
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 4c6914ef7d92..053970e8765d 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -59,6 +59,14 @@ static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
59static unsigned int xfrm_state_num; 59static unsigned int xfrm_state_num;
60static unsigned int xfrm_state_genid; 60static unsigned int xfrm_state_genid;
61 61
62/* Counter indicating ongoing walk, protected by xfrm_state_lock. */
63static unsigned long xfrm_state_walk_ongoing;
64/* Counter indicating walk completion, protected by xfrm_cfg_mutex. */
65static unsigned long xfrm_state_walk_completed;
66
67/* List of outstanding state walks used to set the completed counter. */
68static LIST_HEAD(xfrm_state_walks);
69
62static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); 70static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
63static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); 71static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
64 72
@@ -191,7 +199,8 @@ static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
191static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO]; 199static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
192 200
193static struct work_struct xfrm_state_gc_work; 201static struct work_struct xfrm_state_gc_work;
194static HLIST_HEAD(xfrm_state_gc_list); 202static LIST_HEAD(xfrm_state_gc_leftovers);
203static LIST_HEAD(xfrm_state_gc_list);
195static DEFINE_SPINLOCK(xfrm_state_gc_lock); 204static DEFINE_SPINLOCK(xfrm_state_gc_lock);
196 205
197int __xfrm_state_delete(struct xfrm_state *x); 206int __xfrm_state_delete(struct xfrm_state *x);
@@ -403,17 +412,23 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
403 412
404static void xfrm_state_gc_task(struct work_struct *data) 413static void xfrm_state_gc_task(struct work_struct *data)
405{ 414{
406 struct xfrm_state *x; 415 struct xfrm_state *x, *tmp;
407 struct hlist_node *entry, *tmp; 416 unsigned long completed;
408 struct hlist_head gc_list;
409 417
418 mutex_lock(&xfrm_cfg_mutex);
410 spin_lock_bh(&xfrm_state_gc_lock); 419 spin_lock_bh(&xfrm_state_gc_lock);
411 gc_list.first = xfrm_state_gc_list.first; 420 list_splice_tail_init(&xfrm_state_gc_list, &xfrm_state_gc_leftovers);
412 INIT_HLIST_HEAD(&xfrm_state_gc_list);
413 spin_unlock_bh(&xfrm_state_gc_lock); 421 spin_unlock_bh(&xfrm_state_gc_lock);
414 422
415 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst) 423 completed = xfrm_state_walk_completed;
424 mutex_unlock(&xfrm_cfg_mutex);
425
426 list_for_each_entry_safe(x, tmp, &xfrm_state_gc_leftovers, gclist) {
427 if ((long)(x->lastused - completed) > 0)
428 break;
429 list_del(&x->gclist);
416 xfrm_state_gc_destroy(x); 430 xfrm_state_gc_destroy(x);
431 }
417 432
418 wake_up(&km_waitq); 433 wake_up(&km_waitq);
419} 434}
@@ -540,12 +555,8 @@ void __xfrm_state_destroy(struct xfrm_state *x)
540{ 555{
541 WARN_ON(x->km.state != XFRM_STATE_DEAD); 556 WARN_ON(x->km.state != XFRM_STATE_DEAD);
542 557
543 spin_lock_bh(&xfrm_state_lock);
544 list_del(&x->all);
545 spin_unlock_bh(&xfrm_state_lock);
546
547 spin_lock_bh(&xfrm_state_gc_lock); 558 spin_lock_bh(&xfrm_state_gc_lock);
548 hlist_add_head(&x->bydst, &xfrm_state_gc_list); 559 list_add_tail(&x->gclist, &xfrm_state_gc_list);
549 spin_unlock_bh(&xfrm_state_gc_lock); 560 spin_unlock_bh(&xfrm_state_gc_lock);
550 schedule_work(&xfrm_state_gc_work); 561 schedule_work(&xfrm_state_gc_work);
551} 562}
@@ -558,6 +569,8 @@ int __xfrm_state_delete(struct xfrm_state *x)
558 if (x->km.state != XFRM_STATE_DEAD) { 569 if (x->km.state != XFRM_STATE_DEAD) {
559 x->km.state = XFRM_STATE_DEAD; 570 x->km.state = XFRM_STATE_DEAD;
560 spin_lock(&xfrm_state_lock); 571 spin_lock(&xfrm_state_lock);
572 x->lastused = xfrm_state_walk_ongoing;
573 list_del_rcu(&x->all);
561 hlist_del(&x->bydst); 574 hlist_del(&x->bydst);
562 hlist_del(&x->bysrc); 575 hlist_del(&x->bysrc);
563 if (x->id.spi) 576 if (x->id.spi)
@@ -780,11 +793,13 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
780{ 793{
781 unsigned int h; 794 unsigned int h;
782 struct hlist_node *entry; 795 struct hlist_node *entry;
783 struct xfrm_state *x, *x0; 796 struct xfrm_state *x, *x0, *to_put;
784 int acquire_in_progress = 0; 797 int acquire_in_progress = 0;
785 int error = 0; 798 int error = 0;
786 struct xfrm_state *best = NULL; 799 struct xfrm_state *best = NULL;
787 800
801 to_put = NULL;
802
788 spin_lock_bh(&xfrm_state_lock); 803 spin_lock_bh(&xfrm_state_lock);
789 h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family); 804 h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
790 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) { 805 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
@@ -833,7 +848,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
833 if (tmpl->id.spi && 848 if (tmpl->id.spi &&
834 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi, 849 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
835 tmpl->id.proto, family)) != NULL) { 850 tmpl->id.proto, family)) != NULL) {
836 xfrm_state_put(x0); 851 to_put = x0;
837 error = -EEXIST; 852 error = -EEXIST;
838 goto out; 853 goto out;
839 } 854 }
@@ -849,13 +864,14 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
849 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); 864 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
850 if (error) { 865 if (error) {
851 x->km.state = XFRM_STATE_DEAD; 866 x->km.state = XFRM_STATE_DEAD;
852 xfrm_state_put(x); 867 to_put = x;
853 x = NULL; 868 x = NULL;
854 goto out; 869 goto out;
855 } 870 }
856 871
857 if (km_query(x, tmpl, pol) == 0) { 872 if (km_query(x, tmpl, pol) == 0) {
858 x->km.state = XFRM_STATE_ACQ; 873 x->km.state = XFRM_STATE_ACQ;
874 list_add_tail(&x->all, &xfrm_state_all);
859 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 875 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
860 h = xfrm_src_hash(daddr, saddr, family); 876 h = xfrm_src_hash(daddr, saddr, family);
861 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 877 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
@@ -870,7 +886,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
870 xfrm_hash_grow_check(x->bydst.next != NULL); 886 xfrm_hash_grow_check(x->bydst.next != NULL);
871 } else { 887 } else {
872 x->km.state = XFRM_STATE_DEAD; 888 x->km.state = XFRM_STATE_DEAD;
873 xfrm_state_put(x); 889 to_put = x;
874 x = NULL; 890 x = NULL;
875 error = -ESRCH; 891 error = -ESRCH;
876 } 892 }
@@ -881,6 +897,8 @@ out:
881 else 897 else
882 *err = acquire_in_progress ? -EAGAIN : error; 898 *err = acquire_in_progress ? -EAGAIN : error;
883 spin_unlock_bh(&xfrm_state_lock); 899 spin_unlock_bh(&xfrm_state_lock);
900 if (to_put)
901 xfrm_state_put(to_put);
884 return x; 902 return x;
885} 903}
886 904
@@ -1051,6 +1069,7 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re
1051 xfrm_state_hold(x); 1069 xfrm_state_hold(x);
1052 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ; 1070 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
1053 add_timer(&x->timer); 1071 add_timer(&x->timer);
1072 list_add_tail(&x->all, &xfrm_state_all);
1054 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 1073 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
1055 h = xfrm_src_hash(daddr, saddr, family); 1074 h = xfrm_src_hash(daddr, saddr, family);
1056 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 1075 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
@@ -1067,18 +1086,20 @@ static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
1067 1086
1068int xfrm_state_add(struct xfrm_state *x) 1087int xfrm_state_add(struct xfrm_state *x)
1069{ 1088{
1070 struct xfrm_state *x1; 1089 struct xfrm_state *x1, *to_put;
1071 int family; 1090 int family;
1072 int err; 1091 int err;
1073 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); 1092 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1074 1093
1075 family = x->props.family; 1094 family = x->props.family;
1076 1095
1096 to_put = NULL;
1097
1077 spin_lock_bh(&xfrm_state_lock); 1098 spin_lock_bh(&xfrm_state_lock);
1078 1099
1079 x1 = __xfrm_state_locate(x, use_spi, family); 1100 x1 = __xfrm_state_locate(x, use_spi, family);
1080 if (x1) { 1101 if (x1) {
1081 xfrm_state_put(x1); 1102 to_put = x1;
1082 x1 = NULL; 1103 x1 = NULL;
1083 err = -EEXIST; 1104 err = -EEXIST;
1084 goto out; 1105 goto out;
@@ -1088,7 +1109,7 @@ int xfrm_state_add(struct xfrm_state *x)
1088 x1 = __xfrm_find_acq_byseq(x->km.seq); 1109 x1 = __xfrm_find_acq_byseq(x->km.seq);
1089 if (x1 && ((x1->id.proto != x->id.proto) || 1110 if (x1 && ((x1->id.proto != x->id.proto) ||
1090 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) { 1111 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
1091 xfrm_state_put(x1); 1112 to_put = x1;
1092 x1 = NULL; 1113 x1 = NULL;
1093 } 1114 }
1094 } 1115 }
@@ -1110,6 +1131,9 @@ out:
1110 xfrm_state_put(x1); 1131 xfrm_state_put(x1);
1111 } 1132 }
1112 1133
1134 if (to_put)
1135 xfrm_state_put(to_put);
1136
1113 return err; 1137 return err;
1114} 1138}
1115EXPORT_SYMBOL(xfrm_state_add); 1139EXPORT_SYMBOL(xfrm_state_add);
@@ -1269,10 +1293,12 @@ EXPORT_SYMBOL(xfrm_state_migrate);
1269 1293
1270int xfrm_state_update(struct xfrm_state *x) 1294int xfrm_state_update(struct xfrm_state *x)
1271{ 1295{
1272 struct xfrm_state *x1; 1296 struct xfrm_state *x1, *to_put;
1273 int err; 1297 int err;
1274 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); 1298 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1275 1299
1300 to_put = NULL;
1301
1276 spin_lock_bh(&xfrm_state_lock); 1302 spin_lock_bh(&xfrm_state_lock);
1277 x1 = __xfrm_state_locate(x, use_spi, x->props.family); 1303 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1278 1304
@@ -1281,7 +1307,7 @@ int xfrm_state_update(struct xfrm_state *x)
1281 goto out; 1307 goto out;
1282 1308
1283 if (xfrm_state_kern(x1)) { 1309 if (xfrm_state_kern(x1)) {
1284 xfrm_state_put(x1); 1310 to_put = x1;
1285 err = -EEXIST; 1311 err = -EEXIST;
1286 goto out; 1312 goto out;
1287 } 1313 }
@@ -1295,6 +1321,9 @@ int xfrm_state_update(struct xfrm_state *x)
1295out: 1321out:
1296 spin_unlock_bh(&xfrm_state_lock); 1322 spin_unlock_bh(&xfrm_state_lock);
1297 1323
1324 if (to_put)
1325 xfrm_state_put(to_put);
1326
1298 if (err) 1327 if (err)
1299 return err; 1328 return err;
1300 1329
@@ -1578,6 +1607,41 @@ out:
1578} 1607}
1579EXPORT_SYMBOL(xfrm_state_walk); 1608EXPORT_SYMBOL(xfrm_state_walk);
1580 1609
1610void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
1611{
1612 walk->proto = proto;
1613 walk->state = NULL;
1614 walk->count = 0;
1615 list_add_tail(&walk->list, &xfrm_state_walks);
1616 walk->genid = ++xfrm_state_walk_ongoing;
1617}
1618EXPORT_SYMBOL(xfrm_state_walk_init);
1619
1620void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1621{
1622 struct list_head *prev;
1623
1624 if (walk->state != NULL) {
1625 xfrm_state_put(walk->state);
1626 walk->state = NULL;
1627 }
1628
1629 prev = walk->list.prev;
1630 list_del(&walk->list);
1631
1632 if (prev != &xfrm_state_walks) {
1633 list_entry(prev, struct xfrm_state_walk, list)->genid =
1634 walk->genid;
1635 return;
1636 }
1637
1638 xfrm_state_walk_completed = walk->genid;
1639
1640 if (!list_empty(&xfrm_state_gc_leftovers))
1641 schedule_work(&xfrm_state_gc_work);
1642}
1643EXPORT_SYMBOL(xfrm_state_walk_done);
1644
1581 1645
1582void xfrm_replay_notify(struct xfrm_state *x, int event) 1646void xfrm_replay_notify(struct xfrm_state *x, int event)
1583{ 1647{
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 7bd296cca041..46f23971f7e4 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -508,12 +508,11 @@ int devcgroup_inode_permission(struct inode *inode, int mask)
508 return 0; 508 return 0;
509 if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)) 509 if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
510 return 0; 510 return 0;
511 dev_cgroup = css_to_devcgroup(task_subsys_state(current,
512 devices_subsys_id));
513 if (!dev_cgroup)
514 return 0;
515 511
516 rcu_read_lock(); 512 rcu_read_lock();
513
514 dev_cgroup = task_devcgroup(current);
515
517 list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) { 516 list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
518 if (wh->type & DEV_ALL) 517 if (wh->type & DEV_ALL)
519 goto acc_check; 518 goto acc_check;
@@ -533,6 +532,7 @@ acc_check:
533 rcu_read_unlock(); 532 rcu_read_unlock();
534 return 0; 533 return 0;
535 } 534 }
535
536 rcu_read_unlock(); 536 rcu_read_unlock();
537 537
538 return -EPERM; 538 return -EPERM;
@@ -543,12 +543,10 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
543 struct dev_cgroup *dev_cgroup; 543 struct dev_cgroup *dev_cgroup;
544 struct dev_whitelist_item *wh; 544 struct dev_whitelist_item *wh;
545 545
546 dev_cgroup = css_to_devcgroup(task_subsys_state(current,
547 devices_subsys_id));
548 if (!dev_cgroup)
549 return 0;
550
551 rcu_read_lock(); 546 rcu_read_lock();
547
548 dev_cgroup = task_devcgroup(current);
549
552 list_for_each_entry(wh, &dev_cgroup->whitelist, list) { 550 list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
553 if (wh->type & DEV_ALL) 551 if (wh->type & DEV_ALL)
554 goto acc_check; 552 goto acc_check;
@@ -566,6 +564,8 @@ acc_check:
566 rcu_read_unlock(); 564 rcu_read_unlock();
567 return 0; 565 return 0;
568 } 566 }
567
569 rcu_read_unlock(); 568 rcu_read_unlock();
569
570 return -EPERM; 570 return -EPERM;
571} 571}
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b52f923ce680..d11a8154500f 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -811,11 +811,12 @@ static int string_to_context_struct(struct policydb *pol,
811 /* Check the validity of the new context. */ 811 /* Check the validity of the new context. */
812 if (!policydb_context_isvalid(pol, ctx)) { 812 if (!policydb_context_isvalid(pol, ctx)) {
813 rc = -EINVAL; 813 rc = -EINVAL;
814 context_destroy(ctx);
815 goto out; 814 goto out;
816 } 815 }
817 rc = 0; 816 rc = 0;
818out: 817out:
818 if (rc)
819 context_destroy(ctx);
819 return rc; 820 return rc;
820} 821}
821 822
@@ -868,8 +869,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
868 } else if (rc) 869 } else if (rc)
869 goto out; 870 goto out;
870 rc = sidtab_context_to_sid(&sidtab, &context, sid); 871 rc = sidtab_context_to_sid(&sidtab, &context, sid);
871 if (rc) 872 context_destroy(&context);
872 context_destroy(&context);
873out: 873out:
874 read_unlock(&policy_rwlock); 874 read_unlock(&policy_rwlock);
875 kfree(scontext2); 875 kfree(scontext2);
diff --git a/sound/Kconfig b/sound/Kconfig
index a37bee094eba..8ebf512ced6c 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -91,6 +91,9 @@ endif # SOUND_PRIME
91 91
92endif # !M68K 92endif # !M68K
93 93
94endif # SOUND
95
96# AC97_BUS is used from both sound and ucb1400
94config AC97_BUS 97config AC97_BUS
95 tristate 98 tristate
96 help 99 help
@@ -99,4 +102,3 @@ config AC97_BUS
99 sound although they're sharing the AC97 bus. Concerned drivers 102 sound although they're sharing the AC97 bus. Concerned drivers
100 should "select" this. 103 should "select" this.
101 104
102endif # SOUND
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 909f1c101c95..66025161bd69 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6197,7 +6197,6 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
6197 SND_PCI_QUIRK(0x1043, 0x817f, "Asus P5LD2", ALC882_6ST_DIG), 6197 SND_PCI_QUIRK(0x1043, 0x817f, "Asus P5LD2", ALC882_6ST_DIG),
6198 SND_PCI_QUIRK(0x1043, 0x81d8, "Asus P5WD", ALC882_6ST_DIG), 6198 SND_PCI_QUIRK(0x1043, 0x81d8, "Asus P5WD", ALC882_6ST_DIG),
6199 SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC882_6ST_DIG), 6199 SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC882_6ST_DIG),
6200 SND_PCI_QUIRK(0x106b, 0x00a0, "Apple iMac 24''", ALC885_IMAC24),
6201 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG), 6200 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG),
6202 SND_PCI_QUIRK(0x1462, 0x28fb, "Targa T8", ALC882_TARGA), /* MSI-1049 T8 */ 6201 SND_PCI_QUIRK(0x1462, 0x28fb, "Targa T8", ALC882_TARGA), /* MSI-1049 T8 */
6203 SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC882_6ST_DIG), 6202 SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC882_6ST_DIG),
@@ -14067,6 +14066,13 @@ static struct hda_verb alc662_auto_init_verbs[] = {
14067 { } 14066 { }
14068}; 14067};
14069 14068
14069/* additional verbs for ALC663 */
14070static struct hda_verb alc663_auto_init_verbs[] = {
14071 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
14072 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
14073 { }
14074};
14075
14070static struct hda_verb alc663_m51va_init_verbs[] = { 14076static struct hda_verb alc663_m51va_init_verbs[] = {
14071 {0x21, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, 14077 {0x21, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
14072 {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, 14078 {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
@@ -14595,6 +14601,14 @@ static int alc662_auto_create_extra_out(struct alc_spec *spec, hda_nid_t pin,
14595 if (!pin) 14601 if (!pin)
14596 return 0; 14602 return 0;
14597 14603
14604 if (pin == 0x17) {
14605 /* ALC663 has a mono output pin on 0x17 */
14606 sprintf(name, "%s Playback Switch", pfx);
14607 err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
14608 HDA_COMPOSE_AMP_VAL(pin, 2, 0, HDA_OUTPUT));
14609 return err;
14610 }
14611
14598 if (alc880_is_fixed_pin(pin)) { 14612 if (alc880_is_fixed_pin(pin)) {
14599 nid = alc880_idx_to_dac(alc880_fixed_pin_idx(pin)); 14613 nid = alc880_idx_to_dac(alc880_fixed_pin_idx(pin));
14600 /* printk("DAC nid=%x\n",nid); */ 14614 /* printk("DAC nid=%x\n",nid); */
@@ -14765,6 +14779,14 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
14765 spec->input_mux = &spec->private_imux; 14779 spec->input_mux = &spec->private_imux;
14766 14780
14767 spec->init_verbs[spec->num_init_verbs++] = alc662_auto_init_verbs; 14781 spec->init_verbs[spec->num_init_verbs++] = alc662_auto_init_verbs;
14782 if (codec->vendor_id == 0x10ec0663)
14783 spec->init_verbs[spec->num_init_verbs++] =
14784 alc663_auto_init_verbs;
14785
14786 err = alc_auto_add_mic_boost(codec);
14787 if (err < 0)
14788 return err;
14789
14768 spec->mixers[spec->num_mixers] = alc662_capture_mixer; 14790 spec->mixers[spec->num_mixers] = alc662_capture_mixer;
14769 spec->num_mixers++; 14791 spec->num_mixers++;
14770 return 1; 14792 return 1;
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 7fdafcb0015d..ad994fcab725 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -560,8 +560,9 @@ static struct hda_verb dell_eq_core_init[] = {
560}; 560};
561 561
562static struct hda_verb dell_m6_core_init[] = { 562static struct hda_verb dell_m6_core_init[] = {
563 /* set master volume and direct control */ 563 /* set master volume to max value without distortion
564 { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, 564 * and direct control */
565 { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xec},
565 /* setup audio connections */ 566 /* setup audio connections */
566 { 0x0d, AC_VERB_SET_CONNECT_SEL, 0x00}, 567 { 0x0d, AC_VERB_SET_CONNECT_SEL, 0x00},
567 { 0x0a, AC_VERB_SET_CONNECT_SEL, 0x01}, 568 { 0x0a, AC_VERB_SET_CONNECT_SEL, 0x01},
diff --git a/sound/pci/oxygen/hifier.c b/sound/pci/oxygen/hifier.c
index 7442460583dd..dad393ae040a 100644
--- a/sound/pci/oxygen/hifier.c
+++ b/sound/pci/oxygen/hifier.c
@@ -17,6 +17,7 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20#include <linux/delay.h>
20#include <linux/pci.h> 21#include <linux/pci.h>
21#include <sound/control.h> 22#include <sound/control.h>
22#include <sound/core.h> 23#include <sound/core.h>
@@ -107,6 +108,9 @@ static void set_ak4396_params(struct oxygen *chip,
107 else 108 else
108 value |= AK4396_DFS_QUAD; 109 value |= AK4396_DFS_QUAD;
109 data->ak4396_ctl2 = value; 110 data->ak4396_ctl2 = value;
111
112 msleep(1); /* wait for the new MCLK to become stable */
113
110 ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB); 114 ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB);
111 ak4396_write(chip, AK4396_CONTROL_2, value); 115 ak4396_write(chip, AK4396_CONTROL_2, value);
112 ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB | AK4396_RSTN); 116 ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB | AK4396_RSTN);
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 7c8ae31eb468..c5829d30ef86 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -28,6 +28,7 @@
28 * GPIO 1 -> DFS1 of AK5385 28 * GPIO 1 -> DFS1 of AK5385
29 */ 29 */
30 30
31#include <linux/delay.h>
31#include <linux/mutex.h> 32#include <linux/mutex.h>
32#include <linux/pci.h> 33#include <linux/pci.h>
33#include <sound/ac97_codec.h> 34#include <sound/ac97_codec.h>
@@ -213,6 +214,9 @@ static void set_ak4396_params(struct oxygen *chip,
213 else 214 else
214 value |= AK4396_DFS_QUAD; 215 value |= AK4396_DFS_QUAD;
215 data->ak4396_ctl2 = value; 216 data->ak4396_ctl2 = value;
217
218 msleep(1); /* wait for the new MCLK to become stable */
219
216 for (i = 0; i < 4; ++i) { 220 for (i = 0; i < 4; ++i) {
217 ak4396_write(chip, i, 221 ak4396_write(chip, i,
218 AK4396_CONTROL_1, AK4396_DIF_24_MSB); 222 AK4396_CONTROL_1, AK4396_DIF_24_MSB);
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 8548818eea08..c796b1882776 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -16,6 +16,7 @@
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/platform_device.h>
19#include <sound/core.h> 20#include <sound/core.h>
20#include <sound/pcm.h> 21#include <sound/pcm.h>
21#include <sound/initval.h> 22#include <sound/initval.h>
@@ -81,7 +82,6 @@ static int pxa2xx_i2s_startup(struct snd_pcm_substream *substream)
81 struct snd_soc_pcm_runtime *rtd = substream->private_data; 82 struct snd_soc_pcm_runtime *rtd = substream->private_data;
82 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; 83 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
83 84
84 clk_i2s = clk_get(NULL, "I2SCLK");
85 if (IS_ERR(clk_i2s)) 85 if (IS_ERR(clk_i2s))
86 return PTR_ERR(clk_i2s); 86 return PTR_ERR(clk_i2s);
87 87
@@ -152,6 +152,7 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream,
152 pxa_gpio_mode(gpio_bus[pxa_i2s.master].tx); 152 pxa_gpio_mode(gpio_bus[pxa_i2s.master].tx);
153 pxa_gpio_mode(gpio_bus[pxa_i2s.master].frm); 153 pxa_gpio_mode(gpio_bus[pxa_i2s.master].frm);
154 pxa_gpio_mode(gpio_bus[pxa_i2s.master].clk); 154 pxa_gpio_mode(gpio_bus[pxa_i2s.master].clk);
155 BUG_ON(IS_ERR(clk_i2s));
155 clk_enable(clk_i2s); 156 clk_enable(clk_i2s);
156 pxa_i2s_wait(); 157 pxa_i2s_wait();
157 158
@@ -317,6 +318,43 @@ struct snd_soc_dai pxa_i2s_dai = {
317 318
318EXPORT_SYMBOL_GPL(pxa_i2s_dai); 319EXPORT_SYMBOL_GPL(pxa_i2s_dai);
319 320
321static int pxa2xx_i2s_probe(struct platform_device *dev)
322{
323 clk_i2s = clk_get(&dev->dev, "I2SCLK");
324 return IS_ERR(clk_i2s) ? PTR_ERR(clk_i2s) : 0;
325}
326
327static int __devexit pxa2xx_i2s_remove(struct platform_device *dev)
328{
329 clk_put(clk_i2s);
330 clk_i2s = ERR_PTR(-ENOENT);
331 return 0;
332}
333
334static struct platform_driver pxa2xx_i2s_driver = {
335 .probe = pxa2xx_i2s_probe,
336 .remove = __devexit_p(pxa2xx_i2s_remove),
337
338 .driver = {
339 .name = "pxa2xx-i2s",
340 .owner = THIS_MODULE,
341 },
342};
343
344static int __init pxa2xx_i2s_init(void)
345{
346 clk_i2s = ERR_PTR(-ENOENT);
347 return platform_driver_register(&pxa2xx_i2s_driver);
348}
349
350static void __exit pxa2xx_i2s_exit(void)
351{
352 platform_driver_unregister(&pxa2xx_i2s_driver);
353}
354
355module_init(pxa2xx_i2s_init);
356module_exit(pxa2xx_i2s_exit);
357
320/* Module information */ 358/* Module information */
321MODULE_AUTHOR("Liam Girdwood, liam.girdwood@wolfsonmicro.com, www.wolfsonmicro.com"); 359MODULE_AUTHOR("Liam Girdwood, liam.girdwood@wolfsonmicro.com, www.wolfsonmicro.com");
322MODULE_DESCRIPTION("pxa2xx I2S SoC Interface"); 360MODULE_DESCRIPTION("pxa2xx I2S SoC Interface");